From: Chandan Padhi Date: Wed, 16 Nov 2022 09:25:34 +0000 (+0530) Subject: fixup! Use upstream clang 14 for desktop X-Git-Tag: submit/tizen/20230227.160252~205 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fchanges%2F72%2F284372%2F3;p=platform%2Fframework%2Fweb%2Fchromium-efl.git fixup! Use upstream clang 14 for desktop Some of the changes got missed during rebase of the main patch. Change-Id: Ia249e2dd01728c34c7042d8aefb8b0cf35438497 Signed-off-by: Chandan Padhi --- diff --git a/build/toolchain/toolchain.gni b/build/toolchain/toolchain.gni index 6a49c39..5c24f73 100644 --- a/build/toolchain/toolchain.gni +++ b/build/toolchain/toolchain.gni @@ -33,7 +33,7 @@ if (generate_linker_map) { } declare_args() { - clang_version = "16.0.0" + clang_version = "14.0.0" } # Extension for shared library files (including leading dot). diff --git a/third_party/.gitignore b/third_party/.gitignore index 8f7abb1..6f554af 100644 --- a/third_party/.gitignore +++ b/third_party/.gitignore @@ -163,19 +163,13 @@ /libunwindstack /libvpx/source/libvpx /libwebm/source -<<<<<<< HEAD -/libxml -/libxslt -======= /libwebp/src ->>>>>>> b97a2ffe11d... Upload upstream chromium 108.0.5359.1 /libyuv /lighttpd /llvm /llvm-allocated-type /llvm-bootstrap /llvm-bootstrap-install -/llvm-build /llvm-build-tools /lss /material_design_icons/src diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_builtin_vars.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_builtin_vars.h new file mode 100644 index 0000000..412e823 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_builtin_vars.h @@ -0,0 +1,121 @@ +/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CUDA_BUILTIN_VARS_H +#define __CUDA_BUILTIN_VARS_H + +// Forward declares from vector_types.h. +struct uint3; +struct dim3; + +// The file implements built-in CUDA variables using __declspec(property). +// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx +// All read accesses of built-in variable fields get converted into calls to a +// getter function which in turn calls the appropriate builtin to fetch the +// value. +// +// Example: +// int x = threadIdx.x; +// IR output: +// %0 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #3 +// PTX output: +// mov.u32 %r2, %tid.x; + +#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC) \ + __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD; \ + static inline __attribute__((always_inline)) \ + __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) { \ + return INTRINSIC; \ + } + +#if __cplusplus >= 201103L +#define __DELETE =delete +#else +#define __DELETE +#endif + +// Make sure nobody can create instances of the special variable types. nvcc +// also disallows taking address of special variables, so we disable address-of +// operator as well. 
+#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \ + __attribute__((device)) TypeName() __DELETE; \ + __attribute__((device)) TypeName(const TypeName &) __DELETE; \ + __attribute__((device)) void operator=(const TypeName &) const __DELETE; \ + __attribute__((device)) TypeName *operator&() const __DELETE + +struct __cuda_builtin_threadIdx_t { + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_tid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_tid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z()); + // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a + // uint3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator dim3() const; + __attribute__((device)) operator uint3() const; + +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t); +}; + +struct __cuda_builtin_blockIdx_t { + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ctaid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ctaid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z()); + // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a + // uint3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator dim3() const; + __attribute__((device)) operator uint3() const; + +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t); +}; + +struct __cuda_builtin_blockDim_t { + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ntid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ntid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ntid_z()); + // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a + // dim3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator dim3() const; + __attribute__((device)) operator uint3() const; + +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t); +}; + +struct __cuda_builtin_gridDim_t { + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_nctaid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_nctaid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_nctaid_z()); + // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a + // dim3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator dim3() const; + __attribute__((device)) operator uint3() const; + +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t); +}; + +#define __CUDA_BUILTIN_VAR \ + extern const __attribute__((device)) __attribute__((weak)) +__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx; +__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx; +__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim; +__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim; + +// warpSize should translate to read of %WARP_SZ but there's currently no +// builtin to do so. According to PTX v4.2 docs 'to date, all target +// architectures have a WARP_SZ value of 32'. 
+__attribute__((device)) const int warpSize = 32;
+
+#undef __CUDA_DEVICE_BUILTIN
+#undef __CUDA_BUILTIN_VAR
+#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+#undef __DELETE
+
+#endif /* __CUDA_BUILTIN_VARS_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_cmath.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_cmath.h
new file mode 100644
index 0000000..5bbb59a
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_cmath.h
@@ -0,0 +1,512 @@
+/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_CMATH_H__
+#define __CLANG_CUDA_CMATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+#ifndef __OPENMP_NVPTX__
+#include <limits>
+#endif
+
+// CUDA lets us use various std math functions on the device side. This file
+// works in concert with __clang_cuda_math_forward_declares.h to make this work.
+//
+// Specifically, the forward-declares header declares __device__ overloads for
+// these functions in the global namespace, then pulls them into namespace std
+// with 'using' statements. Then this file implements those functions, after
+// their implementations have been pulled in.
+//
+// It's important that we declare the functions in the global namespace and pull
+// them into namespace std with using statements, as opposed to simply declaring
+// these functions in namespace std, because our device functions need to
+// overload the standard library functions, which may be declared in the global
+// namespace or in std, depending on the degree of conformance of the stdlib
+// implementation. Declaring in the global namespace and pulling into namespace
+// std covers all of the known knowns.
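+//
+// As a minimal sketch of that arrangement (mirroring what
+// __clang_cuda_math_forward_declares.h does for each function; illustrative
+// only, not part of the upstream header):
+//
+//   __device__ float sin(float __x);  // declare in the global namespace...
+//   namespace std { using ::sin; }    // ...then pull it into std
+//
+// so that both ::sin(0.5f) and std::sin(0.5f) resolve to the same device
+// overload, whichever namespace the stdlib declares its own functions in.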
+ +#ifdef __OPENMP_NVPTX__ +#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow)) +#else +#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline)) +#endif + +__DEVICE__ long long abs(long long __n) { return ::llabs(__n); } +__DEVICE__ long abs(long __n) { return ::labs(__n); } +__DEVICE__ float abs(float __x) { return ::fabsf(__x); } +__DEVICE__ double abs(double __x) { return ::fabs(__x); } +__DEVICE__ float acos(float __x) { return ::acosf(__x); } +__DEVICE__ float asin(float __x) { return ::asinf(__x); } +__DEVICE__ float atan(float __x) { return ::atanf(__x); } +__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); } +__DEVICE__ float ceil(float __x) { return ::ceilf(__x); } +__DEVICE__ float cos(float __x) { return ::cosf(__x); } +__DEVICE__ float cosh(float __x) { return ::coshf(__x); } +__DEVICE__ float exp(float __x) { return ::expf(__x); } +__DEVICE__ float fabs(float __x) { return ::fabsf(__x); } +__DEVICE__ float floor(float __x) { return ::floorf(__x); } +__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); } +__DEVICE__ int fpclassify(float __x) { + return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, + FP_ZERO, __x); +} +__DEVICE__ int fpclassify(double __x) { + return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, + FP_ZERO, __x); +} +__DEVICE__ float frexp(float __arg, int *__exp) { + return ::frexpf(__arg, __exp); +} + +// For inscrutable reasons, the CUDA headers define these functions for us on +// Windows. +#if !defined(_MSC_VER) || defined(__OPENMP_NVPTX__) + +// For OpenMP we work around some old system headers that have non-conforming +// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do +// this by providing two versions of these functions, differing only in the +// return type. To avoid conflicting definitions we disable implicit base +// function generation. That means we will end up with two specializations, one +// per type, but only one has a base function defined by the system header. +#if defined(__OPENMP_NVPTX__) +#pragma omp begin declare variant match( \ + implementation = {extension(disable_implicit_base)}) + +// FIXME: We lack an extension to customize the mangling of the variants, e.g., +// add a suffix. This means we would clash with the names of the variants +// (note that we do not create implicit base functions here). To avoid +// this clash we add a new trait to some of them that is always true +// (this is LLVM after all ;)). It will only influence the mangled name +// of the variants inside the inner region and avoid the clash. +#pragma omp begin declare variant match(implementation = {vendor(llvm)}) + +__DEVICE__ int isinf(float __x) { return ::__isinff(__x); } +__DEVICE__ int isinf(double __x) { return ::__isinf(__x); } +__DEVICE__ int isfinite(float __x) { return ::__finitef(__x); } +__DEVICE__ int isfinite(double __x) { return ::__isfinited(__x); } +__DEVICE__ int isnan(float __x) { return ::__isnanf(__x); } +__DEVICE__ int isnan(double __x) { return ::__isnan(__x); } + +#pragma omp end declare variant + +#endif + +__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); } +__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); } +__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); } +// For inscrutable reasons, __finite(), the double-precision version of +// __finitef, does not exist when compiling for MacOS. __isfinited is available +// everywhere and is just as good. 
+__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+
+#if defined(__OPENMP_NVPTX__)
+#pragma omp end declare variant
+#endif
+
+#endif
+
+__DEVICE__ bool isgreater(float __x, float __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreater(double __x, double __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(float __x, float __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(double __x, double __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isless(float __x, float __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool isless(double __x, double __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool islessequal(float __x, float __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessequal(double __x, double __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessgreater(float __x, float __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool islessgreater(double __x, double __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isunordered(float __x, float __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ bool isunordered(double __x, double __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ float ldexp(float __arg, int __exp) {
+  return ::ldexpf(__arg, __exp);
+}
+__DEVICE__ float log(float __x) { return ::logf(__x); }
+__DEVICE__ float log10(float __x) { return ::log10f(__x); }
+__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
+__DEVICE__ float pow(float __base, float __exp) {
+  return ::powf(__base, __exp);
+}
+__DEVICE__ float pow(float __base, int __iexp) {
+  return ::powif(__base, __iexp);
+}
+__DEVICE__ double pow(double __base, int __iexp) {
+  return ::powi(__base, __iexp);
+}
+__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); }
+__DEVICE__ float sin(float __x) { return ::sinf(__x); }
+__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
+__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
+__DEVICE__ float tan(float __x) { return ::tanf(__x); }
+__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+
+// There was a redefinition error for this overload in CUDA mode. We restrict
+// it to OpenMP mode for now, as that is where it is actually needed anyway.
+#ifdef __OPENMP_NVPTX__
+__DEVICE__ float remquo(float __n, float __d, int *__q) {
+  return ::remquof(__n, __d, __q);
+}
+#endif
+
+// Notably missing above is nexttoward. We omit it because
+// libdevice doesn't provide an implementation, and we don't want to be in the
+// business of implementing tricky libm functions in this header.
+
+#ifndef __OPENMP_NVPTX__
+
+// Now we've defined everything we promised we'd define in
+// __clang_cuda_math_forward_declares.h. We need to do two additional things to
+// fix up our math functions.
+//
+// 1) Define __device__ overloads for e.g. sin(int). The CUDA headers define
+//    only sin(float) and sin(double), which means that e.g. sin(0) is
+//    ambiguous.
+//
+// 2) Pull the __device__ overloads of "foobarf" math functions into namespace
+//    std. These are defined in the CUDA headers in the global namespace,
+//    independent of everything else we've done here.
+
+// We can't use std::enable_if, because we want to be pre-C++11 compatible. But
+// we go ahead and unconditionally define functions that are only available when
+// compiling for C++11 to match the behavior of the CUDA headers.
+template <bool __B, class __T = void>
+struct __clang_cuda_enable_if {};
+
+template <class __T> struct __clang_cuda_enable_if<true, __T> {
+  typedef __T type;
+};
+
+// Defines an overload of __fn that accepts one integral argument, calls
+// __fn((double)x), and returns __retty.
+#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn)                     \
+  template <typename __T>                                                     \
+  __DEVICE__                                                                  \
+      typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,   \
+                                      __retty>::type                          \
+      __fn(__T __x) {                                                         \
+    return ::__fn((double)__x);                                               \
+  }
+
+// Defines an overload of __fn that accepts two arithmetic arguments, calls
+// __fn((double)x, (double)y), and returns a double.
+//
+// Note this is different from OVERLOAD_1, which generates an overload that
+// accepts only *integral* arguments.
+#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn)                     \
+  template <typename __T1, typename __T2>                                     \
+  __DEVICE__ typename __clang_cuda_enable_if<                                 \
+      std::numeric_limits<__T1>::is_specialized &&                            \
+          std::numeric_limits<__T2>::is_specialized,                          \
+      __retty>::type                                                          \
+  __fn(__T1 __x, __T2 __y) {                                                  \
+    return __fn((double)__x, (double)__y);                                    \
+  }
+
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc);
+
+#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1
+#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2
+
+// Overloads for functions that don't match the patterns expected by
+// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2}.
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__ typename __clang_cuda_enable_if<
+    std::numeric_limits<__T1>::is_specialized &&
+        std::numeric_limits<__T2>::is_specialized &&
+        std::numeric_limits<__T3>::is_specialized,
+    double>::type
+fma(__T1 __x, __T2 __y, __T3 __z) {
+  return std::fma((double)__x, (double)__y, (double)__z);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+                                           double>::type
+frexp(__T __x, int *__exp) {
+  return std::frexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+                                           double>::type
+ldexp(__T __x, int __exp) {
+  return std::ldexp((double)__x, __exp);
+}
+
+template <typename __T1, typename __T2>
+__DEVICE__ typename __clang_cuda_enable_if<
+    std::numeric_limits<__T1>::is_specialized &&
+        std::numeric_limits<__T2>::is_specialized,
+    double>::type
+remquo(__T1 __x, __T2 __y, int *__quo) {
+  return std::remquo((double)__x, (double)__y, __quo);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+                                           double>::type
+scalbln(__T __x, long __exp) {
+  return std::scalbln((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+                                           double>::type
+scalbn(__T __x, int __exp) {
+  return std::scalbn((double)__x, __exp);
+}
+
+// We need to define these overloads in exactly the namespace our standard
+// library uses (including the right inline namespace), otherwise they won't be
+// picked up by other functions in the standard library (e.g. functions in
+// <complex>). Thus the ugliness below.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+// Pull the new overloads we defined above into namespace std.
+using ::acos; +using ::acosh; +using ::asin; +using ::asinh; +using ::atan; +using ::atan2; +using ::atanh; +using ::cbrt; +using ::ceil; +using ::copysign; +using ::cos; +using ::cosh; +using ::erf; +using ::erfc; +using ::exp; +using ::exp2; +using ::expm1; +using ::fabs; +using ::fdim; +using ::floor; +using ::fma; +using ::fmax; +using ::fmin; +using ::fmod; +using ::fpclassify; +using ::frexp; +using ::hypot; +using ::ilogb; +using ::isfinite; +using ::isgreater; +using ::isgreaterequal; +using ::isless; +using ::islessequal; +using ::islessgreater; +using ::isnormal; +using ::isunordered; +using ::ldexp; +using ::lgamma; +using ::llrint; +using ::llround; +using ::log; +using ::log10; +using ::log1p; +using ::log2; +using ::logb; +using ::lrint; +using ::lround; +using ::nearbyint; +using ::nextafter; +using ::pow; +using ::remainder; +using ::remquo; +using ::rint; +using ::round; +using ::scalbln; +using ::scalbn; +using ::signbit; +using ::sin; +using ::sinh; +using ::sqrt; +using ::tan; +using ::tanh; +using ::tgamma; +using ::trunc; + +// Well this is fun: We need to pull these symbols in for libc++, but we can't +// pull them in with libstdc++, because its ::isinf and ::isnan are different +// than its std::isinf and std::isnan. +#ifndef __GLIBCXX__ +using ::isinf; +using ::isnan; +#endif + +// Finally, pull the "foobarf" functions that CUDA defines in its headers into +// namespace std. +using ::acosf; +using ::acoshf; +using ::asinf; +using ::asinhf; +using ::atan2f; +using ::atanf; +using ::atanhf; +using ::cbrtf; +using ::ceilf; +using ::copysignf; +using ::cosf; +using ::coshf; +using ::erfcf; +using ::erff; +using ::exp2f; +using ::expf; +using ::expm1f; +using ::fabsf; +using ::fdimf; +using ::floorf; +using ::fmaf; +using ::fmaxf; +using ::fminf; +using ::fmodf; +using ::frexpf; +using ::hypotf; +using ::ilogbf; +using ::ldexpf; +using ::lgammaf; +using ::llrintf; +using ::llroundf; +using ::log10f; +using ::log1pf; +using ::log2f; +using ::logbf; +using ::logf; +using ::lrintf; +using ::lroundf; +using ::modff; +using ::nearbyintf; +using ::nextafterf; +using ::powf; +using ::remainderf; +using ::remquof; +using ::rintf; +using ::roundf; +using ::scalblnf; +using ::scalbnf; +using ::sinf; +using ::sinhf; +using ::sqrtf; +using ::tanf; +using ::tanhf; +using ::tgammaf; +using ::truncf; + +#ifdef _LIBCPP_END_NAMESPACE_STD +_LIBCPP_END_NAMESPACE_STD +#else +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_END_NAMESPACE_VERSION +#endif +} // namespace std +#endif + +#endif // __OPENMP_NVPTX__ + +#undef __DEVICE__ + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_complex_builtins.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_complex_builtins.h new file mode 100644 index 0000000..2b701fe --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_complex_builtins.h @@ -0,0 +1,285 @@ +/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_COMPLEX_BUILTINS +#define __CLANG_CUDA_COMPLEX_BUILTINS + +// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3. 
These are
+// libgcc functions that clang assumes are available when compiling c99 complex
+// operations. (These implementations come from libc++, and have been modified
+// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
+
+#pragma push_macro("__DEVICE__")
+#ifdef __OPENMP_NVPTX__
+#pragma omp declare target
+#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
+#else
+#define __DEVICE__ __device__ inline
+#endif
+
+// To make the algorithms available for C and C++ in CUDA and OpenMP we select
+// different but equivalent function versions. TODO: For OpenMP we currently
+// select the native builtins as the overload support for templates is lacking.
+#if !defined(__OPENMP_NVPTX__)
+#define _ISNANd std::isnan
+#define _ISNANf std::isnan
+#define _ISINFd std::isinf
+#define _ISINFf std::isinf
+#define _ISFINITEd std::isfinite
+#define _ISFINITEf std::isfinite
+#define _COPYSIGNd std::copysign
+#define _COPYSIGNf std::copysign
+#define _SCALBNd std::scalbn
+#define _SCALBNf std::scalbn
+#define _ABSd std::abs
+#define _ABSf std::abs
+#define _LOGBd std::logb
+#define _LOGBf std::logb
+// Rather than pulling in std::max from <algorithm> every time, use the
+// available ::max.
+#define _fmaxd max
+#define _fmaxf max
+#else
+#ifdef __AMDGCN__
+#define _ISNANd __ocml_isnan_f64
+#define _ISNANf __ocml_isnan_f32
+#define _ISINFd __ocml_isinf_f64
+#define _ISINFf __ocml_isinf_f32
+#define _ISFINITEd __ocml_isfinite_f64
+#define _ISFINITEf __ocml_isfinite_f32
+#define _COPYSIGNd __ocml_copysign_f64
+#define _COPYSIGNf __ocml_copysign_f32
+#define _SCALBNd __ocml_scalbn_f64
+#define _SCALBNf __ocml_scalbn_f32
+#define _ABSd __ocml_fabs_f64
+#define _ABSf __ocml_fabs_f32
+#define _LOGBd __ocml_logb_f64
+#define _LOGBf __ocml_logb_f32
+#define _fmaxd __ocml_fmax_f64
+#define _fmaxf __ocml_fmax_f32
+#else
+#define _ISNANd __nv_isnand
+#define _ISNANf __nv_isnanf
+#define _ISINFd __nv_isinfd
+#define _ISINFf __nv_isinff
+#define _ISFINITEd __nv_isfinited
+#define _ISFINITEf __nv_finitef
+#define _COPYSIGNd __nv_copysign
+#define _COPYSIGNf __nv_copysignf
+#define _SCALBNd __nv_scalbn
+#define _SCALBNf __nv_scalbnf
+#define _ABSd __nv_fabs
+#define _ABSf __nv_fabsf
+#define _LOGBd __nv_logb
+#define _LOGBf __nv_logbf
+#define _fmaxd __nv_fmax
+#define _fmaxf __nv_fmaxf
+#endif
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,
+                                    double __d) {
+  double __ac = __a * __c;
+  double __bd = __b * __d;
+  double __ad = __a * __d;
+  double __bc = __b * __c;
+  double _Complex z;
+  __real__(z) = __ac - __bd;
+  __imag__(z) = __ad + __bc;
+  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+    int __recalc = 0;
+    if (_ISINFd(__a) || _ISINFd(__b)) {
+      __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);
+      if (_ISNANd(__c))
+        __c = _COPYSIGNd(0, __c);
+      if (_ISNANd(__d))
+        __d = _COPYSIGNd(0, __d);
+      __recalc = 1;
+    }
+    if (_ISINFd(__c) || _ISINFd(__d)) {
+      __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);
+      if (_ISNANd(__a))
+        __a = _COPYSIGNd(0, __a);
+      if (_ISNANd(__b))
+        __b = _COPYSIGNd(0, __b);
+      __recalc = 1;
+    }
+    if (!__recalc &&
+        (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) || _ISINFd(__bc))) {
+      if (_ISNANd(__a))
+        __a = _COPYSIGNd(0, __a);
+      if (_ISNANd(__b))
+        __b = _COPYSIGNd(0, __b);
+      if (_ISNANd(__c))
+        __c = _COPYSIGNd(0, __c);
+      if (_ISNANd(__d))
+        __d = _COPYSIGNd(0, __d);
+      __recalc = 1;
+    }
+    if (__recalc) {
+      // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
+      // a device overload (and isn't constexpr before C++11, naturally).
+      __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
+      __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {
+  float __ac = __a * __c;
+  float __bd = __b * __d;
+  float __ad = __a * __d;
+  float __bc = __b * __c;
+  float _Complex z;
+  __real__(z) = __ac - __bd;
+  __imag__(z) = __ad + __bc;
+  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+    int __recalc = 0;
+    if (_ISINFf(__a) || _ISINFf(__b)) {
+      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+      if (_ISNANf(__c))
+        __c = _COPYSIGNf(0, __c);
+      if (_ISNANf(__d))
+        __d = _COPYSIGNf(0, __d);
+      __recalc = 1;
+    }
+    if (_ISINFf(__c) || _ISINFf(__d)) {
+      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+      if (_ISNANf(__a))
+        __a = _COPYSIGNf(0, __a);
+      if (_ISNANf(__b))
+        __b = _COPYSIGNf(0, __b);
+      __recalc = 1;
+    }
+    if (!__recalc &&
+        (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) || _ISINFf(__bc))) {
+      if (_ISNANf(__a))
+        __a = _COPYSIGNf(0, __a);
+      if (_ISNANf(__b))
+        __b = _COPYSIGNf(0, __b);
+      if (_ISNANf(__c))
+        __c = _COPYSIGNf(0, __c);
+      if (_ISNANf(__d))
+        __d = _COPYSIGNf(0, __d);
+      __recalc = 1;
+    }
+    if (__recalc) {
+      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
+      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
+                                    double __d) {
+  int __ilogbw = 0;
+  // Can't use std::max, because that's defined in <algorithm>, and we don't
+  // want to pull that in for every compile. The CUDA headers define
+  // ::max(float, float) and ::max(double, double), which is sufficient for us.
+  double __logbw = _LOGBd(_fmaxd(_ABSd(__c), _ABSd(__d)));
+  if (_ISFINITEd(__logbw)) {
+    __ilogbw = (int)__logbw;
+    __c = _SCALBNd(__c, -__ilogbw);
+    __d = _SCALBNd(__d, -__ilogbw);
+  }
+  double __denom = __c * __c + __d * __d;
+  double _Complex z;
+  __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);
+  __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);
+  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+    if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {
+      __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;
+      __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;
+    } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&
+               _ISFINITEd(__d)) {
+      __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);
+      __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);
+      __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
+      __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
+    } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&
+               _ISFINITEd(__b)) {
+      __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);
+      __d = _COPYSIGNd(_ISINFd(__d) ?
1.0 : 0.0, __d); + __real__(z) = 0.0 * (__a * __c + __b * __d); + __imag__(z) = 0.0 * (__b * __c - __a * __d); + } + } + return z; +} + +__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) { + int __ilogbw = 0; + float __logbw = _LOGBf(_fmaxf(_ABSf(__c), _ABSf(__d))); + if (_ISFINITEf(__logbw)) { + __ilogbw = (int)__logbw; + __c = _SCALBNf(__c, -__ilogbw); + __d = _SCALBNf(__d, -__ilogbw); + } + float __denom = __c * __c + __d * __d; + float _Complex z; + __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw); + __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw); + if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) { + if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) { + __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a; + __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b; + } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) && + _ISFINITEf(__d)) { + __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a); + __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b); + __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d); + __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d); + } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) && + _ISFINITEf(__b)) { + __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c); + __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d); + __real__(z) = 0 * (__a * __c + __b * __d); + __imag__(z) = 0 * (__b * __c - __a * __d); + } + } + return z; +} + +#if defined(__cplusplus) +} // extern "C" +#endif + +#undef _ISNANd +#undef _ISNANf +#undef _ISINFd +#undef _ISINFf +#undef _COPYSIGNd +#undef _COPYSIGNf +#undef _ISFINITEd +#undef _ISFINITEf +#undef _SCALBNd +#undef _SCALBNf +#undef _ABSd +#undef _ABSf +#undef _LOGBd +#undef _LOGBf +#undef _fmaxd +#undef _fmaxf + +#ifdef __OPENMP_NVPTX__ +#pragma omp end declare target +#endif + +#pragma pop_macro("__DEVICE__") + +#endif // __CLANG_CUDA_COMPLEX_BUILTINS diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_device_functions.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_device_functions.h new file mode 100644 index 0000000..cc4e1a4 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_device_functions.h @@ -0,0 +1,1558 @@ +/*===---- __clang_cuda_device_functions.h - CUDA runtime support -----------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__ +#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__ + +#ifndef __OPENMP_NVPTX__ +#if CUDA_VERSION < 9000 +#error This file is intended to be used with CUDA-9+ only. +#endif +#endif + +// __DEVICE__ is a helper macro with common set of attributes for the wrappers +// we implement in this file. We need static in order to avoid emitting unused +// functions and __forceinline__ helps inlining these wrappers at -O1. 
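+//
+// For example, with the CUDA-mode definition below, a wrapper such as
+//
+//   __DEVICE__ int __popc(int __a) { return __nv_popc(__a); }
+//
+// behaves as if it had been written
+//
+//   static __device__ __forceinline__ int __popc(int __a) {
+//     return __nv_popc(__a);
+//   }
+//
+// i.e. a thin, always-inlined forwarder to the matching libdevice builtin
+// that is not emitted at all if it is never called.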
+#pragma push_macro("__DEVICE__") +#ifdef __OPENMP_NVPTX__ +#define __DEVICE__ static __attribute__((always_inline, nothrow)) +#else +#define __DEVICE__ static __device__ __forceinline__ +#endif + +__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); } +__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); } +__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); } +__DEVICE__ unsigned int __brev(unsigned int __a) { return __nv_brev(__a); } +__DEVICE__ unsigned long long __brevll(unsigned long long __a) { + return __nv_brevll(__a); +} +#if defined(__cplusplus) +__DEVICE__ void __brkpt() { __asm__ __volatile__("brkpt;"); } +__DEVICE__ void __brkpt(int __a) { __brkpt(); } +#else +__DEVICE__ void __attribute__((overloadable)) __brkpt(void) { + __asm__ __volatile__("brkpt;"); +} +__DEVICE__ void __attribute__((overloadable)) __brkpt(int __a) { __brkpt(); } +#endif +__DEVICE__ unsigned int __byte_perm(unsigned int __a, unsigned int __b, + unsigned int __c) { + return __nv_byte_perm(__a, __b, __c); +} +__DEVICE__ int __clz(int __a) { return __nv_clz(__a); } +__DEVICE__ int __clzll(long long __a) { return __nv_clzll(__a); } +__DEVICE__ float __cosf(float __a) { return __nv_fast_cosf(__a); } +__DEVICE__ double __dAtomicAdd(double *__p, double __v) { + return __nvvm_atom_add_gen_d(__p, __v); +} +__DEVICE__ double __dAtomicAdd_block(double *__p, double __v) { + return __nvvm_atom_cta_add_gen_d(__p, __v); +} +__DEVICE__ double __dAtomicAdd_system(double *__p, double __v) { + return __nvvm_atom_sys_add_gen_d(__p, __v); +} +__DEVICE__ double __dadd_rd(double __a, double __b) { + return __nv_dadd_rd(__a, __b); +} +__DEVICE__ double __dadd_rn(double __a, double __b) { + return __nv_dadd_rn(__a, __b); +} +__DEVICE__ double __dadd_ru(double __a, double __b) { + return __nv_dadd_ru(__a, __b); +} +__DEVICE__ double __dadd_rz(double __a, double __b) { + return __nv_dadd_rz(__a, __b); +} +__DEVICE__ double __ddiv_rd(double __a, double __b) { + return __nv_ddiv_rd(__a, __b); +} +__DEVICE__ double __ddiv_rn(double __a, double __b) { + return __nv_ddiv_rn(__a, __b); +} +__DEVICE__ double __ddiv_ru(double __a, double __b) { + return __nv_ddiv_ru(__a, __b); +} +__DEVICE__ double __ddiv_rz(double __a, double __b) { + return __nv_ddiv_rz(__a, __b); +} +__DEVICE__ double __dmul_rd(double __a, double __b) { + return __nv_dmul_rd(__a, __b); +} +__DEVICE__ double __dmul_rn(double __a, double __b) { + return __nv_dmul_rn(__a, __b); +} +__DEVICE__ double __dmul_ru(double __a, double __b) { + return __nv_dmul_ru(__a, __b); +} +__DEVICE__ double __dmul_rz(double __a, double __b) { + return __nv_dmul_rz(__a, __b); +} +__DEVICE__ float __double2float_rd(double __a) { + return __nv_double2float_rd(__a); +} +__DEVICE__ float __double2float_rn(double __a) { + return __nv_double2float_rn(__a); +} +__DEVICE__ float __double2float_ru(double __a) { + return __nv_double2float_ru(__a); +} +__DEVICE__ float __double2float_rz(double __a) { + return __nv_double2float_rz(__a); +} +__DEVICE__ int __double2hiint(double __a) { return __nv_double2hiint(__a); } +__DEVICE__ int __double2int_rd(double __a) { return __nv_double2int_rd(__a); } +__DEVICE__ int __double2int_rn(double __a) { return __nv_double2int_rn(__a); } +__DEVICE__ int __double2int_ru(double __a) { return __nv_double2int_ru(__a); } +__DEVICE__ int __double2int_rz(double __a) { return __nv_double2int_rz(__a); } +__DEVICE__ long long __double2ll_rd(double __a) { + return __nv_double2ll_rd(__a); +} +__DEVICE__ long long __double2ll_rn(double __a) 
{ + return __nv_double2ll_rn(__a); +} +__DEVICE__ long long __double2ll_ru(double __a) { + return __nv_double2ll_ru(__a); +} +__DEVICE__ long long __double2ll_rz(double __a) { + return __nv_double2ll_rz(__a); +} +__DEVICE__ int __double2loint(double __a) { return __nv_double2loint(__a); } +__DEVICE__ unsigned int __double2uint_rd(double __a) { + return __nv_double2uint_rd(__a); +} +__DEVICE__ unsigned int __double2uint_rn(double __a) { + return __nv_double2uint_rn(__a); +} +__DEVICE__ unsigned int __double2uint_ru(double __a) { + return __nv_double2uint_ru(__a); +} +__DEVICE__ unsigned int __double2uint_rz(double __a) { + return __nv_double2uint_rz(__a); +} +__DEVICE__ unsigned long long __double2ull_rd(double __a) { + return __nv_double2ull_rd(__a); +} +__DEVICE__ unsigned long long __double2ull_rn(double __a) { + return __nv_double2ull_rn(__a); +} +__DEVICE__ unsigned long long __double2ull_ru(double __a) { + return __nv_double2ull_ru(__a); +} +__DEVICE__ unsigned long long __double2ull_rz(double __a) { + return __nv_double2ull_rz(__a); +} +__DEVICE__ long long __double_as_longlong(double __a) { + return __nv_double_as_longlong(__a); +} +__DEVICE__ double __drcp_rd(double __a) { return __nv_drcp_rd(__a); } +__DEVICE__ double __drcp_rn(double __a) { return __nv_drcp_rn(__a); } +__DEVICE__ double __drcp_ru(double __a) { return __nv_drcp_ru(__a); } +__DEVICE__ double __drcp_rz(double __a) { return __nv_drcp_rz(__a); } +__DEVICE__ double __dsqrt_rd(double __a) { return __nv_dsqrt_rd(__a); } +__DEVICE__ double __dsqrt_rn(double __a) { return __nv_dsqrt_rn(__a); } +__DEVICE__ double __dsqrt_ru(double __a) { return __nv_dsqrt_ru(__a); } +__DEVICE__ double __dsqrt_rz(double __a) { return __nv_dsqrt_rz(__a); } +__DEVICE__ double __dsub_rd(double __a, double __b) { + return __nv_dsub_rd(__a, __b); +} +__DEVICE__ double __dsub_rn(double __a, double __b) { + return __nv_dsub_rn(__a, __b); +} +__DEVICE__ double __dsub_ru(double __a, double __b) { + return __nv_dsub_ru(__a, __b); +} +__DEVICE__ double __dsub_rz(double __a, double __b) { + return __nv_dsub_rz(__a, __b); +} +__DEVICE__ float __exp10f(float __a) { return __nv_fast_exp10f(__a); } +__DEVICE__ float __expf(float __a) { return __nv_fast_expf(__a); } +__DEVICE__ float __fAtomicAdd(float *__p, float __v) { + return __nvvm_atom_add_gen_f(__p, __v); +} +__DEVICE__ float __fAtomicAdd_block(float *__p, float __v) { + return __nvvm_atom_cta_add_gen_f(__p, __v); +} +__DEVICE__ float __fAtomicAdd_system(float *__p, float __v) { + return __nvvm_atom_sys_add_gen_f(__p, __v); +} +__DEVICE__ float __fAtomicExch(float *__p, float __v) { + return __nv_int_as_float( + __nvvm_atom_xchg_gen_i((int *)__p, __nv_float_as_int(__v))); +} +__DEVICE__ float __fAtomicExch_block(float *__p, float __v) { + return __nv_int_as_float( + __nvvm_atom_cta_xchg_gen_i((int *)__p, __nv_float_as_int(__v))); +} +__DEVICE__ float __fAtomicExch_system(float *__p, float __v) { + return __nv_int_as_float( + __nvvm_atom_sys_xchg_gen_i((int *)__p, __nv_float_as_int(__v))); +} +__DEVICE__ float __fadd_rd(float __a, float __b) { + return __nv_fadd_rd(__a, __b); +} +__DEVICE__ float __fadd_rn(float __a, float __b) { + return __nv_fadd_rn(__a, __b); +} +__DEVICE__ float __fadd_ru(float __a, float __b) { + return __nv_fadd_ru(__a, __b); +} +__DEVICE__ float __fadd_rz(float __a, float __b) { + return __nv_fadd_rz(__a, __b); +} +__DEVICE__ float __fdiv_rd(float __a, float __b) { + return __nv_fdiv_rd(__a, __b); +} +__DEVICE__ float __fdiv_rn(float __a, float __b) { + return __nv_fdiv_rn(__a, 
__b); +} +__DEVICE__ float __fdiv_ru(float __a, float __b) { + return __nv_fdiv_ru(__a, __b); +} +__DEVICE__ float __fdiv_rz(float __a, float __b) { + return __nv_fdiv_rz(__a, __b); +} +__DEVICE__ float __fdividef(float __a, float __b) { + return __nv_fast_fdividef(__a, __b); +} +__DEVICE__ int __ffs(int __a) { return __nv_ffs(__a); } +__DEVICE__ int __ffsll(long long __a) { return __nv_ffsll(__a); } +__DEVICE__ int __finite(double __a) { return __nv_isfinited(__a); } +__DEVICE__ int __finitef(float __a) { return __nv_finitef(__a); } +#ifdef _MSC_VER +__DEVICE__ int __finitel(long double __a); +#endif +__DEVICE__ int __float2int_rd(float __a) { return __nv_float2int_rd(__a); } +__DEVICE__ int __float2int_rn(float __a) { return __nv_float2int_rn(__a); } +__DEVICE__ int __float2int_ru(float __a) { return __nv_float2int_ru(__a); } +__DEVICE__ int __float2int_rz(float __a) { return __nv_float2int_rz(__a); } +__DEVICE__ long long __float2ll_rd(float __a) { return __nv_float2ll_rd(__a); } +__DEVICE__ long long __float2ll_rn(float __a) { return __nv_float2ll_rn(__a); } +__DEVICE__ long long __float2ll_ru(float __a) { return __nv_float2ll_ru(__a); } +__DEVICE__ long long __float2ll_rz(float __a) { return __nv_float2ll_rz(__a); } +__DEVICE__ unsigned int __float2uint_rd(float __a) { + return __nv_float2uint_rd(__a); +} +__DEVICE__ unsigned int __float2uint_rn(float __a) { + return __nv_float2uint_rn(__a); +} +__DEVICE__ unsigned int __float2uint_ru(float __a) { + return __nv_float2uint_ru(__a); +} +__DEVICE__ unsigned int __float2uint_rz(float __a) { + return __nv_float2uint_rz(__a); +} +__DEVICE__ unsigned long long __float2ull_rd(float __a) { + return __nv_float2ull_rd(__a); +} +__DEVICE__ unsigned long long __float2ull_rn(float __a) { + return __nv_float2ull_rn(__a); +} +__DEVICE__ unsigned long long __float2ull_ru(float __a) { + return __nv_float2ull_ru(__a); +} +__DEVICE__ unsigned long long __float2ull_rz(float __a) { + return __nv_float2ull_rz(__a); +} +__DEVICE__ int __float_as_int(float __a) { return __nv_float_as_int(__a); } +__DEVICE__ unsigned int __float_as_uint(float __a) { + return __nv_float_as_uint(__a); +} +__DEVICE__ double __fma_rd(double __a, double __b, double __c) { + return __nv_fma_rd(__a, __b, __c); +} +__DEVICE__ double __fma_rn(double __a, double __b, double __c) { + return __nv_fma_rn(__a, __b, __c); +} +__DEVICE__ double __fma_ru(double __a, double __b, double __c) { + return __nv_fma_ru(__a, __b, __c); +} +__DEVICE__ double __fma_rz(double __a, double __b, double __c) { + return __nv_fma_rz(__a, __b, __c); +} +__DEVICE__ float __fmaf_ieee_rd(float __a, float __b, float __c) { + return __nv_fmaf_ieee_rd(__a, __b, __c); +} +__DEVICE__ float __fmaf_ieee_rn(float __a, float __b, float __c) { + return __nv_fmaf_ieee_rn(__a, __b, __c); +} +__DEVICE__ float __fmaf_ieee_ru(float __a, float __b, float __c) { + return __nv_fmaf_ieee_ru(__a, __b, __c); +} +__DEVICE__ float __fmaf_ieee_rz(float __a, float __b, float __c) { + return __nv_fmaf_ieee_rz(__a, __b, __c); +} +__DEVICE__ float __fmaf_rd(float __a, float __b, float __c) { + return __nv_fmaf_rd(__a, __b, __c); +} +__DEVICE__ float __fmaf_rn(float __a, float __b, float __c) { + return __nv_fmaf_rn(__a, __b, __c); +} +__DEVICE__ float __fmaf_ru(float __a, float __b, float __c) { + return __nv_fmaf_ru(__a, __b, __c); +} +__DEVICE__ float __fmaf_rz(float __a, float __b, float __c) { + return __nv_fmaf_rz(__a, __b, __c); +} +__DEVICE__ float __fmul_rd(float __a, float __b) { + return __nv_fmul_rd(__a, __b); +} +__DEVICE__ float 
__fmul_rn(float __a, float __b) { + return __nv_fmul_rn(__a, __b); +} +__DEVICE__ float __fmul_ru(float __a, float __b) { + return __nv_fmul_ru(__a, __b); +} +__DEVICE__ float __fmul_rz(float __a, float __b) { + return __nv_fmul_rz(__a, __b); +} +__DEVICE__ float __frcp_rd(float __a) { return __nv_frcp_rd(__a); } +__DEVICE__ float __frcp_rn(float __a) { return __nv_frcp_rn(__a); } +__DEVICE__ float __frcp_ru(float __a) { return __nv_frcp_ru(__a); } +__DEVICE__ float __frcp_rz(float __a) { return __nv_frcp_rz(__a); } +__DEVICE__ float __frsqrt_rn(float __a) { return __nv_frsqrt_rn(__a); } +__DEVICE__ float __fsqrt_rd(float __a) { return __nv_fsqrt_rd(__a); } +__DEVICE__ float __fsqrt_rn(float __a) { return __nv_fsqrt_rn(__a); } +__DEVICE__ float __fsqrt_ru(float __a) { return __nv_fsqrt_ru(__a); } +__DEVICE__ float __fsqrt_rz(float __a) { return __nv_fsqrt_rz(__a); } +__DEVICE__ float __fsub_rd(float __a, float __b) { + return __nv_fsub_rd(__a, __b); +} +__DEVICE__ float __fsub_rn(float __a, float __b) { + return __nv_fsub_rn(__a, __b); +} +__DEVICE__ float __fsub_ru(float __a, float __b) { + return __nv_fsub_ru(__a, __b); +} +__DEVICE__ float __fsub_rz(float __a, float __b) { + return __nv_fsub_rz(__a, __b); +} +__DEVICE__ int __hadd(int __a, int __b) { return __nv_hadd(__a, __b); } +__DEVICE__ double __hiloint2double(int __a, int __b) { + return __nv_hiloint2double(__a, __b); +} +__DEVICE__ int __iAtomicAdd(int *__p, int __v) { + return __nvvm_atom_add_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicAdd_block(int *__p, int __v) { + return __nvvm_atom_cta_add_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicAdd_system(int *__p, int __v) { + return __nvvm_atom_sys_add_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicAnd(int *__p, int __v) { + return __nvvm_atom_and_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicAnd_block(int *__p, int __v) { + return __nvvm_atom_cta_and_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicAnd_system(int *__p, int __v) { + return __nvvm_atom_sys_and_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicCAS(int *__p, int __cmp, int __v) { + return __nvvm_atom_cas_gen_i(__p, __cmp, __v); +} +__DEVICE__ int __iAtomicCAS_block(int *__p, int __cmp, int __v) { + return __nvvm_atom_cta_cas_gen_i(__p, __cmp, __v); +} +__DEVICE__ int __iAtomicCAS_system(int *__p, int __cmp, int __v) { + return __nvvm_atom_sys_cas_gen_i(__p, __cmp, __v); +} +__DEVICE__ int __iAtomicExch(int *__p, int __v) { + return __nvvm_atom_xchg_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicExch_block(int *__p, int __v) { + return __nvvm_atom_cta_xchg_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicExch_system(int *__p, int __v) { + return __nvvm_atom_sys_xchg_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicMax(int *__p, int __v) { + return __nvvm_atom_max_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicMax_block(int *__p, int __v) { + return __nvvm_atom_cta_max_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicMax_system(int *__p, int __v) { + return __nvvm_atom_sys_max_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicMin(int *__p, int __v) { + return __nvvm_atom_min_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicMin_block(int *__p, int __v) { + return __nvvm_atom_cta_min_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicMin_system(int *__p, int __v) { + return __nvvm_atom_sys_min_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicOr(int *__p, int __v) { + return __nvvm_atom_or_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicOr_block(int *__p, int __v) { + return __nvvm_atom_cta_or_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicOr_system(int *__p, int __v) { + 
return __nvvm_atom_sys_or_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicXor(int *__p, int __v) { + return __nvvm_atom_xor_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicXor_block(int *__p, int __v) { + return __nvvm_atom_cta_xor_gen_i(__p, __v); +} +__DEVICE__ int __iAtomicXor_system(int *__p, int __v) { + return __nvvm_atom_sys_xor_gen_i(__p, __v); +} +__DEVICE__ long long __illAtomicMax(long long *__p, long long __v) { + return __nvvm_atom_max_gen_ll(__p, __v); +} +__DEVICE__ long long __illAtomicMax_block(long long *__p, long long __v) { + return __nvvm_atom_cta_max_gen_ll(__p, __v); +} +__DEVICE__ long long __illAtomicMax_system(long long *__p, long long __v) { + return __nvvm_atom_sys_max_gen_ll(__p, __v); +} +__DEVICE__ long long __illAtomicMin(long long *__p, long long __v) { + return __nvvm_atom_min_gen_ll(__p, __v); +} +__DEVICE__ long long __illAtomicMin_block(long long *__p, long long __v) { + return __nvvm_atom_cta_min_gen_ll(__p, __v); +} +__DEVICE__ long long __illAtomicMin_system(long long *__p, long long __v) { + return __nvvm_atom_sys_min_gen_ll(__p, __v); +} +__DEVICE__ double __int2double_rn(int __a) { return __nv_int2double_rn(__a); } +__DEVICE__ float __int2float_rd(int __a) { return __nv_int2float_rd(__a); } +__DEVICE__ float __int2float_rn(int __a) { return __nv_int2float_rn(__a); } +__DEVICE__ float __int2float_ru(int __a) { return __nv_int2float_ru(__a); } +__DEVICE__ float __int2float_rz(int __a) { return __nv_int2float_rz(__a); } +__DEVICE__ float __int_as_float(int __a) { return __nv_int_as_float(__a); } +__DEVICE__ int __isfinited(double __a) { return __nv_isfinited(__a); } +__DEVICE__ int __isinf(double __a) { return __nv_isinfd(__a); } +__DEVICE__ int __isinff(float __a) { return __nv_isinff(__a); } +#ifdef _MSC_VER +__DEVICE__ int __isinfl(long double __a); +#endif +__DEVICE__ int __isnan(double __a) { return __nv_isnand(__a); } +__DEVICE__ int __isnanf(float __a) { return __nv_isnanf(__a); } +#ifdef _MSC_VER +__DEVICE__ int __isnanl(long double __a); +#endif +__DEVICE__ double __ll2double_rd(long long __a) { + return __nv_ll2double_rd(__a); +} +__DEVICE__ double __ll2double_rn(long long __a) { + return __nv_ll2double_rn(__a); +} +__DEVICE__ double __ll2double_ru(long long __a) { + return __nv_ll2double_ru(__a); +} +__DEVICE__ double __ll2double_rz(long long __a) { + return __nv_ll2double_rz(__a); +} +__DEVICE__ float __ll2float_rd(long long __a) { return __nv_ll2float_rd(__a); } +__DEVICE__ float __ll2float_rn(long long __a) { return __nv_ll2float_rn(__a); } +__DEVICE__ float __ll2float_ru(long long __a) { return __nv_ll2float_ru(__a); } +__DEVICE__ float __ll2float_rz(long long __a) { return __nv_ll2float_rz(__a); } +__DEVICE__ long long __llAtomicAnd(long long *__p, long long __v) { + return __nvvm_atom_and_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicAnd_block(long long *__p, long long __v) { + return __nvvm_atom_cta_and_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicAnd_system(long long *__p, long long __v) { + return __nvvm_atom_sys_and_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicOr(long long *__p, long long __v) { + return __nvvm_atom_or_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicOr_block(long long *__p, long long __v) { + return __nvvm_atom_cta_or_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicOr_system(long long *__p, long long __v) { + return __nvvm_atom_sys_or_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicXor(long long *__p, long long __v) { + return __nvvm_atom_xor_gen_ll(__p, __v); +} +__DEVICE__ long 
long __llAtomicXor_block(long long *__p, long long __v) { + return __nvvm_atom_cta_xor_gen_ll(__p, __v); +} +__DEVICE__ long long __llAtomicXor_system(long long *__p, long long __v) { + return __nvvm_atom_sys_xor_gen_ll(__p, __v); +} +__DEVICE__ float __log10f(float __a) { return __nv_fast_log10f(__a); } +__DEVICE__ float __log2f(float __a) { return __nv_fast_log2f(__a); } +__DEVICE__ float __logf(float __a) { return __nv_fast_logf(__a); } +__DEVICE__ double __longlong_as_double(long long __a) { + return __nv_longlong_as_double(__a); +} +__DEVICE__ int __mul24(int __a, int __b) { return __nv_mul24(__a, __b); } +__DEVICE__ long long __mul64hi(long long __a, long long __b) { + return __nv_mul64hi(__a, __b); +} +__DEVICE__ int __mulhi(int __a, int __b) { return __nv_mulhi(__a, __b); } +__DEVICE__ unsigned int __pm0(void) { return __nvvm_read_ptx_sreg_pm0(); } +__DEVICE__ unsigned int __pm1(void) { return __nvvm_read_ptx_sreg_pm1(); } +__DEVICE__ unsigned int __pm2(void) { return __nvvm_read_ptx_sreg_pm2(); } +__DEVICE__ unsigned int __pm3(void) { return __nvvm_read_ptx_sreg_pm3(); } +__DEVICE__ int __popc(int __a) { return __nv_popc(__a); } +__DEVICE__ int __popcll(long long __a) { return __nv_popcll(__a); } +__DEVICE__ float __powf(float __a, float __b) { + return __nv_fast_powf(__a, __b); +} + +// Parameter must have a known integer value. +#define __prof_trigger(__a) __asm__ __volatile__("pmevent \t%0;" ::"i"(__a)) +__DEVICE__ int __rhadd(int __a, int __b) { return __nv_rhadd(__a, __b); } +__DEVICE__ unsigned int __sad(int __a, int __b, unsigned int __c) { + return __nv_sad(__a, __b, __c); +} +__DEVICE__ float __saturatef(float __a) { return __nv_saturatef(__a); } +__DEVICE__ int __signbitd(double __a) { return __nv_signbitd(__a); } +__DEVICE__ int __signbitf(float __a) { return __nv_signbitf(__a); } +__DEVICE__ void __sincosf(float __a, float *__s, float *__c) { + return __nv_fast_sincosf(__a, __s, __c); +} +__DEVICE__ float __sinf(float __a) { return __nv_fast_sinf(__a); } +__DEVICE__ int __syncthreads_and(int __a) { return __nvvm_bar0_and(__a); } +__DEVICE__ int __syncthreads_count(int __a) { return __nvvm_bar0_popc(__a); } +__DEVICE__ int __syncthreads_or(int __a) { return __nvvm_bar0_or(__a); } +__DEVICE__ float __tanf(float __a) { return __nv_fast_tanf(__a); } +__DEVICE__ void __threadfence(void) { __nvvm_membar_gl(); } +__DEVICE__ void __threadfence_block(void) { __nvvm_membar_cta(); }; +__DEVICE__ void __threadfence_system(void) { __nvvm_membar_sys(); }; +__DEVICE__ void __trap(void) { __asm__ __volatile__("trap;"); } +__DEVICE__ unsigned int __uAtomicAdd(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_add_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicAdd_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_add_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicAdd_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_add_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicAnd(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_and_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicAnd_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_and_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicAnd_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_and_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicCAS(unsigned int *__p, unsigned int __cmp, + unsigned int __v) { + return __nvvm_atom_cas_gen_i((int *)__p, __cmp, __v); +} 
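+// A sketch of how the CAS wrapper above is typically used -- an atomic
+// read-modify-write loop (illustrative only; __saturating_add_u32 is a
+// hypothetical helper, not part of this header):
+//
+//   __device__ unsigned int __saturating_add_u32(unsigned int *__p,
+//                                                unsigned int __v) {
+//     unsigned int __old = *__p, __assumed;
+//     do {
+//       __assumed = __old;
+//       unsigned int __next = (__assumed > ~__v) ? ~0u : __assumed + __v;
+//       __old = __uAtomicCAS(__p, __assumed, __next);
+//     } while (__old != __assumed);
+//     return __old;
+//   }
+//
+// The loop retries until no other thread modified *__p between the read and
+// the compare-and-swap.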
+__DEVICE__ unsigned int +__uAtomicCAS_block(unsigned int *__p, unsigned int __cmp, unsigned int __v) { + return __nvvm_atom_cta_cas_gen_i((int *)__p, __cmp, __v); +} +__DEVICE__ unsigned int +__uAtomicCAS_system(unsigned int *__p, unsigned int __cmp, unsigned int __v) { + return __nvvm_atom_sys_cas_gen_i((int *)__p, __cmp, __v); +} +__DEVICE__ unsigned int __uAtomicDec(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_dec_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicDec_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_dec_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicDec_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_dec_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicExch(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_xchg_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicExch_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_xchg_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicExch_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_xchg_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicInc(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_inc_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicInc_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_inc_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicInc_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_inc_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicMax(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_max_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicMax_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_max_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicMax_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_max_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicMin(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_min_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicMin_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_min_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicMin_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_min_gen_ui(__p, __v); +} +__DEVICE__ unsigned int __uAtomicOr(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_or_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicOr_block(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_cta_or_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicOr_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_or_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicXor(unsigned int *__p, unsigned int __v) { + return __nvvm_atom_xor_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicXor_block(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_cta_xor_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uAtomicXor_system(unsigned int *__p, + unsigned int __v) { + return __nvvm_atom_sys_xor_gen_i((int *)__p, __v); +} +__DEVICE__ unsigned int __uhadd(unsigned int __a, unsigned int __b) { + return __nv_uhadd(__a, __b); +} +__DEVICE__ double __uint2double_rn(unsigned int __a) { + return __nv_uint2double_rn(__a); +} +__DEVICE__ float __uint2float_rd(unsigned int __a) { + return __nv_uint2float_rd(__a); +} +__DEVICE__ float __uint2float_rn(unsigned int __a) { + return __nv_uint2float_rn(__a); +} +__DEVICE__ float 
__uint2float_ru(unsigned int __a) { + return __nv_uint2float_ru(__a); +} +__DEVICE__ float __uint2float_rz(unsigned int __a) { + return __nv_uint2float_rz(__a); +} +__DEVICE__ float __uint_as_float(unsigned int __a) { + return __nv_uint_as_float(__a); +} // +__DEVICE__ double __ull2double_rd(unsigned long long __a) { + return __nv_ull2double_rd(__a); +} +__DEVICE__ double __ull2double_rn(unsigned long long __a) { + return __nv_ull2double_rn(__a); +} +__DEVICE__ double __ull2double_ru(unsigned long long __a) { + return __nv_ull2double_ru(__a); +} +__DEVICE__ double __ull2double_rz(unsigned long long __a) { + return __nv_ull2double_rz(__a); +} +__DEVICE__ float __ull2float_rd(unsigned long long __a) { + return __nv_ull2float_rd(__a); +} +__DEVICE__ float __ull2float_rn(unsigned long long __a) { + return __nv_ull2float_rn(__a); +} +__DEVICE__ float __ull2float_ru(unsigned long long __a) { + return __nv_ull2float_ru(__a); +} +__DEVICE__ float __ull2float_rz(unsigned long long __a) { + return __nv_ull2float_rz(__a); +} +__DEVICE__ unsigned long long __ullAtomicAdd(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_add_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicAdd_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_add_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicAdd_system(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_sys_add_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicAnd(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_and_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicAnd_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_and_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicAnd_system(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_sys_and_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicCAS(unsigned long long *__p, + unsigned long long __cmp, + unsigned long long __v) { + return __nvvm_atom_cas_gen_ll((long long *)__p, __cmp, __v); +} +__DEVICE__ unsigned long long __ullAtomicCAS_block(unsigned long long *__p, + unsigned long long __cmp, + unsigned long long __v) { + return __nvvm_atom_cta_cas_gen_ll((long long *)__p, __cmp, __v); +} +__DEVICE__ unsigned long long __ullAtomicCAS_system(unsigned long long *__p, + unsigned long long __cmp, + unsigned long long __v) { + return __nvvm_atom_sys_cas_gen_ll((long long *)__p, __cmp, __v); +} +__DEVICE__ unsigned long long __ullAtomicExch(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_xchg_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicExch_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_xchg_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicExch_system(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_sys_xchg_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicMax(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_max_gen_ull(__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicMax_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_max_gen_ull(__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicMax_system(unsigned long long *__p, + unsigned long long __v) { + return 
__nvvm_atom_sys_max_gen_ull(__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicMin(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_min_gen_ull(__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicMin_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_min_gen_ull(__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicMin_system(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_sys_min_gen_ull(__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicOr(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_or_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicOr_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_or_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicOr_system(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_sys_or_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicXor(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_xor_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicXor_block(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_cta_xor_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned long long __ullAtomicXor_system(unsigned long long *__p, + unsigned long long __v) { + return __nvvm_atom_sys_xor_gen_ll((long long *)__p, __v); +} +__DEVICE__ unsigned int __umul24(unsigned int __a, unsigned int __b) { + return __nv_umul24(__a, __b); +} +__DEVICE__ unsigned long long __umul64hi(unsigned long long __a, + unsigned long long __b) { + return __nv_umul64hi(__a, __b); +} +__DEVICE__ unsigned int __umulhi(unsigned int __a, unsigned int __b) { + return __nv_umulhi(__a, __b); +} +__DEVICE__ unsigned int __urhadd(unsigned int __a, unsigned int __b) { + return __nv_urhadd(__a, __b); +} +__DEVICE__ unsigned int __usad(unsigned int __a, unsigned int __b, + unsigned int __c) { + return __nv_usad(__a, __b, __c); +} + +#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020 +__DEVICE__ unsigned int __vabs2(unsigned int __a) { return __nv_vabs2(__a); } +__DEVICE__ unsigned int __vabs4(unsigned int __a) { return __nv_vabs4(__a); } +__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) { + return __nv_vabsdiffs2(__a, __b); +} +__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) { + return __nv_vabsdiffs4(__a, __b); +} +__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) { + return __nv_vabsdiffu2(__a, __b); +} +__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) { + return __nv_vabsdiffu4(__a, __b); +} +__DEVICE__ unsigned int __vabsss2(unsigned int __a) { + return __nv_vabsss2(__a); +} +__DEVICE__ unsigned int __vabsss4(unsigned int __a) { + return __nv_vabsss4(__a); +} +__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) { + return __nv_vadd2(__a, __b); +} +__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) { + return __nv_vadd4(__a, __b); +} +__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) { + return __nv_vaddss2(__a, __b); +} +__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) { + return __nv_vaddss4(__a, __b); +} +__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) { + return __nv_vaddus2(__a, __b); +} +__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) { + return __nv_vaddus4(__a, __b); +} 
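+ +// [Editorial note, not part of the upstream header] Each __v*2/__v*4 wrapper +// above and below treats its 32-bit operands as two 16-bit or four 8-bit +// lanes, with no carry across lane boundaries. For example: +// __vadd4(0x01020304u, 0x10203040u) == 0x11223344u // four byte-wise adds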
+__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) { + return __nv_vavgs2(__a, __b); +} +__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) { + return __nv_vavgs4(__a, __b); +} +__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) { + return __nv_vavgu2(__a, __b); +} +__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) { + return __nv_vavgu4(__a, __b); +} +__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) { + return __nv_vcmpeq2(__a, __b); +} +__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) { + return __nv_vcmpeq4(__a, __b); +} +__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) { + return __nv_vcmpges2(__a, __b); +} +__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) { + return __nv_vcmpges4(__a, __b); +} +__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) { + return __nv_vcmpgeu2(__a, __b); +} +__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) { + return __nv_vcmpgeu4(__a, __b); +} +__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) { + return __nv_vcmpgts2(__a, __b); +} +__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) { + return __nv_vcmpgts4(__a, __b); +} +__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) { + return __nv_vcmpgtu2(__a, __b); +} +__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) { + return __nv_vcmpgtu4(__a, __b); +} +__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) { + return __nv_vcmples2(__a, __b); +} +__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) { + return __nv_vcmples4(__a, __b); +} +__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) { + return __nv_vcmpleu2(__a, __b); +} +__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) { + return __nv_vcmpleu4(__a, __b); +} +__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) { + return __nv_vcmplts2(__a, __b); +} +__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) { + return __nv_vcmplts4(__a, __b); +} +__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) { + return __nv_vcmpltu2(__a, __b); +} +__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) { + return __nv_vcmpltu4(__a, __b); +} +__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) { + return __nv_vcmpne2(__a, __b); +} +__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) { + return __nv_vcmpne4(__a, __b); +} +__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) { + return __nv_vhaddu2(__a, __b); +} +__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) { + return __nv_vhaddu4(__a, __b); +} +__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) { + return __nv_vmaxs2(__a, __b); +} +__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) { + return __nv_vmaxs4(__a, __b); +} +__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) { + return __nv_vmaxu2(__a, __b); +} +__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) { + return __nv_vmaxu4(__a, __b); +} +__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) { + return __nv_vmins2(__a, __b); +} +__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) { + return __nv_vmins4(__a, __b); +} +__DEVICE__ unsigned int __vminu2(unsigned int __a, 
unsigned int __b) { + return __nv_vminu2(__a, __b); +} +__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) { + return __nv_vminu4(__a, __b); +} +__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __nv_vneg2(__a); } +__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __nv_vneg4(__a); } +__DEVICE__ unsigned int __vnegss2(unsigned int __a) { + return __nv_vnegss2(__a); +} +__DEVICE__ unsigned int __vnegss4(unsigned int __a) { + return __nv_vnegss4(__a); +} +__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) { + return __nv_vsads2(__a, __b); +} +__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) { + return __nv_vsads4(__a, __b); +} +__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) { + return __nv_vsadu2(__a, __b); +} +__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) { + return __nv_vsadu4(__a, __b); +} +__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) { + return __nv_vseteq2(__a, __b); +} +__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) { + return __nv_vseteq4(__a, __b); +} +__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) { + return __nv_vsetges2(__a, __b); +} +__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) { + return __nv_vsetges4(__a, __b); +} +__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) { + return __nv_vsetgeu2(__a, __b); +} +__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) { + return __nv_vsetgeu4(__a, __b); +} +__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) { + return __nv_vsetgts2(__a, __b); +} +__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) { + return __nv_vsetgts4(__a, __b); +} +__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) { + return __nv_vsetgtu2(__a, __b); +} +__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) { + return __nv_vsetgtu4(__a, __b); +} +__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) { + return __nv_vsetles2(__a, __b); +} +__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) { + return __nv_vsetles4(__a, __b); +} +__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) { + return __nv_vsetleu2(__a, __b); +} +__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) { + return __nv_vsetleu4(__a, __b); +} +__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) { + return __nv_vsetlts2(__a, __b); +} +__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) { + return __nv_vsetlts4(__a, __b); +} +__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) { + return __nv_vsetltu2(__a, __b); +} +__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) { + return __nv_vsetltu4(__a, __b); +} +__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) { + return __nv_vsetne2(__a, __b); +} +__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) { + return __nv_vsetne4(__a, __b); +} +__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) { + return __nv_vsub2(__a, __b); +} +__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) { + return __nv_vsub4(__a, __b); +} +__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) { + return __nv_vsubss2(__a, __b); +} +__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) { + return __nv_vsubss4(__a, 
__b); +} +__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) { + return __nv_vsubus2(__a, __b); +} +__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) { + return __nv_vsubus4(__a, __b); +} +#else // CUDA_VERSION >= 9020 +// CUDA no longer provides inline assembly (or bitcode) implementation of these +// functions, so we have to reimplement them. The implementation is naive and is +// not optimized for performance. + +// Helper function to convert N-bit boolean subfields into all-0 or all-1. +// E.g. __bool2mask(0x01000100,8) -> 0xff00ff00 +// __bool2mask(0x00010000,16) -> 0xffff0000 +__DEVICE__ unsigned int __bool2mask(unsigned int __a, int shift) { + return (__a << shift) - __a; +} +__DEVICE__ unsigned int __vabs2(unsigned int __a) { + unsigned int r; + __asm__("vabsdiff2.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(0), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vabs4(unsigned int __a) { + unsigned int r; + __asm__("vabsdiff4.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(0), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff2.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} + +__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff4.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff2.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff4.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vabsss2(unsigned int __a) { + unsigned int r; + __asm__("vabsdiff2.s32.s32.s32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(0), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vabsss4(unsigned int __a) { + unsigned int r; + __asm__("vabsdiff4.s32.s32.s32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(0), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vadd2.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vadd4.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vadd2.s32.s32.s32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vadd4.s32.s32.s32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vadd2.u32.u32.u32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vadd4.u32.u32.u32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vavrg2.s32.s32.s32 
%0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vavrg4.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vavrg2.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vavrg4.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.u32.u32.eq %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vseteq2(__a, __b), 16); +} +__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.u32.u32.eq %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vseteq4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.s32.s32.ge %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetges2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.s32.s32.ge %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetges4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.u32.u32.ge %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetgeu2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.u32.u32.ge %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetgeu4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.s32.s32.gt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetgts2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.s32.s32.gt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetgts4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.u32.u32.gt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetgtu2(__a, __b), 16); +} +__DEVICE__ unsigned int 
__vsetgtu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.u32.u32.gt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetgtu4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.s32.s32.le %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetles2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.s32.s32.le %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetles4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.u32.u32.le %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetleu2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.u32.u32.le %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetleu4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.s32.s32.lt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetlts2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.s32.s32.lt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetlts4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.u32.u32.lt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetltu2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.u32.u32.lt %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetltu4(__a, __b), 8); +} +__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset2.u32.u32.ne %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetne2(__a, __b), 16); +} +__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vset4.u32.u32.ne %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) { + return __bool2mask(__vsetne4(__a, __b), 8); +} + +// Based on ITEM 23 in AIM-239: http://dspace.mit.edu/handle/1721.1/6086 +// (a 
& b) + (a | b) = a + b = (a ^ b) + 2 * (a & b) => +// (a + b) / 2 = ((a ^ b) >> 1) + (a & b) +// To operate on multiple sub-elements we need to make sure to mask out bits +// that crossed over into adjacent elements during the shift. +__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) { + return (((__a ^ __b) >> 1) & ~0x80008000u) + (__a & __b); +} +__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) { + return (((__a ^ __b) >> 1) & ~0x80808080u) + (__a & __b); +} + +__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) { + unsigned int r; + if ((__a & 0x8000) && (__b & 0x8000)) { + // Work around a bug in ptxas which produces invalid result if low element + // is negative. + unsigned mask = __vcmpgts2(__a, __b); + r = (__a & mask) | (__b & ~mask); + } else { + __asm__("vmax2.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + } + return r; +} +__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmax4.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmax2.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmax4.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmin2.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmin4.s32.s32.s32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmin2.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vmin4.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff2.s32.s32.s32.add %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff4.s32.s32.s32.add %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff2.u32.u32.u32.add %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vabsdiff4.u32.u32.u32.add %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} + +__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vsub2.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __vsub2(0, __a); } + +__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vsub4.u32.u32.u32 %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} 
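+// [Editorial worked example, not part of the upstream header] For the AIM-239 +// identity used by __vhaddu2/__vhaddu4 above: with 8-bit lanes a = 0x09 and +// b = 0x06, a ^ b = 0x0F and a & b = 0x00, so ((a ^ b) >> 1) + (a & b) = 0x07, +// i.e. (9 + 6) / 2 rounded down. The ~0x80808080u mask clears each lane's top +// bit, where the shift would otherwise leak in a bit from the lane above.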
+__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __vsub4(0, __a); } +__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vsub2.s32.s32.s32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vnegss2(unsigned int __a) { + return __vsubss2(0, __a); +} +__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vsub4.s32.s32.s32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vnegss4(unsigned int __a) { + return __vsubss4(0, __a); +} +__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vsub2.u32.u32.u32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) { + unsigned int r; + __asm__("vsub4.u32.u32.u32.sat %0,%1,%2,%3;" + : "=r"(r) + : "r"(__a), "r"(__b), "r"(0)); + return r; +} +#endif // CUDA_VERSION >= 9020 + +// For OpenMP we require the user to include <time.h> as we need to know what +// clock_t is on the system. +#ifndef __OPENMP_NVPTX__ +__DEVICE__ /* clock_t= */ int clock() { return __nvvm_read_ptx_sreg_clock(); } +#endif +__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); } + +// These functions shouldn't be declared when including this header +// for math function resolution purposes. +#ifndef __OPENMP_NVPTX__ +__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) { + return __builtin_memcpy(__a, __b, __c); +} +__DEVICE__ void *memset(void *__a, int __b, size_t __c) { + return __builtin_memset(__a, __b, __c); +} +#endif + +#pragma pop_macro("__DEVICE__") +#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_intrinsics.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_intrinsics.h new file mode 100644 index 0000000..c7bff6a --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_intrinsics.h @@ -0,0 +1,486 @@ +/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __CLANG_CUDA_INTRINSICS_H__ +#define __CLANG_CUDA_INTRINSICS_H__ +#ifndef __CUDA__ +#error "This file is for CUDA compilation only." +#endif + +// sm_30 intrinsics: __shfl_{up,down,xor}.
+ +#define __SM_30_INTRINSICS_H__ +#define __SM_30_INTRINSICS_HPP__ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +#pragma push_macro("__MAKE_SHUFFLES") +#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask, \ + __Type) \ + inline __device__ int __FnName(int __val, __Type __offset, \ + int __width = warpSize) { \ + return __IntIntrinsic(__val, __offset, \ + ((warpSize - __width) << 8) | (__Mask)); \ + } \ + inline __device__ float __FnName(float __val, __Type __offset, \ + int __width = warpSize) { \ + return __FloatIntrinsic(__val, __offset, \ + ((warpSize - __width) << 8) | (__Mask)); \ + } \ + inline __device__ unsigned int __FnName(unsigned int __val, __Type __offset, \ + int __width = warpSize) { \ + return static_cast<unsigned int>( \ + ::__FnName(static_cast<int>(__val), __offset, __width)); \ + } \ + inline __device__ long long __FnName(long long __val, __Type __offset, \ + int __width = warpSize) { \ + struct __Bits { \ + int __a, __b; \ + }; \ + _Static_assert(sizeof(__val) == sizeof(__Bits)); \ + _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \ + __Bits __tmp; \ + memcpy(&__tmp, &__val, sizeof(__val)); \ + __tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \ + __tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \ + long long __ret; \ + memcpy(&__ret, &__tmp, sizeof(__tmp)); \ + return __ret; \ + } \ + inline __device__ long __FnName(long __val, __Type __offset, \ + int __width = warpSize) { \ + _Static_assert(sizeof(long) == sizeof(long long) || \ + sizeof(long) == sizeof(int)); \ + if (sizeof(long) == sizeof(long long)) { \ + return static_cast<long>( \ + ::__FnName(static_cast<long long>(__val), __offset, __width)); \ + } else if (sizeof(long) == sizeof(int)) { \ + return static_cast<long>( \ + ::__FnName(static_cast<int>(__val), __offset, __width)); \ + } \ + } \ + inline __device__ unsigned long __FnName( \ + unsigned long __val, __Type __offset, int __width = warpSize) { \ + return static_cast<unsigned long>( \ + ::__FnName(static_cast<long>(__val), __offset, __width)); \ + } \ + inline __device__ unsigned long long __FnName( \ + unsigned long long __val, __Type __offset, int __width = warpSize) { \ + return static_cast<unsigned long long>(::__FnName( \ + static_cast<long long>(__val), __offset, __width)); \ + } \ + inline __device__ double __FnName(double __val, __Type __offset, \ + int __width = warpSize) { \ + long long __tmp; \ + _Static_assert(sizeof(__tmp) == sizeof(__val)); \ + memcpy(&__tmp, &__val, sizeof(__val)); \ + __tmp = ::__FnName(__tmp, __offset, __width); \ + double __ret; \ + memcpy(&__ret, &__tmp, sizeof(__ret)); \ + return __ret; \ + } + +__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f, int); +// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >= +// maxLane. +__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0, + unsigned int); +__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f, + unsigned int); +__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f, + int); +#pragma pop_macro("__MAKE_SHUFFLES") + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +#if CUDA_VERSION >= 9000 +#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300) +// __shfl_sync_* variants available in CUDA-9 +#pragma push_macro("__MAKE_SYNC_SHUFFLES") +#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, \ + __Mask, __Type) \ + inline __device__ int __FnName(unsigned int __mask, int __val, \ + __Type __offset, int __width = warpSize) { \ + return __IntIntrinsic(__mask, __val, __offset, \ + ((warpSize - __width) << 8) | (__Mask)); \ + } \ + inline __device__ float __FnName(unsigned int __mask, float __val, \ + __Type __offset, int __width = warpSize) { \ + return __FloatIntrinsic(__mask, __val, __offset, \ + ((warpSize - __width) << 8) | (__Mask)); \ + } \ + inline __device__ unsigned int __FnName(unsigned int __mask, \ + unsigned int __val, __Type __offset, \ + int __width = warpSize) { \ + return static_cast<unsigned int>( \ + ::__FnName(__mask, static_cast<int>(__val), __offset, __width)); \ + } \ + inline __device__ long long __FnName(unsigned int __mask, long long __val, \ + __Type __offset, \ + int __width = warpSize) { \ + struct __Bits { \ + int __a, __b; \ + }; \ + _Static_assert(sizeof(__val) == sizeof(__Bits)); \ + _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \ + __Bits __tmp; \ + memcpy(&__tmp, &__val, sizeof(__val)); \ + __tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width); \ + __tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width); \ + long long __ret; \ + memcpy(&__ret, &__tmp, sizeof(__tmp)); \ + return __ret; \ + } \ + inline __device__ unsigned long long __FnName( \ + unsigned int __mask, unsigned long long __val, __Type __offset, \ + int __width = warpSize) { \ + return static_cast<unsigned long long>(::__FnName( \ + __mask, static_cast<long long>(__val), __offset, __width)); \ + } \ + inline __device__ long __FnName(unsigned int __mask, long __val, \ + __Type __offset, int __width = warpSize) { \ + _Static_assert(sizeof(long) == sizeof(long long) || \ + sizeof(long) == sizeof(int)); \ + if (sizeof(long) == sizeof(long long)) { \ + return static_cast<long>(::__FnName( \ + __mask, static_cast<long long>(__val), __offset, __width)); \ + } else if (sizeof(long) == sizeof(int)) { \ + return static_cast<long>( \ + ::__FnName(__mask, static_cast<int>(__val), __offset, __width)); \ + } \ + } \ + inline __device__ unsigned long __FnName( \ + unsigned int __mask, unsigned long __val, __Type __offset, \ + int __width = warpSize) { \ + return static_cast<unsigned long>( \ + ::__FnName(__mask, static_cast<long>(__val), __offset, __width)); \ + } \ + inline __device__ double __FnName(unsigned int __mask, double __val, \ + __Type __offset, int __width = warpSize) { \ + long long __tmp; \ + _Static_assert(sizeof(__tmp) == sizeof(__val)); \ + memcpy(&__tmp, &__val, sizeof(__val)); \ + __tmp = ::__FnName(__mask, __tmp, __offset, __width); \ + double __ret; \ + memcpy(&__ret, &__tmp, sizeof(__ret)); \ + return __ret; \ + } +__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32, + __nvvm_shfl_sync_idx_f32, 0x1f, int); +// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >= +// maxLane.
+__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32, + __nvvm_shfl_sync_up_f32, 0, unsigned int); +__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32, + __nvvm_shfl_sync_down_f32, 0x1f, unsigned int); +__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32, + __nvvm_shfl_sync_bfly_f32, 0x1f, int); +#pragma pop_macro("__MAKE_SYNC_SHUFFLES") + +inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) { + return __nvvm_bar_warp_sync(mask); +} + +inline __device__ void __barrier_sync(unsigned int id) { + __nvvm_barrier_sync(id); +} + +inline __device__ void __barrier_sync_count(unsigned int id, + unsigned int count) { + __nvvm_barrier_sync_cnt(id, count); +} + +inline __device__ int __all_sync(unsigned int mask, int pred) { + return __nvvm_vote_all_sync(mask, pred); +} + +inline __device__ int __any_sync(unsigned int mask, int pred) { + return __nvvm_vote_any_sync(mask, pred); +} + +inline __device__ int __uni_sync(unsigned int mask, int pred) { + return __nvvm_vote_uni_sync(mask, pred); +} + +inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) { + return __nvvm_vote_ballot_sync(mask, pred); +} + +inline __device__ unsigned int __activemask() { +#if CUDA_VERSION < 9020 + return __nvvm_vote_ballot(1); +#else + unsigned int mask; + asm volatile("activemask.b32 %0;" : "=r"(mask)); + return mask; +#endif +} + +inline __device__ unsigned int __fns(unsigned mask, unsigned base, int offset) { + return __nvvm_fns(mask, base, offset); +} + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +// Define __match* builtins CUDA-9 headers expect to see. +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 +inline __device__ unsigned int __match32_any_sync(unsigned int mask, + unsigned int value) { + return __nvvm_match_any_sync_i32(mask, value); +} + +inline __device__ unsigned long long +__match64_any_sync(unsigned int mask, unsigned long long value) { + return __nvvm_match_any_sync_i64(mask, value); +} + +inline __device__ unsigned int +__match32_all_sync(unsigned int mask, unsigned int value, int *pred) { + return __nvvm_match_all_sync_i32p(mask, value, pred); +} + +inline __device__ unsigned long long +__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) { + return __nvvm_match_all_sync_i64p(mask, value, pred); +} +#include "crt/sm_70_rt.hpp" + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 +#endif // __CUDA_VERSION >= 9000 + +// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}. + +// Prevent the vanilla sm_32 intrinsics header from being included. 
+#define __SM_32_INTRINSICS_H__ +#define __SM_32_INTRINSICS_HPP__ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); } +inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); } +inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); } +inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); } +inline __device__ long long __ldg(const long long *ptr) { + return __nvvm_ldg_ll(ptr); +} +inline __device__ unsigned char __ldg(const unsigned char *ptr) { + return __nvvm_ldg_uc(ptr); +} +inline __device__ signed char __ldg(const signed char *ptr) { + return __nvvm_ldg_uc((const unsigned char *)ptr); +} +inline __device__ unsigned short __ldg(const unsigned short *ptr) { + return __nvvm_ldg_us(ptr); +} +inline __device__ unsigned int __ldg(const unsigned int *ptr) { + return __nvvm_ldg_ui(ptr); +} +inline __device__ unsigned long __ldg(const unsigned long *ptr) { + return __nvvm_ldg_ul(ptr); +} +inline __device__ unsigned long long __ldg(const unsigned long long *ptr) { + return __nvvm_ldg_ull(ptr); +} +inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); } +inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); } + +inline __device__ char2 __ldg(const char2 *ptr) { + typedef char c2 __attribute__((ext_vector_type(2))); + // We can assume that ptr is aligned at least to char2's alignment, but the + // load will assume that ptr is aligned to c2's alignment. This is only + // safe if alignof(c2) <= alignof(char2). + c2 rv = __nvvm_ldg_c2(reinterpret_cast<const c2 *>(ptr)); + char2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ char4 __ldg(const char4 *ptr) { + typedef char c4 __attribute__((ext_vector_type(4))); + c4 rv = __nvvm_ldg_c4(reinterpret_cast<const c4 *>(ptr)); + char4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ short2 __ldg(const short2 *ptr) { + typedef short s2 __attribute__((ext_vector_type(2))); + s2 rv = __nvvm_ldg_s2(reinterpret_cast<const s2 *>(ptr)); + short2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ short4 __ldg(const short4 *ptr) { + typedef short s4 __attribute__((ext_vector_type(4))); + s4 rv = __nvvm_ldg_s4(reinterpret_cast<const s4 *>(ptr)); + short4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ int2 __ldg(const int2 *ptr) { + typedef int i2 __attribute__((ext_vector_type(2))); + i2 rv = __nvvm_ldg_i2(reinterpret_cast<const i2 *>(ptr)); + int2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ int4 __ldg(const int4 *ptr) { + typedef int i4 __attribute__((ext_vector_type(4))); + i4 rv = __nvvm_ldg_i4(reinterpret_cast<const i4 *>(ptr)); + int4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ longlong2 __ldg(const longlong2 *ptr) { + typedef long long ll2 __attribute__((ext_vector_type(2))); + ll2 rv = __nvvm_ldg_ll2(reinterpret_cast<const ll2 *>(ptr)); + longlong2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} + +inline __device__ uchar2 __ldg(const uchar2 *ptr) { + typedef unsigned char uc2 __attribute__((ext_vector_type(2))); + uc2 rv = __nvvm_ldg_uc2(reinterpret_cast<const uc2 *>(ptr)); + uchar2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ uchar4 __ldg(const uchar4 *ptr) { + typedef unsigned char uc4 __attribute__((ext_vector_type(4))); + uc4 rv = __nvvm_ldg_uc4(reinterpret_cast<const uc4 *>(ptr)); + uchar4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ ushort2 __ldg(const ushort2 *ptr) { + typedef unsigned short us2 __attribute__((ext_vector_type(2))); + us2 rv = __nvvm_ldg_us2(reinterpret_cast<const us2 *>(ptr)); + ushort2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ ushort4 __ldg(const ushort4 *ptr) { + typedef unsigned short us4 __attribute__((ext_vector_type(4))); + us4 rv = __nvvm_ldg_us4(reinterpret_cast<const us4 *>(ptr)); + ushort4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ uint2 __ldg(const uint2 *ptr) { + typedef unsigned int ui2 __attribute__((ext_vector_type(2))); + ui2 rv = __nvvm_ldg_ui2(reinterpret_cast<const ui2 *>(ptr)); + uint2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ uint4 __ldg(const uint4 *ptr) { + typedef unsigned int ui4 __attribute__((ext_vector_type(4))); + ui4 rv = __nvvm_ldg_ui4(reinterpret_cast<const ui4 *>(ptr)); + uint4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) { + typedef unsigned long long ull2 __attribute__((ext_vector_type(2))); + ull2 rv = __nvvm_ldg_ull2(reinterpret_cast<const ull2 *>(ptr)); + ulonglong2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} + +inline __device__ float2 __ldg(const float2 *ptr) { + typedef float f2 __attribute__((ext_vector_type(2))); + f2 rv = __nvvm_ldg_f2(reinterpret_cast<const f2 *>(ptr)); + float2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ float4 __ldg(const float4 *ptr) { + typedef float f4 __attribute__((ext_vector_type(4))); + f4 rv = __nvvm_ldg_f4(reinterpret_cast<const f4 *>(ptr)); + float4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ double2 __ldg(const double2 *ptr) { + typedef double d2 __attribute__((ext_vector_type(2))); + d2 rv = __nvvm_ldg_d2(reinterpret_cast<const d2 *>(ptr)); + double2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} + +// TODO: Implement these as intrinsics, so the backend can work its magic on +// these. Alternatively, we could implement these as plain C and try to get +// llvm to recognize the relevant patterns.
+inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned result; + asm("shf.l.wrap.b32 %0, %1, %2, %3;" + : "=r"(result) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return result; +} +inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned result; + asm("shf.l.clamp.b32 %0, %1, %2, %3;" + : "=r"(result) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return result; +} +inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned result; + asm("shf.r.wrap.b32 %0, %1, %2, %3;" + : "=r"(result) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return result; +} +inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned ret; + asm("shf.r.clamp.b32 %0, %1, %2, %3;" + : "=r"(ret) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return ret; +} + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +#endif // defined(__CLANG_CUDA_INTRINSICS_H__) diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_libdevice_declares.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_libdevice_declares.h new file mode 100644 index 0000000..6173b589 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_libdevice_declares.h @@ -0,0 +1,462 @@ +/*===-- __clang_cuda_libdevice_declares.h - decls for libdevice functions --=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_LIBDEVICE_DECLARES_H__ +#define __CLANG_CUDA_LIBDEVICE_DECLARES_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__OPENMP_NVPTX__) +#define __DEVICE__ +#elif defined(__CUDA__) +#define __DEVICE__ __device__ +#endif + +__DEVICE__ int __nv_abs(int __a); +__DEVICE__ double __nv_acos(double __a); +__DEVICE__ float __nv_acosf(float __a); +__DEVICE__ double __nv_acosh(double __a); +__DEVICE__ float __nv_acoshf(float __a); +__DEVICE__ double __nv_asin(double __a); +__DEVICE__ float __nv_asinf(float __a); +__DEVICE__ double __nv_asinh(double __a); +__DEVICE__ float __nv_asinhf(float __a); +__DEVICE__ double __nv_atan2(double __a, double __b); +__DEVICE__ float __nv_atan2f(float __a, float __b); +__DEVICE__ double __nv_atan(double __a); +__DEVICE__ float __nv_atanf(float __a); +__DEVICE__ double __nv_atanh(double __a); +__DEVICE__ float __nv_atanhf(float __a); +__DEVICE__ int __nv_brev(int __a); +__DEVICE__ long long __nv_brevll(long long __a); +__DEVICE__ int __nv_byte_perm(int __a, int __b, int __c); +__DEVICE__ double __nv_cbrt(double __a); +__DEVICE__ float __nv_cbrtf(float __a); +__DEVICE__ double __nv_ceil(double __a); +__DEVICE__ float __nv_ceilf(float __a); +__DEVICE__ int __nv_clz(int __a); +__DEVICE__ int __nv_clzll(long long __a); +__DEVICE__ double __nv_copysign(double __a, double __b); +__DEVICE__ float __nv_copysignf(float __a, float __b); +__DEVICE__ double __nv_cos(double __a); +__DEVICE__ float __nv_cosf(float __a); +__DEVICE__ double __nv_cosh(double __a); +__DEVICE__ float __nv_coshf(float __a); +__DEVICE__ double __nv_cospi(double __a); +__DEVICE__ float __nv_cospif(float __a); +__DEVICE__ double 
__nv_cyl_bessel_i0(double __a); +__DEVICE__ float __nv_cyl_bessel_i0f(float __a); +__DEVICE__ double __nv_cyl_bessel_i1(double __a); +__DEVICE__ float __nv_cyl_bessel_i1f(float __a); +__DEVICE__ double __nv_dadd_rd(double __a, double __b); +__DEVICE__ double __nv_dadd_rn(double __a, double __b); +__DEVICE__ double __nv_dadd_ru(double __a, double __b); +__DEVICE__ double __nv_dadd_rz(double __a, double __b); +__DEVICE__ double __nv_ddiv_rd(double __a, double __b); +__DEVICE__ double __nv_ddiv_rn(double __a, double __b); +__DEVICE__ double __nv_ddiv_ru(double __a, double __b); +__DEVICE__ double __nv_ddiv_rz(double __a, double __b); +__DEVICE__ double __nv_dmul_rd(double __a, double __b); +__DEVICE__ double __nv_dmul_rn(double __a, double __b); +__DEVICE__ double __nv_dmul_ru(double __a, double __b); +__DEVICE__ double __nv_dmul_rz(double __a, double __b); +__DEVICE__ float __nv_double2float_rd(double __a); +__DEVICE__ float __nv_double2float_rn(double __a); +__DEVICE__ float __nv_double2float_ru(double __a); +__DEVICE__ float __nv_double2float_rz(double __a); +__DEVICE__ int __nv_double2hiint(double __a); +__DEVICE__ int __nv_double2int_rd(double __a); +__DEVICE__ int __nv_double2int_rn(double __a); +__DEVICE__ int __nv_double2int_ru(double __a); +__DEVICE__ int __nv_double2int_rz(double __a); +__DEVICE__ long long __nv_double2ll_rd(double __a); +__DEVICE__ long long __nv_double2ll_rn(double __a); +__DEVICE__ long long __nv_double2ll_ru(double __a); +__DEVICE__ long long __nv_double2ll_rz(double __a); +__DEVICE__ int __nv_double2loint(double __a); +__DEVICE__ unsigned int __nv_double2uint_rd(double __a); +__DEVICE__ unsigned int __nv_double2uint_rn(double __a); +__DEVICE__ unsigned int __nv_double2uint_ru(double __a); +__DEVICE__ unsigned int __nv_double2uint_rz(double __a); +__DEVICE__ unsigned long long __nv_double2ull_rd(double __a); +__DEVICE__ unsigned long long __nv_double2ull_rn(double __a); +__DEVICE__ unsigned long long __nv_double2ull_ru(double __a); +__DEVICE__ unsigned long long __nv_double2ull_rz(double __a); +__DEVICE__ unsigned long long __nv_double_as_longlong(double __a); +__DEVICE__ double __nv_drcp_rd(double __a); +__DEVICE__ double __nv_drcp_rn(double __a); +__DEVICE__ double __nv_drcp_ru(double __a); +__DEVICE__ double __nv_drcp_rz(double __a); +__DEVICE__ double __nv_dsqrt_rd(double __a); +__DEVICE__ double __nv_dsqrt_rn(double __a); +__DEVICE__ double __nv_dsqrt_ru(double __a); +__DEVICE__ double __nv_dsqrt_rz(double __a); +__DEVICE__ double __nv_dsub_rd(double __a, double __b); +__DEVICE__ double __nv_dsub_rn(double __a, double __b); +__DEVICE__ double __nv_dsub_ru(double __a, double __b); +__DEVICE__ double __nv_dsub_rz(double __a, double __b); +__DEVICE__ double __nv_erfc(double __a); +__DEVICE__ float __nv_erfcf(float __a); +__DEVICE__ double __nv_erfcinv(double __a); +__DEVICE__ float __nv_erfcinvf(float __a); +__DEVICE__ double __nv_erfcx(double __a); +__DEVICE__ float __nv_erfcxf(float __a); +__DEVICE__ double __nv_erf(double __a); +__DEVICE__ float __nv_erff(float __a); +__DEVICE__ double __nv_erfinv(double __a); +__DEVICE__ float __nv_erfinvf(float __a); +__DEVICE__ double __nv_exp10(double __a); +__DEVICE__ float __nv_exp10f(float __a); +__DEVICE__ double __nv_exp2(double __a); +__DEVICE__ float __nv_exp2f(float __a); +__DEVICE__ double __nv_exp(double __a); +__DEVICE__ float __nv_expf(float __a); +__DEVICE__ double __nv_expm1(double __a); +__DEVICE__ float __nv_expm1f(float __a); +__DEVICE__ double __nv_fabs(double __a); +__DEVICE__ float __nv_fabsf(float 
__a); +__DEVICE__ float __nv_fadd_rd(float __a, float __b); +__DEVICE__ float __nv_fadd_rn(float __a, float __b); +__DEVICE__ float __nv_fadd_ru(float __a, float __b); +__DEVICE__ float __nv_fadd_rz(float __a, float __b); +__DEVICE__ float __nv_fast_cosf(float __a); +__DEVICE__ float __nv_fast_exp10f(float __a); +__DEVICE__ float __nv_fast_expf(float __a); +__DEVICE__ float __nv_fast_fdividef(float __a, float __b); +__DEVICE__ float __nv_fast_log10f(float __a); +__DEVICE__ float __nv_fast_log2f(float __a); +__DEVICE__ float __nv_fast_logf(float __a); +__DEVICE__ float __nv_fast_powf(float __a, float __b); +__DEVICE__ void __nv_fast_sincosf(float __a, float *__s, float *__c); +__DEVICE__ float __nv_fast_sinf(float __a); +__DEVICE__ float __nv_fast_tanf(float __a); +__DEVICE__ double __nv_fdim(double __a, double __b); +__DEVICE__ float __nv_fdimf(float __a, float __b); +__DEVICE__ float __nv_fdiv_rd(float __a, float __b); +__DEVICE__ float __nv_fdiv_rn(float __a, float __b); +__DEVICE__ float __nv_fdiv_ru(float __a, float __b); +__DEVICE__ float __nv_fdiv_rz(float __a, float __b); +__DEVICE__ int __nv_ffs(int __a); +__DEVICE__ int __nv_ffsll(long long __a); +__DEVICE__ int __nv_finitef(float __a); +__DEVICE__ unsigned short __nv_float2half_rn(float __a); +__DEVICE__ int __nv_float2int_rd(float __a); +__DEVICE__ int __nv_float2int_rn(float __a); +__DEVICE__ int __nv_float2int_ru(float __a); +__DEVICE__ int __nv_float2int_rz(float __a); +__DEVICE__ long long __nv_float2ll_rd(float __a); +__DEVICE__ long long __nv_float2ll_rn(float __a); +__DEVICE__ long long __nv_float2ll_ru(float __a); +__DEVICE__ long long __nv_float2ll_rz(float __a); +__DEVICE__ unsigned int __nv_float2uint_rd(float __a); +__DEVICE__ unsigned int __nv_float2uint_rn(float __a); +__DEVICE__ unsigned int __nv_float2uint_ru(float __a); +__DEVICE__ unsigned int __nv_float2uint_rz(float __a); +__DEVICE__ unsigned long long __nv_float2ull_rd(float __a); +__DEVICE__ unsigned long long __nv_float2ull_rn(float __a); +__DEVICE__ unsigned long long __nv_float2ull_ru(float __a); +__DEVICE__ unsigned long long __nv_float2ull_rz(float __a); +__DEVICE__ int __nv_float_as_int(float __a); +__DEVICE__ unsigned int __nv_float_as_uint(float __a); +__DEVICE__ double __nv_floor(double __a); +__DEVICE__ float __nv_floorf(float __a); +__DEVICE__ double __nv_fma(double __a, double __b, double __c); +__DEVICE__ float __nv_fmaf(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_ieee_rd(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_ieee_rn(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_ieee_ru(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_ieee_rz(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_rd(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_rn(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_ru(float __a, float __b, float __c); +__DEVICE__ float __nv_fmaf_rz(float __a, float __b, float __c); +__DEVICE__ double __nv_fma_rd(double __a, double __b, double __c); +__DEVICE__ double __nv_fma_rn(double __a, double __b, double __c); +__DEVICE__ double __nv_fma_ru(double __a, double __b, double __c); +__DEVICE__ double __nv_fma_rz(double __a, double __b, double __c); +__DEVICE__ double __nv_fmax(double __a, double __b); +__DEVICE__ float __nv_fmaxf(float __a, float __b); +__DEVICE__ double __nv_fmin(double __a, double __b); +__DEVICE__ float __nv_fminf(float __a, float __b); +__DEVICE__ double __nv_fmod(double __a, double __b); +__DEVICE__ float 
__nv_fmodf(float __a, float __b); +__DEVICE__ float __nv_fmul_rd(float __a, float __b); +__DEVICE__ float __nv_fmul_rn(float __a, float __b); +__DEVICE__ float __nv_fmul_ru(float __a, float __b); +__DEVICE__ float __nv_fmul_rz(float __a, float __b); +__DEVICE__ float __nv_frcp_rd(float __a); +__DEVICE__ float __nv_frcp_rn(float __a); +__DEVICE__ float __nv_frcp_ru(float __a); +__DEVICE__ float __nv_frcp_rz(float __a); +__DEVICE__ double __nv_frexp(double __a, int *__b); +__DEVICE__ float __nv_frexpf(float __a, int *__b); +__DEVICE__ float __nv_frsqrt_rn(float __a); +__DEVICE__ float __nv_fsqrt_rd(float __a); +__DEVICE__ float __nv_fsqrt_rn(float __a); +__DEVICE__ float __nv_fsqrt_ru(float __a); +__DEVICE__ float __nv_fsqrt_rz(float __a); +__DEVICE__ float __nv_fsub_rd(float __a, float __b); +__DEVICE__ float __nv_fsub_rn(float __a, float __b); +__DEVICE__ float __nv_fsub_ru(float __a, float __b); +__DEVICE__ float __nv_fsub_rz(float __a, float __b); +__DEVICE__ int __nv_hadd(int __a, int __b); +__DEVICE__ float __nv_half2float(unsigned short __h); +__DEVICE__ double __nv_hiloint2double(int __a, int __b); +__DEVICE__ double __nv_hypot(double __a, double __b); +__DEVICE__ float __nv_hypotf(float __a, float __b); +__DEVICE__ int __nv_ilogb(double __a); +__DEVICE__ int __nv_ilogbf(float __a); +__DEVICE__ double __nv_int2double_rn(int __a); +__DEVICE__ float __nv_int2float_rd(int __a); +__DEVICE__ float __nv_int2float_rn(int __a); +__DEVICE__ float __nv_int2float_ru(int __a); +__DEVICE__ float __nv_int2float_rz(int __a); +__DEVICE__ float __nv_int_as_float(int __a); +__DEVICE__ int __nv_isfinited(double __a); +__DEVICE__ int __nv_isinfd(double __a); +__DEVICE__ int __nv_isinff(float __a); +__DEVICE__ int __nv_isnand(double __a); +__DEVICE__ int __nv_isnanf(float __a); +__DEVICE__ double __nv_j0(double __a); +__DEVICE__ float __nv_j0f(float __a); +__DEVICE__ double __nv_j1(double __a); +__DEVICE__ float __nv_j1f(float __a); +__DEVICE__ float __nv_jnf(int __a, float __b); +__DEVICE__ double __nv_jn(int __a, double __b); +__DEVICE__ double __nv_ldexp(double __a, int __b); +__DEVICE__ float __nv_ldexpf(float __a, int __b); +__DEVICE__ double __nv_lgamma(double __a); +__DEVICE__ float __nv_lgammaf(float __a); +__DEVICE__ double __nv_ll2double_rd(long long __a); +__DEVICE__ double __nv_ll2double_rn(long long __a); +__DEVICE__ double __nv_ll2double_ru(long long __a); +__DEVICE__ double __nv_ll2double_rz(long long __a); +__DEVICE__ float __nv_ll2float_rd(long long __a); +__DEVICE__ float __nv_ll2float_rn(long long __a); +__DEVICE__ float __nv_ll2float_ru(long long __a); +__DEVICE__ float __nv_ll2float_rz(long long __a); +__DEVICE__ long long __nv_llabs(long long __a); +__DEVICE__ long long __nv_llmax(long long __a, long long __b); +__DEVICE__ long long __nv_llmin(long long __a, long long __b); +__DEVICE__ long long __nv_llrint(double __a); +__DEVICE__ long long __nv_llrintf(float __a); +__DEVICE__ long long __nv_llround(double __a); +__DEVICE__ long long __nv_llroundf(float __a); +__DEVICE__ double __nv_log10(double __a); +__DEVICE__ float __nv_log10f(float __a); +__DEVICE__ double __nv_log1p(double __a); +__DEVICE__ float __nv_log1pf(float __a); +__DEVICE__ double __nv_log2(double __a); +__DEVICE__ float __nv_log2f(float __a); +__DEVICE__ double __nv_logb(double __a); +__DEVICE__ float __nv_logbf(float __a); +__DEVICE__ double __nv_log(double __a); +__DEVICE__ float __nv_logf(float __a); +__DEVICE__ double __nv_longlong_as_double(long long __a); +__DEVICE__ int __nv_max(int __a, int __b); +__DEVICE__ 
int __nv_min(int __a, int __b); +__DEVICE__ double __nv_modf(double __a, double *__b); +__DEVICE__ float __nv_modff(float __a, float *__b); +__DEVICE__ int __nv_mul24(int __a, int __b); +__DEVICE__ long long __nv_mul64hi(long long __a, long long __b); +__DEVICE__ int __nv_mulhi(int __a, int __b); +__DEVICE__ double __nv_nan(const signed char *__a); +__DEVICE__ float __nv_nanf(const signed char *__a); +__DEVICE__ double __nv_nearbyint(double __a); +__DEVICE__ float __nv_nearbyintf(float __a); +__DEVICE__ double __nv_nextafter(double __a, double __b); +__DEVICE__ float __nv_nextafterf(float __a, float __b); +__DEVICE__ double __nv_norm3d(double __a, double __b, double __c); +__DEVICE__ float __nv_norm3df(float __a, float __b, float __c); +__DEVICE__ double __nv_norm4d(double __a, double __b, double __c, double __d); +__DEVICE__ float __nv_norm4df(float __a, float __b, float __c, float __d); +__DEVICE__ double __nv_normcdf(double __a); +__DEVICE__ float __nv_normcdff(float __a); +__DEVICE__ double __nv_normcdfinv(double __a); +__DEVICE__ float __nv_normcdfinvf(float __a); +__DEVICE__ float __nv_normf(int __a, const float *__b); +__DEVICE__ double __nv_norm(int __a, const double *__b); +__DEVICE__ int __nv_popc(int __a); +__DEVICE__ int __nv_popcll(long long __a); +__DEVICE__ double __nv_pow(double __a, double __b); +__DEVICE__ float __nv_powf(float __a, float __b); +__DEVICE__ double __nv_powi(double __a, int __b); +__DEVICE__ float __nv_powif(float __a, int __b); +__DEVICE__ double __nv_rcbrt(double __a); +__DEVICE__ float __nv_rcbrtf(float __a); +__DEVICE__ double __nv_rcp64h(double __a); +__DEVICE__ double __nv_remainder(double __a, double __b); +__DEVICE__ float __nv_remainderf(float __a, float __b); +__DEVICE__ double __nv_remquo(double __a, double __b, int *__c); +__DEVICE__ float __nv_remquof(float __a, float __b, int *__c); +__DEVICE__ int __nv_rhadd(int __a, int __b); +__DEVICE__ double __nv_rhypot(double __a, double __b); +__DEVICE__ float __nv_rhypotf(float __a, float __b); +__DEVICE__ double __nv_rint(double __a); +__DEVICE__ float __nv_rintf(float __a); +__DEVICE__ double __nv_rnorm3d(double __a, double __b, double __c); +__DEVICE__ float __nv_rnorm3df(float __a, float __b, float __c); +__DEVICE__ double __nv_rnorm4d(double __a, double __b, double __c, double __d); +__DEVICE__ float __nv_rnorm4df(float __a, float __b, float __c, float __d); +__DEVICE__ float __nv_rnormf(int __a, const float *__b); +__DEVICE__ double __nv_rnorm(int __a, const double *__b); +__DEVICE__ double __nv_round(double __a); +__DEVICE__ float __nv_roundf(float __a); +__DEVICE__ double __nv_rsqrt(double __a); +__DEVICE__ float __nv_rsqrtf(float __a); +__DEVICE__ int __nv_sad(int __a, int __b, int __c); +__DEVICE__ float __nv_saturatef(float __a); +__DEVICE__ double __nv_scalbn(double __a, int __b); +__DEVICE__ float __nv_scalbnf(float __a, int __b); +__DEVICE__ int __nv_signbitd(double __a); +__DEVICE__ int __nv_signbitf(float __a); +__DEVICE__ void __nv_sincos(double __a, double *__b, double *__c); +__DEVICE__ void __nv_sincosf(float __a, float *__b, float *__c); +__DEVICE__ void __nv_sincospi(double __a, double *__b, double *__c); +__DEVICE__ void __nv_sincospif(float __a, float *__b, float *__c); +__DEVICE__ double __nv_sin(double __a); +__DEVICE__ float __nv_sinf(float __a); +__DEVICE__ double __nv_sinh(double __a); +__DEVICE__ float __nv_sinhf(float __a); +__DEVICE__ double __nv_sinpi(double __a); +__DEVICE__ float __nv_sinpif(float __a); +__DEVICE__ double __nv_sqrt(double __a); +__DEVICE__ float 
__nv_sqrtf(float __a); +__DEVICE__ double __nv_tan(double __a); +__DEVICE__ float __nv_tanf(float __a); +__DEVICE__ double __nv_tanh(double __a); +__DEVICE__ float __nv_tanhf(float __a); +__DEVICE__ double __nv_tgamma(double __a); +__DEVICE__ float __nv_tgammaf(float __a); +__DEVICE__ double __nv_trunc(double __a); +__DEVICE__ float __nv_truncf(float __a); +__DEVICE__ int __nv_uhadd(unsigned int __a, unsigned int __b); +__DEVICE__ double __nv_uint2double_rn(unsigned int __i); +__DEVICE__ float __nv_uint2float_rd(unsigned int __a); +__DEVICE__ float __nv_uint2float_rn(unsigned int __a); +__DEVICE__ float __nv_uint2float_ru(unsigned int __a); +__DEVICE__ float __nv_uint2float_rz(unsigned int __a); +__DEVICE__ float __nv_uint_as_float(unsigned int __a); +__DEVICE__ double __nv_ull2double_rd(unsigned long long __a); +__DEVICE__ double __nv_ull2double_rn(unsigned long long __a); +__DEVICE__ double __nv_ull2double_ru(unsigned long long __a); +__DEVICE__ double __nv_ull2double_rz(unsigned long long __a); +__DEVICE__ float __nv_ull2float_rd(unsigned long long __a); +__DEVICE__ float __nv_ull2float_rn(unsigned long long __a); +__DEVICE__ float __nv_ull2float_ru(unsigned long long __a); +__DEVICE__ float __nv_ull2float_rz(unsigned long long __a); +__DEVICE__ unsigned long long __nv_ullmax(unsigned long long __a, + unsigned long long __b); +__DEVICE__ unsigned long long __nv_ullmin(unsigned long long __a, + unsigned long long __b); +__DEVICE__ unsigned int __nv_umax(unsigned int __a, unsigned int __b); +__DEVICE__ unsigned int __nv_umin(unsigned int __a, unsigned int __b); +__DEVICE__ unsigned int __nv_umul24(unsigned int __a, unsigned int __b); +__DEVICE__ unsigned long long __nv_umul64hi(unsigned long long __a, + unsigned long long __b); +__DEVICE__ unsigned int __nv_umulhi(unsigned int __a, unsigned int __b); +__DEVICE__ unsigned int __nv_urhadd(unsigned int __a, unsigned int __b); +__DEVICE__ unsigned int __nv_usad(unsigned int __a, unsigned int __b, + unsigned int __c); +#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020 +__DEVICE__ int __nv_vabs2(int __a); +__DEVICE__ int __nv_vabs4(int __a); +__DEVICE__ int __nv_vabsdiffs2(int __a, int __b); +__DEVICE__ int __nv_vabsdiffs4(int __a, int __b); +__DEVICE__ int __nv_vabsdiffu2(int __a, int __b); +__DEVICE__ int __nv_vabsdiffu4(int __a, int __b); +__DEVICE__ int __nv_vabsss2(int __a); +__DEVICE__ int __nv_vabsss4(int __a); +__DEVICE__ int __nv_vadd2(int __a, int __b); +__DEVICE__ int __nv_vadd4(int __a, int __b); +__DEVICE__ int __nv_vaddss2(int __a, int __b); +__DEVICE__ int __nv_vaddss4(int __a, int __b); +__DEVICE__ int __nv_vaddus2(int __a, int __b); +__DEVICE__ int __nv_vaddus4(int __a, int __b); +__DEVICE__ int __nv_vavgs2(int __a, int __b); +__DEVICE__ int __nv_vavgs4(int __a, int __b); +__DEVICE__ int __nv_vavgu2(int __a, int __b); +__DEVICE__ int __nv_vavgu4(int __a, int __b); +__DEVICE__ int __nv_vcmpeq2(int __a, int __b); +__DEVICE__ int __nv_vcmpeq4(int __a, int __b); +__DEVICE__ int __nv_vcmpges2(int __a, int __b); +__DEVICE__ int __nv_vcmpges4(int __a, int __b); +__DEVICE__ int __nv_vcmpgeu2(int __a, int __b); +__DEVICE__ int __nv_vcmpgeu4(int __a, int __b); +__DEVICE__ int __nv_vcmpgts2(int __a, int __b); +__DEVICE__ int __nv_vcmpgts4(int __a, int __b); +__DEVICE__ int __nv_vcmpgtu2(int __a, int __b); +__DEVICE__ int __nv_vcmpgtu4(int __a, int __b); +__DEVICE__ int __nv_vcmples2(int __a, int __b); +__DEVICE__ int __nv_vcmples4(int __a, int __b); +__DEVICE__ int __nv_vcmpleu2(int __a, int __b); +__DEVICE__ int __nv_vcmpleu4(int __a, int 
__b); +__DEVICE__ int __nv_vcmplts2(int __a, int __b); +__DEVICE__ int __nv_vcmplts4(int __a, int __b); +__DEVICE__ int __nv_vcmpltu2(int __a, int __b); +__DEVICE__ int __nv_vcmpltu4(int __a, int __b); +__DEVICE__ int __nv_vcmpne2(int __a, int __b); +__DEVICE__ int __nv_vcmpne4(int __a, int __b); +__DEVICE__ int __nv_vhaddu2(int __a, int __b); +__DEVICE__ int __nv_vhaddu4(int __a, int __b); +__DEVICE__ int __nv_vmaxs2(int __a, int __b); +__DEVICE__ int __nv_vmaxs4(int __a, int __b); +__DEVICE__ int __nv_vmaxu2(int __a, int __b); +__DEVICE__ int __nv_vmaxu4(int __a, int __b); +__DEVICE__ int __nv_vmins2(int __a, int __b); +__DEVICE__ int __nv_vmins4(int __a, int __b); +__DEVICE__ int __nv_vminu2(int __a, int __b); +__DEVICE__ int __nv_vminu4(int __a, int __b); +__DEVICE__ int __nv_vneg2(int __a); +__DEVICE__ int __nv_vneg4(int __a); +__DEVICE__ int __nv_vnegss2(int __a); +__DEVICE__ int __nv_vnegss4(int __a); +__DEVICE__ int __nv_vsads2(int __a, int __b); +__DEVICE__ int __nv_vsads4(int __a, int __b); +__DEVICE__ int __nv_vsadu2(int __a, int __b); +__DEVICE__ int __nv_vsadu4(int __a, int __b); +__DEVICE__ int __nv_vseteq2(int __a, int __b); +__DEVICE__ int __nv_vseteq4(int __a, int __b); +__DEVICE__ int __nv_vsetges2(int __a, int __b); +__DEVICE__ int __nv_vsetges4(int __a, int __b); +__DEVICE__ int __nv_vsetgeu2(int __a, int __b); +__DEVICE__ int __nv_vsetgeu4(int __a, int __b); +__DEVICE__ int __nv_vsetgts2(int __a, int __b); +__DEVICE__ int __nv_vsetgts4(int __a, int __b); +__DEVICE__ int __nv_vsetgtu2(int __a, int __b); +__DEVICE__ int __nv_vsetgtu4(int __a, int __b); +__DEVICE__ int __nv_vsetles2(int __a, int __b); +__DEVICE__ int __nv_vsetles4(int __a, int __b); +__DEVICE__ int __nv_vsetleu2(int __a, int __b); +__DEVICE__ int __nv_vsetleu4(int __a, int __b); +__DEVICE__ int __nv_vsetlts2(int __a, int __b); +__DEVICE__ int __nv_vsetlts4(int __a, int __b); +__DEVICE__ int __nv_vsetltu2(int __a, int __b); +__DEVICE__ int __nv_vsetltu4(int __a, int __b); +__DEVICE__ int __nv_vsetne2(int __a, int __b); +__DEVICE__ int __nv_vsetne4(int __a, int __b); +__DEVICE__ int __nv_vsub2(int __a, int __b); +__DEVICE__ int __nv_vsub4(int __a, int __b); +__DEVICE__ int __nv_vsubss2(int __a, int __b); +__DEVICE__ int __nv_vsubss4(int __a, int __b); +__DEVICE__ int __nv_vsubus2(int __a, int __b); +__DEVICE__ int __nv_vsubus4(int __a, int __b); +#endif // CUDA_VERSION +__DEVICE__ double __nv_y0(double __a); +__DEVICE__ float __nv_y0f(float __a); +__DEVICE__ double __nv_y1(double __a); +__DEVICE__ float __nv_y1f(float __a); +__DEVICE__ float __nv_ynf(int __a, float __b); +__DEVICE__ double __nv_yn(int __a, double __b); +#if defined(__cplusplus) +} // extern "C" +#endif +#endif // __CLANG_CUDA_LIBDEVICE_DECLARES_H__ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_math.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_math.h new file mode 100644 index 0000000..538556f --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_math.h @@ -0,0 +1,348 @@ +/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_MATH_H__
+#define __CLANG_CUDA_MATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+#ifndef __OPENMP_NVPTX__
+#if CUDA_VERSION < 9000
+#error This file is intended to be used with CUDA-9+ only.
+#endif
+#endif
+
+// __DEVICE__ is a helper macro with common set of attributes for the wrappers
+// we implement in this file. We need static in order to avoid emitting unused
+// functions and __forceinline__ helps inlining these wrappers at -O1.
+#pragma push_macro("__DEVICE__")
+#ifdef __OPENMP_NVPTX__
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+#else
+#define __DEVICE__ static __device__ __forceinline__
+#endif
+
+// Specialized version of __DEVICE__ for functions with void return type.
+// Needed because the OpenMP overlay requires constexpr functions here but
+// prior to c++14 void return functions could not be constexpr.
+#pragma push_macro("__DEVICE_VOID__")
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
+#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE_VOID__ __DEVICE__
+#endif
+
+// libdevice provides fast low precision and slow full-precision
+// implementations for some functions. Which one gets selected depends on
+// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
+// -ffast-math or -fcuda-approx-transcendentals are in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
+
+__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
+__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
+__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
+__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
+__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
+__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
+__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
+__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
+__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
+__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
+__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
+__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
+__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
+__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
+__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
+__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
+__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
+__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
+__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+__DEVICE__ double copysign(double __a, double __b) {
+  return __nv_copysign(__a, __b);
+}
+__DEVICE__ float copysignf(float __a, float __b) {
+  return __nv_copysignf(__a, __b);
+}
+__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
+__DEVICE__ float cosf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
+}
+__DEVICE__ double
cosh(double __a) { return __nv_cosh(__a); } +__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); } +__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); } +__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); } +__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); } +__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); } +__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); } +__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); } +__DEVICE__ double erf(double __a) { return __nv_erf(__a); } +__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); } +__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); } +__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); } +__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); } +__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); } +__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); } +__DEVICE__ float erff(float __a) { return __nv_erff(__a); } +__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); } +__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); } +__DEVICE__ double exp(double __a) { return __nv_exp(__a); } +__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); } +__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); } +__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); } +__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); } +__DEVICE__ float expf(float __a) { return __nv_expf(__a); } +__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); } +__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); } +__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); } +__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); } +__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); } +__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; } +__DEVICE__ float fdividef(float __a, float __b) { +#if __FAST_MATH__ && !__CUDA_PREC_DIV + return __nv_fast_fdividef(__a, __b); +#else + return __a / __b; +#endif +} +__DEVICE__ double floor(double __f) { return __nv_floor(__f); } +__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); } +__DEVICE__ double fma(double __a, double __b, double __c) { + return __nv_fma(__a, __b, __c); +} +__DEVICE__ float fmaf(float __a, float __b, float __c) { + return __nv_fmaf(__a, __b, __c); +} +__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); } +__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); } +__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); } +__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); } +__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); } +__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); } +__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); } +__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); } +__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); } +__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); } +__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); } +__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); } +__DEVICE__ double j0(double __a) { return __nv_j0(__a); } +__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); } 
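To make the dispatch above concrete: with -ffast-math or -fcuda-approx-transcendentals, clang defines __CLANG_CUDA_APPROX_TRANSCENDENTALS__ and __FAST_OR_SLOW(fast, slow) expands to its first argument. A minimal sketch, illustrative only and not part of this patch (the kernel name is hypothetical):

    __global__ void __example_kernel(float *__out, float __x) {
      // Lowers to __nv_fast_cosf(__x) under approx-transcendentals,
      // otherwise to the full-precision __nv_cosf(__x).
      *__out = cosf(__x);
    }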
+__DEVICE__ double j1(double __a) { return __nv_j1(__a); } +__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); } +__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); } +__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); } +#if defined(__LP64__) || defined(_WIN64) +__DEVICE__ long labs(long __a) { return __nv_llabs(__a); }; +#else +__DEVICE__ long labs(long __a) { return __nv_abs(__a); }; +#endif +__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); } +__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); } +__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); } +__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); } +__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); } +__DEVICE__ long long llmax(long long __a, long long __b) { + return __nv_llmax(__a, __b); +} +__DEVICE__ long long llmin(long long __a, long long __b) { + return __nv_llmin(__a, __b); +} +__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); } +__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); } +__DEVICE__ long long llround(double __a) { return __nv_llround(__a); } +__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); } +__DEVICE__ double round(double __a) { return __nv_round(__a); } +__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); } +__DEVICE__ double log(double __a) { return __nv_log(__a); } +__DEVICE__ double log10(double __a) { return __nv_log10(__a); } +__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); } +__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); } +__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); } +__DEVICE__ double log2(double __a) { return __nv_log2(__a); } +__DEVICE__ float log2f(float __a) { + return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a); +} +__DEVICE__ double logb(double __a) { return __nv_logb(__a); } +__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); } +__DEVICE__ float logf(float __a) { + return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a); +} +#if defined(__LP64__) || defined(_WIN64) +__DEVICE__ long lrint(double __a) { return llrint(__a); } +__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); } +__DEVICE__ long lround(double __a) { return llround(__a); } +__DEVICE__ long lroundf(float __a) { return llroundf(__a); } +#else +__DEVICE__ long lrint(double __a) { return (long)rint(__a); } +__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); } +__DEVICE__ long lround(double __a) { return round(__a); } +__DEVICE__ long lroundf(float __a) { return roundf(__a); } +#endif +__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); } +__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); } +__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); } +__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); } +__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); } +__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); } +__DEVICE__ double nextafter(double __a, double __b) { + return __nv_nextafter(__a, __b); +} +__DEVICE__ float nextafterf(float __a, float __b) { + return __nv_nextafterf(__a, __b); +} +__DEVICE__ double norm(int __dim, const double *__t) { + return __nv_norm(__dim, __t); +} +__DEVICE__ double norm3d(double __a, double __b, double __c) { + return __nv_norm3d(__a, __b, __c); +} +__DEVICE__ float norm3df(float __a, float __b, 
float __c) { + return __nv_norm3df(__a, __b, __c); +} +__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) { + return __nv_norm4d(__a, __b, __c, __d); +} +__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) { + return __nv_norm4df(__a, __b, __c, __d); +} +__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); } +__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); } +__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); } +__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); } +__DEVICE__ float normf(int __dim, const float *__t) { + return __nv_normf(__dim, __t); +} +__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); } +__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); } +__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); } +__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); } +__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); } +__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); } +__DEVICE__ double remainder(double __a, double __b) { + return __nv_remainder(__a, __b); +} +__DEVICE__ float remainderf(float __a, float __b) { + return __nv_remainderf(__a, __b); +} +__DEVICE__ double remquo(double __a, double __b, int *__c) { + return __nv_remquo(__a, __b, __c); +} +__DEVICE__ float remquof(float __a, float __b, int *__c) { + return __nv_remquof(__a, __b, __c); +} +__DEVICE__ double rhypot(double __a, double __b) { + return __nv_rhypot(__a, __b); +} +__DEVICE__ float rhypotf(float __a, float __b) { + return __nv_rhypotf(__a, __b); +} +// __nv_rint* in libdevice is buggy and produces incorrect results. +__DEVICE__ double rint(double __a) { return __builtin_rint(__a); } +__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); } +__DEVICE__ double rnorm(int __a, const double *__b) { + return __nv_rnorm(__a, __b); +} +__DEVICE__ double rnorm3d(double __a, double __b, double __c) { + return __nv_rnorm3d(__a, __b, __c); +} +__DEVICE__ float rnorm3df(float __a, float __b, float __c) { + return __nv_rnorm3df(__a, __b, __c); +} +__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) { + return __nv_rnorm4d(__a, __b, __c, __d); +} +__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) { + return __nv_rnorm4df(__a, __b, __c, __d); +} +__DEVICE__ float rnormf(int __dim, const float *__t) { + return __nv_rnormf(__dim, __t); +} +__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); } +__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); } +__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); } +__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); } +__DEVICE__ double scalbln(double __a, long __b) { + if (__b > INT_MAX) + return __a > 0 ? HUGE_VAL : -HUGE_VAL; + if (__b < INT_MIN) + return __a > 0 ? 0.0 : -0.0; + return scalbn(__a, (int)__b); +} +__DEVICE__ float scalblnf(float __a, long __b) { + if (__b > INT_MAX) + return __a > 0 ? HUGE_VALF : -HUGE_VALF; + if (__b < INT_MIN) + return __a > 0 ? 
0.f : -0.f;
+  return scalbnf(__a, (int)__b);
+}
+__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
+__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
+  return __nv_sincos(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
+  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
+  return __nv_sincospi(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
+  return __nv_sincospif(__a, __s, __c);
+}
+__DEVICE__ float sinf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
+}
+__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
+__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
+__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
+__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
+__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
+__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
+__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
+__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
+__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
+__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
+__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
+__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
+__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
+__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
+__DEVICE__ unsigned long long ullmax(unsigned long long __a,
+                                     unsigned long long __b) {
+  return __nv_ullmax(__a, __b);
+}
+__DEVICE__ unsigned long long ullmin(unsigned long long __a,
+                                     unsigned long long __b) {
+  return __nv_ullmin(__a, __b);
+}
+__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
+  return __nv_umax(__a, __b);
+}
+__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
+  return __nv_umin(__a, __b);
+}
+__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
+__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
+__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
+__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
+__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
+__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__DEVICE_VOID__")
+#pragma pop_macro("__FAST_OR_SLOW")
+
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_math_forward_declares.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_math_forward_declares.h
new file mode 100644
index 0000000..c0f1f47
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_math_forward_declares.h
@@ -0,0 +1,284 @@
+/*===- __clang_math_forward_declares.h - Prototypes of __device__ math fns --===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
+#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
+#if !defined(__CUDA__) && !__HIP__
+#error "This file is for CUDA/HIP compilation only."
+#endif
+
+// This file forward-declares some of the math functions we (or the CUDA
+// headers) will define later. We need to do this, and do it before cmath is
+// included, because the standard library may have constexpr math functions.
+// In the absence of a prior __device__ decl, those constexpr functions may
+// become implicitly host+device. host+device functions can't be overloaded,
+// so that would preclude the use of our own __device__ overloads for these
+// functions.
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__                                                             \
+  static __inline__ __attribute__((always_inline)) __attribute__((device))
+
+__DEVICE__ long abs(long);
+__DEVICE__ long long abs(long long);
+__DEVICE__ double abs(double);
+__DEVICE__ float abs(float);
+__DEVICE__ int abs(int);
+__DEVICE__ double acos(double);
+__DEVICE__ float acos(float);
+__DEVICE__ double acosh(double);
+__DEVICE__ float acosh(float);
+__DEVICE__ double asin(double);
+__DEVICE__ float asin(float);
+__DEVICE__ double asinh(double);
+__DEVICE__ float asinh(float);
+__DEVICE__ double atan2(double, double);
+__DEVICE__ float atan2(float, float);
+__DEVICE__ double atan(double);
+__DEVICE__ float atan(float);
+__DEVICE__ double atanh(double);
+__DEVICE__ float atanh(float);
+__DEVICE__ double cbrt(double);
+__DEVICE__ float cbrt(float);
+__DEVICE__ double ceil(double);
+__DEVICE__ float ceil(float);
+__DEVICE__ double copysign(double, double);
+__DEVICE__ float copysign(float, float);
+__DEVICE__ double cos(double);
+__DEVICE__ float cos(float);
+__DEVICE__ double cosh(double);
+__DEVICE__ float cosh(float);
+__DEVICE__ double erfc(double);
+__DEVICE__ float erfc(float);
+__DEVICE__ double erf(double);
+__DEVICE__ float erf(float);
+__DEVICE__ double exp2(double);
+__DEVICE__ float exp2(float);
+__DEVICE__ double exp(double);
+__DEVICE__ float exp(float);
+__DEVICE__ double expm1(double);
+__DEVICE__ float expm1(float);
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
+__DEVICE__ double fdim(double, double);
+__DEVICE__ float fdim(float, float);
+__DEVICE__ double floor(double);
+__DEVICE__ float floor(float);
+__DEVICE__ double fma(double, double, double);
+__DEVICE__ float fma(float, float, float);
+__DEVICE__ double fmax(double, double);
+__DEVICE__ float fmax(float, float);
+__DEVICE__ double fmin(double, double);
+__DEVICE__ float fmin(float, float);
+__DEVICE__ double fmod(double, double);
+__DEVICE__ float fmod(float, float);
+__DEVICE__ int fpclassify(double);
+__DEVICE__ int fpclassify(float);
+__DEVICE__ double frexp(double, int *);
+__DEVICE__ float frexp(float, int *);
+__DEVICE__ double hypot(double, double);
+__DEVICE__ float hypot(float, float);
+__DEVICE__ int ilogb(double);
+__DEVICE__ int ilogb(float);
+#ifdef _MSC_VER
+__DEVICE__ bool isfinite(long double);
+#endif
+__DEVICE__ bool isfinite(double);
+__DEVICE__ bool isfinite(float);
+__DEVICE__ bool isgreater(double, double);
+__DEVICE__ bool isgreaterequal(double, double);
+__DEVICE__ bool isgreaterequal(float, float);
+__DEVICE__ bool isgreater(float, float);
+#ifdef _MSC_VER
+__DEVICE__ bool isinf(long double);
+#endif
+__DEVICE__ bool isinf(double);
+__DEVICE__ bool isinf(float);
+__DEVICE__ bool isless(double, double);
+__DEVICE__ bool islessequal(double, double);
+__DEVICE__ bool islessequal(float, float);
+__DEVICE__ bool isless(float, float);
+__DEVICE__ bool islessgreater(double, double);
+__DEVICE__ bool islessgreater(float, float);
+#ifdef _MSC_VER
+__DEVICE__ bool isnan(long double);
+#endif
+__DEVICE__ bool isnan(double);
+__DEVICE__ bool isnan(float);
+__DEVICE__ bool isnormal(double);
+__DEVICE__ bool isnormal(float);
+__DEVICE__ bool isunordered(double, double);
+__DEVICE__ bool isunordered(float, float);
+__DEVICE__ long labs(long);
+__DEVICE__ double ldexp(double, int);
+__DEVICE__ float ldexp(float, int);
+__DEVICE__ double lgamma(double);
+__DEVICE__ float lgamma(float);
+__DEVICE__ long long llabs(long long);
+__DEVICE__ long long llrint(double);
+__DEVICE__ long long llrint(float);
+__DEVICE__ double log10(double);
+__DEVICE__ float log10(float);
+__DEVICE__ double log1p(double);
+__DEVICE__ float log1p(float);
+__DEVICE__ double log2(double);
+__DEVICE__ float log2(float);
+__DEVICE__ double logb(double);
+__DEVICE__ float logb(float);
+__DEVICE__ double log(double);
+__DEVICE__ float log(float);
+__DEVICE__ long lrint(double);
+__DEVICE__ long lrint(float);
+__DEVICE__ long lround(double);
+__DEVICE__ long lround(float);
+__DEVICE__ long long llround(float); // No llround(double).
+__DEVICE__ double modf(double, double *);
+__DEVICE__ float modf(float, float *);
+__DEVICE__ double nan(const char *);
+__DEVICE__ float nanf(const char *);
+__DEVICE__ double nearbyint(double);
+__DEVICE__ float nearbyint(float);
+__DEVICE__ double nextafter(double, double);
+__DEVICE__ float nextafter(float, float);
+__DEVICE__ double pow(double, double);
+__DEVICE__ double pow(double, int);
+__DEVICE__ float pow(float, float);
+__DEVICE__ float pow(float, int);
+__DEVICE__ double remainder(double, double);
+__DEVICE__ float remainder(float, float);
+__DEVICE__ double remquo(double, double, int *);
+__DEVICE__ float remquo(float, float, int *);
+__DEVICE__ double rint(double);
+__DEVICE__ float rint(float);
+__DEVICE__ double round(double);
+__DEVICE__ float round(float);
+__DEVICE__ double scalbln(double, long);
+__DEVICE__ float scalbln(float, long);
+__DEVICE__ double scalbn(double, int);
+__DEVICE__ float scalbn(float, int);
+#ifdef _MSC_VER
+__DEVICE__ bool signbit(long double);
+#endif
+__DEVICE__ bool signbit(double);
+__DEVICE__ bool signbit(float);
+__DEVICE__ double sin(double);
+__DEVICE__ float sin(float);
+__DEVICE__ double sinh(double);
+__DEVICE__ float sinh(float);
+__DEVICE__ double sqrt(double);
+__DEVICE__ float sqrt(float);
+__DEVICE__ double tan(double);
+__DEVICE__ float tan(float);
+__DEVICE__ double tanh(double);
+__DEVICE__ float tanh(float);
+__DEVICE__ double tgamma(double);
+__DEVICE__ float tgamma(float);
+__DEVICE__ double trunc(double);
+__DEVICE__ float trunc(float);
+
+// Notably missing above is nexttoward, which we don't define on
+// the device side because libdevice doesn't give us an implementation, and we
+// don't want to be in the business of writing one ourselves.
+
+// We need to define these overloads in exactly the namespace our standard
+// library uses (including the right inline namespace), otherwise they won't be
+// picked up by other functions in the standard library (e.g. functions in
+// <complex>). Thus the ugliness below.
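As an illustration of why the declarations must land in the right namespace (sketch only, not part of the header; __example is hypothetical):

    __attribute__((device)) float __example(float __x) {
      // Both spellings must resolve to the __device__ overload declared
      // above; the using-declarations below re-export it into std.
      return sqrt(__x) + std::sqrt(__x);
    }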
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+using ::abs;
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isinf;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnan;
+using ::isnormal;
+using ::isunordered;
+using ::labs;
+using ::ldexp;
+using ::lgamma;
+using ::llabs;
+using ::llrint;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::llround;
+using ::modf;
+using ::nan;
+using ::nanf;
+using ::nearbyint;
+using ::nextafter;
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_runtime_wrapper.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_runtime_wrapper.h
new file mode 100644
index 0000000..f401964
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_cuda_runtime_wrapper.h
@@ -0,0 +1,448 @@
+/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ * CUDA headers are implemented in a way that currently makes it
+ * impossible for user code to #include them directly when compiling with
+ * Clang. They present a different view of CUDA-supplied functions
+ * depending on where in NVCC's compilation pipeline the headers are
+ * included. Neither of these modes provides function definitions with
+ * correct attributes, so we use the preprocessor to force the headers
+ * into a form that Clang can use.
+ *
+ * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
+ * this file during every CUDA compilation.
+ */
+
+#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
+#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
+
+#if defined(__CUDA__) && defined(__clang__)
+
+// Include some forward declares that must come before cmath.
+#include <__clang_cuda_math_forward_declares.h>
+
+// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions
+// enabled depend on it to avoid using __float128, which is unsupported in
+// CUDA.
+#define __CUDACC__
+
+// Include some standard headers to avoid CUDA headers including them
+// while some required macros (like __THROW) are in a weird state.
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+#undef __CUDACC__
+
+// Preserve common macros that will be changed below by us or by CUDA
+// headers.
+#pragma push_macro("__THROW")
+#pragma push_macro("__CUDA_ARCH__")
+
+// WARNING: Preprocessor hacks below are based on specific details of
+// CUDA-7.x headers and are not expected to work with any other
+// version of CUDA headers.
+#include "cuda.h"
+#if !defined(CUDA_VERSION)
+#error "cuda.h did not define CUDA_VERSION"
+#elif CUDA_VERSION < 7000
+#error "Unsupported CUDA version!"
+#endif
+
+#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
+#if CUDA_VERSION >= 10000
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#endif
+
+// Make largest subset of device functions available during host
+// compilation -- SM_35 for the time being.
+#ifndef __CUDA_ARCH__
+#define __CUDA_ARCH__ 350
+#endif
+
+#include "__clang_cuda_builtin_vars.h"
+
+// No need for device_launch_parameters.h as __clang_cuda_builtin_vars.h above
+// has taken care of builtin variables declared in the file.
+#define __DEVICE_LAUNCH_PARAMETERS_H__
+
+// {math,device}_functions.h only have declarations of the
+// functions. We don't need them as we're going to pull in their
+// definitions from .hpp files.
+#define __DEVICE_FUNCTIONS_H__
+#define __MATH_FUNCTIONS_H__
+#define __COMMON_FUNCTIONS_H__
+// device_functions_decls is replaced by __clang_cuda_device_functions.h
+// included below.
+#define __DEVICE_FUNCTIONS_DECLS_H__
+
+#undef __CUDACC__
+#if CUDA_VERSION < 9000
+#define __CUDABE__
+#else
+#define __CUDACC__
+#define __CUDA_LIBDEVICE__
+#endif
+// Disables definitions of device-side runtime support stubs in
+// cuda_device_runtime_api.h
+#include "host_defines.h"
+#undef __CUDACC__
+#include "driver_types.h"
+#include "host_config.h"
+
+// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
+// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
+// functional equivalent of what we need.
+#pragma push_macro("nv_weak")
+#define nv_weak weak
+#undef __CUDABE__
+#undef __CUDA_LIBDEVICE__
+#define __CUDACC__
+#include "cuda_runtime.h"
+
+#pragma pop_macro("nv_weak")
+#undef __CUDACC__
+#define __CUDABE__
+
+// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
+// not have at the moment. Emulate them with a builtin memcpy/memset.
+#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
+#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
+
+#if CUDA_VERSION < 9000
+#include "crt/device_runtime.h"
+#endif
+#include "crt/host_runtime.h"
+// device_runtime.h defines __cxa_* macros that will conflict with
+// cxxabi.h.
+// FIXME: redefine these as __device__ functions.
+#undef __cxa_vec_ctor
+#undef __cxa_vec_cctor
+#undef __cxa_vec_dtor
+#undef __cxa_vec_new
+#undef __cxa_vec_new2
+#undef __cxa_vec_new3
+#undef __cxa_vec_delete2
+#undef __cxa_vec_delete
+#undef __cxa_vec_delete3
+#undef __cxa_pure_virtual
+
+// math_functions.hpp expects this host function to be defined on MacOS, but it
+// ends up not being there because of the games we play here. Just define it
+// ourselves; it's simple enough.
+#ifdef __APPLE__
+inline __host__ double __signbitd(double x) {
+  return std::signbit(x);
+}
+#endif
+
+// CUDA 9.1 no longer provides declarations for libdevice functions, so we need
+// to provide our own.
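The division of labor between the two headers pulled in next can be sketched with lines that appear elsewhere in this patch: the declares header provides the raw libdevice entry points, and __clang_cuda_math.h wraps them under the usual libm names.

    __DEVICE__ double __nv_sin(double __a);                      // declares
    __DEVICE__ double sin(double __a) { return __nv_sin(__a); }  // wrapper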
+#include <__clang_cuda_libdevice_declares.h>
+
+// Wrappers for many device-side standard library functions, incl. math
+// functions, became compiler builtins in CUDA-9 and have been removed from the
+// CUDA headers. Clang now provides its own implementation of the wrappers.
+#if CUDA_VERSION >= 9000
+#include <__clang_cuda_device_functions.h>
+#include <__clang_cuda_math.h>
+#endif
+
+// __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
+// counterpart does not do it, so we need to make it empty here to keep
+// following CUDA includes happy.
+#undef __THROW
+#define __THROW
+
+// CUDA 8.0.41 relies on __USE_FAST_MATH__ and __CUDA_PREC_DIV's values.
+// Previous versions used to check whether they are defined or not.
+// CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it
+// here to detect the switch.
+
+#if defined(CU_DEVICE_INVALID)
+#if !defined(__USE_FAST_MATH__)
+#define __USE_FAST_MATH__ 0
+#endif
+
+#if !defined(__CUDA_PREC_DIV)
+#define __CUDA_PREC_DIV 0
+#endif
+#endif
+
+// Temporarily poison __host__ macro to ensure it's not used by any of
+// the headers we're about to include.
+#pragma push_macro("__host__")
+#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+
+// device_functions.hpp and math_functions*.hpp use 'static
+// __forceinline__' (with no __device__) for definitions of device
+// functions. Temporarily redefine __forceinline__ to include
+// __device__.
+#pragma push_macro("__forceinline__")
+#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
+#if CUDA_VERSION < 9000
+#include "device_functions.hpp"
+#endif
+
+// math_functions.hpp uses the __USE_FAST_MATH__ macro to determine whether we
+// get the slow-but-accurate or fast-but-inaccurate versions of functions like
+// sin and exp. This is controlled in clang by -fcuda-approx-transcendentals.
+//
+// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
+// slow divides), so we need to scope our define carefully here.
+#pragma push_macro("__USE_FAST_MATH__")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __USE_FAST_MATH__ 1
+#endif
+
+#if CUDA_VERSION >= 9000
+// CUDA-9.2 needs host-side memcpy for some host functions in
+// device_functions.hpp
+#if CUDA_VERSION >= 9020
+#include <string.h>
+#endif
+#include "crt/math_functions.hpp"
+#else
+#include "math_functions.hpp"
+#endif
+
+#pragma pop_macro("__USE_FAST_MATH__")
+
+#if CUDA_VERSION < 9000
+#include "math_functions_dbl_ptx3.hpp"
+#endif
+#pragma pop_macro("__forceinline__")
+
+// Pull in host-only functions that are only available when neither
+// __CUDACC__ nor __CUDABE__ are defined.
+#undef __MATH_FUNCTIONS_HPP__
+#undef __CUDABE__
+#if CUDA_VERSION < 9000
+#include "math_functions.hpp"
+#endif
+// Alas, additional overloads for these functions are hard to get to.
+// Considering that we only need these overloads for a few functions,
+// we can provide them here.
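A usage sketch for the overloads defined just below (hypothetical host code, not part of the patch): once this wrapper has been -include'd, host code can spell the float variants without the 'f' suffix.

    float __r = rsqrt(2.0f);    // dispatches to rsqrtf(2.0f)
    float __p = normcdf(0.5f);  // dispatches to normcdff(0.5f)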
+static inline float rsqrt(float __a) { return rsqrtf(__a); }
+static inline float rcbrt(float __a) { return rcbrtf(__a); }
+static inline float sinpi(float __a) { return sinpif(__a); }
+static inline float cospi(float __a) { return cospif(__a); }
+static inline void sincospi(float __a, float *__b, float *__c) {
+  return sincospif(__a, __b, __c);
+}
+static inline float erfcinv(float __a) { return erfcinvf(__a); }
+static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
+static inline float normcdf(float __a) { return normcdff(__a); }
+static inline float erfcx(float __a) { return erfcxf(__a); }
+
+#if CUDA_VERSION < 9000
+// For some reason single-argument variant is not always declared by
+// CUDA headers. Alas, device_functions.hpp included below needs it.
+static inline __device__ void __brkpt(int __c) { __brkpt(); }
+#endif
+
+// Now include *.hpp with definitions of various GPU functions. Alas,
+// a lot of things get declared/defined with __host__ attribute which
+// we don't want and we have to define it out. We also have to include
+// {device,math}_functions.hpp again in order to extract the other
+// branch of #if/else inside.
+#define __host__
+#undef __CUDABE__
+#define __CUDACC__
+#if CUDA_VERSION >= 9000
+// Some atomic functions became compiler builtins in CUDA-9, so we need their
+// declarations.
+#include "device_atomic_functions.h"
+#endif
+#undef __DEVICE_FUNCTIONS_HPP__
+#include "device_atomic_functions.hpp"
+#if CUDA_VERSION >= 9000
+#include "crt/device_functions.hpp"
+#include "crt/device_double_functions.hpp"
+#else
+#include "device_functions.hpp"
+#define __CUDABE__
+#include "device_double_functions.h"
+#undef __CUDABE__
+#endif
+#include "sm_20_atomic_functions.hpp"
+#include "sm_20_intrinsics.hpp"
+#include "sm_32_atomic_functions.hpp"
+
+// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h. These define the
+// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to
+// define them using builtins so that the optimizer can reason about and across
+// these instructions. In particular, using intrinsics for ldg gets us the
+// [addr+imm] addressing mode, which, although it doesn't actually exist in the
+// hardware, seems to generate faster machine code because ptxas can more easily
+// reason about our code.
+
+#if CUDA_VERSION >= 8000
+#pragma push_macro("__CUDA_ARCH__")
+#undef __CUDA_ARCH__
+#include "sm_60_atomic_functions.hpp"
+#include "sm_61_intrinsics.hpp"
+#pragma pop_macro("__CUDA_ARCH__")
+#endif
+
+#undef __MATH_FUNCTIONS_HPP__
+
+// math_functions.hpp defines ::signbit as a __host__ __device__ function. This
+// conflicts with libstdc++'s constexpr ::signbit, so we have to rename
+// math_functions.hpp's ::signbit. It's guarded by #undef signbit, but that's
+// conditional on __GNUC__. :)
+#pragma push_macro("signbit")
+#pragma push_macro("__GNUC__")
+#undef __GNUC__
+#define signbit __ignored_cuda_signbit
+
+// CUDA-9 omits device-side definitions of some math functions if it sees
+// include guard from math.h wrapper from libstdc++. We have to undo the header
+// guard temporarily to get the definitions we need.
+#pragma push_macro("_GLIBCXX_MATH_H") +#pragma push_macro("_LIBCPP_VERSION") +#if CUDA_VERSION >= 9000 +#undef _GLIBCXX_MATH_H +// We also need to undo another guard that checks for libc++ 3.8+ +#ifdef _LIBCPP_VERSION +#define _LIBCPP_VERSION 3700 +#endif +#endif + +#if CUDA_VERSION >= 9000 +#include "crt/math_functions.hpp" +#else +#include "math_functions.hpp" +#endif +#pragma pop_macro("_GLIBCXX_MATH_H") +#pragma pop_macro("_LIBCPP_VERSION") +#pragma pop_macro("__GNUC__") +#pragma pop_macro("signbit") + +#pragma pop_macro("__host__") + +#include "texture_indirect_functions.h" + +// Restore state of __CUDA_ARCH__ and __THROW we had on entry. +#pragma pop_macro("__CUDA_ARCH__") +#pragma pop_macro("__THROW") + +// Set up compiler macros expected to be seen during compilation. +#undef __CUDABE__ +#define __CUDACC__ + +extern "C" { +// Device-side CUDA system calls. +// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls +// We need these declarations and wrappers for device-side +// malloc/free/printf calls to work without relying on +// -fcuda-disable-target-call-checks option. +__device__ int vprintf(const char *, const char *); +__device__ void free(void *) __attribute((nothrow)); +__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc)); + +// __assertfail() used to have a `noreturn` attribute. Unfortunately that +// contributed to triggering the longstanding bug in ptxas when assert was used +// in sufficiently convoluted code. See +// https://bugs.llvm.org/show_bug.cgi?id=27738 for the details. +__device__ void __assertfail(const char *__message, const char *__file, + unsigned __line, const char *__function, + size_t __charSize); + +// In order for standard assert() macro on linux to work we need to +// provide device-side __assert_fail() +__device__ static inline void __assert_fail(const char *__message, + const char *__file, unsigned __line, + const char *__function) { + __assertfail(__message, __file, __line, __function, sizeof(char)); +} + +// Clang will convert printf into vprintf, but we still need +// device-side declaration for it. +__device__ int printf(const char *, ...); +} // extern "C" + +// We also need device-side std::malloc and std::free. +namespace std { +__device__ static inline void free(void *__ptr) { ::free(__ptr); } +__device__ static inline void *malloc(size_t __size) { + return ::malloc(__size); +} +} // namespace std + +// Out-of-line implementations from __clang_cuda_builtin_vars.h. These need to +// come after we've pulled in the definition of uint3 and dim3. 
+
+__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
+__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
+  return {x, y, z};
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
+  return {x, y, z};
+}
+
+__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
+__device__ inline __cuda_builtin_blockDim_t::operator uint3() const {
+  return {x, y, z};
+}
+
+__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
+__device__ inline __cuda_builtin_gridDim_t::operator uint3() const {
+  return {x, y, z};
+}
+
+#include <__clang_cuda_cmath.h>
+#include <__clang_cuda_intrinsics.h>
+#include <__clang_cuda_complex_builtins.h>
+
+// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
+// mode, giving them their "proper" types of dim3 and uint3. This is
+// incompatible with the types we give in __clang_cuda_builtin_vars.h. As a
+// hack, force-include the header (nvcc doesn't include it by default) but
+// redefine dim3 and uint3 to our builtin types. (Thankfully dim3 and uint3 are
+// only used here for the redeclarations of blockDim and threadIdx.)
+#pragma push_macro("dim3")
+#pragma push_macro("uint3")
+#define dim3 __cuda_builtin_blockDim_t
+#define uint3 __cuda_builtin_threadIdx_t
+#include "curand_mtgp32_kernel.h"
+#pragma pop_macro("dim3")
+#pragma pop_macro("uint3")
+#pragma pop_macro("__USE_FAST_MATH__")
+#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
+
+// CUDA runtime uses this undocumented function to access kernel launch
+// configuration. The declaration is in crt/device_functions.h but that file
+// includes a lot of other stuff we don't want. Instead, we'll provide our own
+// declaration for it here.
+#if CUDA_VERSION >= 9020
+extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
+                                                size_t sharedMem = 0,
+                                                void *stream = 0);
+#endif
+
+#endif // __CUDA__
+#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_cmath.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_cmath.h
new file mode 100644
index 0000000..d488db0
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_cmath.h
@@ -0,0 +1,842 @@
+/*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_CMATH_H__
+#define __CLANG_HIP_CMATH_H__
+
+#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if !defined(__HIPCC_RTC__)
+#if defined(__cplusplus)
+#include <limits>
+#include <type_traits>
+#include <utility>
+#endif
+#include <limits.h>
+#include <stdint.h>
+#endif // !defined(__HIPCC_RTC__)
+
+#pragma push_macro("__DEVICE__")
+#pragma push_macro("__CONSTEXPR__")
+#ifdef __OPENMP_AMDGCN__
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#define __CONSTEXPR__ constexpr
+#else
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+#define __CONSTEXPR__
+#endif // __OPENMP_AMDGCN__
+
+// Start with functions that cannot be defined by DEF macros below.
+#if defined(__cplusplus)
+#if defined __OPENMP_AMDGCN__
+__DEVICE__ __CONSTEXPR__ float fabs(float __x) { return ::fabsf(__x); }
+__DEVICE__ __CONSTEXPR__ float sin(float __x) { return ::sinf(__x); }
+__DEVICE__ __CONSTEXPR__ float cos(float __x) { return ::cosf(__x); }
+#endif
+__DEVICE__ __CONSTEXPR__ double abs(double __x) { return ::fabs(__x); }
+__DEVICE__ __CONSTEXPR__ float abs(float __x) { return ::fabsf(__x); }
+__DEVICE__ __CONSTEXPR__ long long abs(long long __n) { return ::llabs(__n); }
+__DEVICE__ __CONSTEXPR__ long abs(long __n) { return ::labs(__n); }
+__DEVICE__ __CONSTEXPR__ float fma(float __x, float __y, float __z) {
+  return ::fmaf(__x, __y, __z);
+}
+#if !defined(__HIPCC_RTC__)
+// The value returned by fpclassify is platform dependent, therefore it is not
+// supported by hipRTC.
+__DEVICE__ __CONSTEXPR__ int fpclassify(float __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ __CONSTEXPR__ int fpclassify(double __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+#endif // !defined(__HIPCC_RTC__)
+
+__DEVICE__ __CONSTEXPR__ float frexp(float __arg, int *__exp) {
+  return ::frexpf(__arg, __exp);
+}
+
+#if defined(__OPENMP_AMDGCN__)
+// For OpenMP we work around some old system headers that have non-conforming
+// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
+// this by providing two versions of these functions, differing only in the
+// return type. To avoid conflicting definitions we disable implicit base
+// function generation. That means we will end up with two specializations, one
+// per type, but only one has a base function defined by the system header.
+#pragma omp begin declare variant match(                                       \
+    implementation = {extension(disable_implicit_base)})
+
+// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
+//        add a suffix. This means we would clash with the names of the variants
+//        (note that we do not create implicit base functions here). To avoid
+//        this clash we add a new trait to some of them that is always true
+//        (this is LLVM after all ;)). It will only influence the mangled name
+//        of the variants inside the inner region and avoid the clash.
+#pragma omp begin declare variant match(implementation = {vendor(llvm)}) + +__DEVICE__ __CONSTEXPR__ int isinf(float __x) { return ::__isinff(__x); } +__DEVICE__ __CONSTEXPR__ int isinf(double __x) { return ::__isinf(__x); } +__DEVICE__ __CONSTEXPR__ int isfinite(float __x) { return ::__finitef(__x); } +__DEVICE__ __CONSTEXPR__ int isfinite(double __x) { return ::__finite(__x); } +__DEVICE__ __CONSTEXPR__ int isnan(float __x) { return ::__isnanf(__x); } +__DEVICE__ __CONSTEXPR__ int isnan(double __x) { return ::__isnan(__x); } + +#pragma omp end declare variant +#endif // defined(__OPENMP_AMDGCN__) + +__DEVICE__ __CONSTEXPR__ bool isinf(float __x) { return ::__isinff(__x); } +__DEVICE__ __CONSTEXPR__ bool isinf(double __x) { return ::__isinf(__x); } +__DEVICE__ __CONSTEXPR__ bool isfinite(float __x) { return ::__finitef(__x); } +__DEVICE__ __CONSTEXPR__ bool isfinite(double __x) { return ::__finite(__x); } +__DEVICE__ __CONSTEXPR__ bool isnan(float __x) { return ::__isnanf(__x); } +__DEVICE__ __CONSTEXPR__ bool isnan(double __x) { return ::__isnan(__x); } + +#if defined(__OPENMP_AMDGCN__) +#pragma omp end declare variant +#endif // defined(__OPENMP_AMDGCN__) + +__DEVICE__ __CONSTEXPR__ bool isgreater(float __x, float __y) { + return __builtin_isgreater(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isgreater(double __x, double __y) { + return __builtin_isgreater(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isgreaterequal(float __x, float __y) { + return __builtin_isgreaterequal(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isgreaterequal(double __x, double __y) { + return __builtin_isgreaterequal(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isless(float __x, float __y) { + return __builtin_isless(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isless(double __x, double __y) { + return __builtin_isless(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool islessequal(float __x, float __y) { + return __builtin_islessequal(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool islessequal(double __x, double __y) { + return __builtin_islessequal(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool islessgreater(float __x, float __y) { + return __builtin_islessgreater(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool islessgreater(double __x, double __y) { + return __builtin_islessgreater(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isnormal(float __x) { + return __builtin_isnormal(__x); +} +__DEVICE__ __CONSTEXPR__ bool isnormal(double __x) { + return __builtin_isnormal(__x); +} +__DEVICE__ __CONSTEXPR__ bool isunordered(float __x, float __y) { + return __builtin_isunordered(__x, __y); +} +__DEVICE__ __CONSTEXPR__ bool isunordered(double __x, double __y) { + return __builtin_isunordered(__x, __y); +} +__DEVICE__ __CONSTEXPR__ float modf(float __x, float *__iptr) { + return ::modff(__x, __iptr); +} +__DEVICE__ __CONSTEXPR__ float pow(float __base, int __iexp) { + return ::powif(__base, __iexp); +} +__DEVICE__ __CONSTEXPR__ double pow(double __base, int __iexp) { + return ::powi(__base, __iexp); +} +__DEVICE__ __CONSTEXPR__ float remquo(float __x, float __y, int *__quo) { + return ::remquof(__x, __y, __quo); +} +__DEVICE__ __CONSTEXPR__ float scalbln(float __x, long int __n) { + return ::scalblnf(__x, __n); +} +__DEVICE__ __CONSTEXPR__ bool signbit(float __x) { return ::__signbitf(__x); } +__DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); } + +// Notably missing above is nexttoward. 
We omit it because +// ocml doesn't provide an implementation, and we don't want to be in the +// business of implementing tricky libm functions in this header. + +// Other functions. +__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y, + _Float16 __z) { + return __ocml_fma_f16(__x, __y, __z); +} +__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) { + return __ocml_pown_f16(__base, __iexp); +} + +#ifndef __OPENMP_AMDGCN__ +// BEGIN DEF_FUN and HIP_OVERLOAD + +// BEGIN DEF_FUN + +#pragma push_macro("__DEF_FUN1") +#pragma push_macro("__DEF_FUN2") +#pragma push_macro("__DEF_FUN2_FI") + +// Define cmath functions with float argument and returns __retty. +#define __DEF_FUN1(__retty, __func) \ + __DEVICE__ __CONSTEXPR__ __retty __func(float __x) { return __func##f(__x); } + +// Define cmath functions with two float arguments and returns __retty. +#define __DEF_FUN2(__retty, __func) \ + __DEVICE__ __CONSTEXPR__ __retty __func(float __x, float __y) { \ + return __func##f(__x, __y); \ + } + +// Define cmath functions with a float and an int argument and returns __retty. +#define __DEF_FUN2_FI(__retty, __func) \ + __DEVICE__ __CONSTEXPR__ __retty __func(float __x, int __y) { \ + return __func##f(__x, __y); \ + } + +__DEF_FUN1(float, acos) +__DEF_FUN1(float, acosh) +__DEF_FUN1(float, asin) +__DEF_FUN1(float, asinh) +__DEF_FUN1(float, atan) +__DEF_FUN2(float, atan2) +__DEF_FUN1(float, atanh) +__DEF_FUN1(float, cbrt) +__DEF_FUN1(float, ceil) +__DEF_FUN2(float, copysign) +__DEF_FUN1(float, cos) +__DEF_FUN1(float, cosh) +__DEF_FUN1(float, erf) +__DEF_FUN1(float, erfc) +__DEF_FUN1(float, exp) +__DEF_FUN1(float, exp2) +__DEF_FUN1(float, expm1) +__DEF_FUN1(float, fabs) +__DEF_FUN2(float, fdim) +__DEF_FUN1(float, floor) +__DEF_FUN2(float, fmax) +__DEF_FUN2(float, fmin) +__DEF_FUN2(float, fmod) +__DEF_FUN2(float, hypot) +__DEF_FUN1(int, ilogb) +__DEF_FUN2_FI(float, ldexp) +__DEF_FUN1(float, lgamma) +__DEF_FUN1(float, log) +__DEF_FUN1(float, log10) +__DEF_FUN1(float, log1p) +__DEF_FUN1(float, log2) +__DEF_FUN1(float, logb) +__DEF_FUN1(long long, llrint) +__DEF_FUN1(long long, llround) +__DEF_FUN1(long, lrint) +__DEF_FUN1(long, lround) +__DEF_FUN1(float, nearbyint) +__DEF_FUN2(float, nextafter) +__DEF_FUN2(float, pow) +__DEF_FUN2(float, remainder) +__DEF_FUN1(float, rint) +__DEF_FUN1(float, round) +__DEF_FUN2_FI(float, scalbn) +__DEF_FUN1(float, sin) +__DEF_FUN1(float, sinh) +__DEF_FUN1(float, sqrt) +__DEF_FUN1(float, tan) +__DEF_FUN1(float, tanh) +__DEF_FUN1(float, tgamma) +__DEF_FUN1(float, trunc) + +#pragma pop_macro("__DEF_FUN1") +#pragma pop_macro("__DEF_FUN2") +#pragma pop_macro("__DEF_FUN2_FI") + +// END DEF_FUN + +// BEGIN HIP_OVERLOAD + +#pragma push_macro("__HIP_OVERLOAD1") +#pragma push_macro("__HIP_OVERLOAD2") + +// __hip_enable_if::type is a type function which returns __T if __B is true. 
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+
+namespace __hip {
+template <class _Tp> struct is_integral {
+  enum { value = 0 };
+};
+template <> struct is_integral<bool> {
+  enum { value = 1 };
+};
+template <> struct is_integral<char> {
+  enum { value = 1 };
+};
+template <> struct is_integral<signed char> {
+  enum { value = 1 };
+};
+template <> struct is_integral<unsigned char> {
+  enum { value = 1 };
+};
+template <> struct is_integral<wchar_t> {
+  enum { value = 1 };
+};
+template <> struct is_integral<short> {
+  enum { value = 1 };
+};
+template <> struct is_integral<unsigned short> {
+  enum { value = 1 };
+};
+template <> struct is_integral<int> {
+  enum { value = 1 };
+};
+template <> struct is_integral<unsigned int> {
+  enum { value = 1 };
+};
+template <> struct is_integral<long> {
+  enum { value = 1 };
+};
+template <> struct is_integral<unsigned long> {
+  enum { value = 1 };
+};
+template <> struct is_integral<long long> {
+  enum { value = 1 };
+};
+template <> struct is_integral<unsigned long long> {
+  enum { value = 1 };
+};
+
+// ToDo: specializes is_arithmetic<_Float16>
+template <class _Tp> struct is_arithmetic {
+  enum { value = 0 };
+};
+template <> struct is_arithmetic<bool> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<char> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<signed char> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned char> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<wchar_t> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<short> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned short> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<int> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned int> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<long> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned long> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<long long> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned long long> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<float> {
+  enum { value = 1 };
+};
+template <> struct is_arithmetic<double> {
+  enum { value = 1 };
+};
+
+struct true_type {
+  static const __constant__ bool value = true;
+};
+struct false_type {
+  static const __constant__ bool value = false;
+};
+
+template <typename __T, typename __U> struct is_same : public false_type {};
+template <typename __T> struct is_same<__T, __T> : public true_type {};
+
+template <typename __T> struct add_rvalue_reference { typedef __T &&type; };
+
+template <typename __T> typename add_rvalue_reference<__T>::type declval();
+
+// decltype is only available in C++11 and above.
+#if __cplusplus >= 201103L
+// __hip_promote
+template <class _Tp> struct __numeric_type {
+  static void __test(...);
+  static _Float16 __test(_Float16);
+  static float __test(float);
+  static double __test(char);
+  static double __test(int);
+  static double __test(unsigned);
+  static double __test(long);
+  static double __test(unsigned long);
+  static double __test(long long);
+  static double __test(unsigned long long);
+  static double __test(double);
+  // No support for long double, use double instead.
+  static double __test(long double);
+
+  typedef decltype(__test(declval<_Tp>())) type;
+  static const bool value = !is_same<type, void>::value;
+};
+
+template <> struct __numeric_type<void> { static const bool value = true; };
+
+template <class _A1, class _A2 = void, class _A3 = void,
+          bool = __numeric_type<_A1>::value &&__numeric_type<_A2>::value
+              &&__numeric_type<_A3>::value>
+class __promote_imp {
+public:
+  static const bool value = false;
+};
+
+template <class _A1, class _A2, class _A3>
+class __promote_imp<_A1, _A2, _A3, true> {
+private:
+  typedef typename __promote_imp<_A1>::type __type1;
+  typedef typename __promote_imp<_A2>::type __type2;
+  typedef typename __promote_imp<_A3>::type __type3;
+
+public:
+  typedef decltype(__type1() + __type2() + __type3()) type;
+  static const bool value = true;
+};
+
+template <class _A1, class _A2> class __promote_imp<_A1, _A2, void, true> {
+private:
+  typedef typename __promote_imp<_A1>::type __type1;
+  typedef typename __promote_imp<_A2>::type __type2;
+
+public:
+  typedef decltype(__type1() + __type2()) type;
+  static const bool value = true;
+};
+
+template <class _A1> class __promote_imp<_A1, void, void, true> {
+public:
+  typedef typename __numeric_type<_A1>::type type;
+  static const bool value = true;
+};
+
+template <class _A1, class _A2 = void, class _A3 = void>
+class __promote : public __promote_imp<_A1, _A2, _A3> {};
+#endif //__cplusplus >= 201103L
+} // namespace __hip
+
+// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to
+// avoid compilation error due to ambiguity. e.g. floor(5) is resolved with
+// floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn)                                        \
+  template <typename __T>                                                     \
+  __DEVICE__ __CONSTEXPR__                                                    \
+      typename __hip_enable_if<__hip::is_integral<__T>::value, __retty>::type \
+      __fn(__T __x) {                                                         \
+    return ::__fn((double)__x);                                               \
+  }
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer argument to avoid compilation error due to ambiguity. e.g.
+// max(5.0f, 6.0) is resolved with max(double, double).
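+//
+// Illustration (a sketch added for this write-up, not part of the upstream
+// header) of the resolution these macros provide:
+//
+//   floor(5);        // integral argument -> ::floor((double)5)
+//   max(5.0f, 6.0);  // mixed float/double -> max((double)5.0f, (double)6.0)
+//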
+#if __cplusplus >= 201103L
+#define __HIP_OVERLOAD2(__retty, __fn)                                        \
+  template <typename __T1, typename __T2>                                    \
+  __DEVICE__ __CONSTEXPR__ typename __hip_enable_if<                         \
+      __hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value,\
+      typename __hip::__promote<__T1, __T2>::type>::type                     \
+  __fn(__T1 __x, __T2 __y) {                                                 \
+    typedef typename __hip::__promote<__T1, __T2>::type __result_type;       \
+    return __fn((__result_type)__x, (__result_type)__y);                     \
+  }
+#else
+#define __HIP_OVERLOAD2(__retty, __fn)                                       \
+  template <typename __T1, typename __T2>                                    \
+  __DEVICE__ __CONSTEXPR__                                                   \
+      typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&          \
+                                   __hip::is_arithmetic<__T2>::value,        \
+                               __retty>::type                                \
+  __fn(__T1 __x, __T2 __y) {                                                 \
+    return __fn((double)__x, (double)__y);                                   \
+  }
+#endif
+
+__HIP_OVERLOAD1(double, acos)
+__HIP_OVERLOAD1(double, acosh)
+__HIP_OVERLOAD1(double, asin)
+__HIP_OVERLOAD1(double, asinh)
+__HIP_OVERLOAD1(double, atan)
+__HIP_OVERLOAD2(double, atan2)
+__HIP_OVERLOAD1(double, atanh)
+__HIP_OVERLOAD1(double, cbrt)
+__HIP_OVERLOAD1(double, ceil)
+__HIP_OVERLOAD2(double, copysign)
+__HIP_OVERLOAD1(double, cos)
+__HIP_OVERLOAD1(double, cosh)
+__HIP_OVERLOAD1(double, erf)
+__HIP_OVERLOAD1(double, erfc)
+__HIP_OVERLOAD1(double, exp)
+__HIP_OVERLOAD1(double, exp2)
+__HIP_OVERLOAD1(double, expm1)
+__HIP_OVERLOAD1(double, fabs)
+__HIP_OVERLOAD2(double, fdim)
+__HIP_OVERLOAD1(double, floor)
+__HIP_OVERLOAD2(double, fmax)
+__HIP_OVERLOAD2(double, fmin)
+__HIP_OVERLOAD2(double, fmod)
+#if !defined(__HIPCC_RTC__)
+__HIP_OVERLOAD1(int, fpclassify)
+#endif // !defined(__HIPCC_RTC__)
+__HIP_OVERLOAD2(double, hypot)
+__HIP_OVERLOAD1(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater)
+__HIP_OVERLOAD2(bool, isgreaterequal)
+__HIP_OVERLOAD1(bool, isinf)
+__HIP_OVERLOAD2(bool, isless)
+__HIP_OVERLOAD2(bool, islessequal)
+__HIP_OVERLOAD2(bool, islessgreater)
+__HIP_OVERLOAD1(bool, isnan)
+__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered)
+__HIP_OVERLOAD1(double, lgamma)
+__HIP_OVERLOAD1(double, log)
+__HIP_OVERLOAD1(double, log10)
+__HIP_OVERLOAD1(double, log1p)
+__HIP_OVERLOAD1(double, log2)
+__HIP_OVERLOAD1(double, logb)
+__HIP_OVERLOAD1(long long, llrint)
+__HIP_OVERLOAD1(long long, llround)
+__HIP_OVERLOAD1(long, lrint)
+__HIP_OVERLOAD1(long, lround)
+__HIP_OVERLOAD1(double, nearbyint)
+__HIP_OVERLOAD2(double, nextafter)
+__HIP_OVERLOAD2(double, pow)
+__HIP_OVERLOAD2(double, remainder)
+__HIP_OVERLOAD1(double, rint)
+__HIP_OVERLOAD1(double, round)
+__HIP_OVERLOAD1(bool, signbit)
+__HIP_OVERLOAD1(double, sin)
+__HIP_OVERLOAD1(double, sinh)
+__HIP_OVERLOAD1(double, sqrt)
+__HIP_OVERLOAD1(double, tan)
+__HIP_OVERLOAD1(double, tanh)
+__HIP_OVERLOAD1(double, tgamma)
+__HIP_OVERLOAD1(double, trunc)
+
+// Overload these but don't add them to std, they are not part of cmath.
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+// Additional Overloads that don't quite match HIP_OVERLOAD.
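+//
+// Illustration (a sketch added for this write-up, not part of the upstream
+// header): the three-argument fma below promotes mixed arguments through
+// __hip::__promote, e.g.
+//
+//   fma(1.0f, 2, 3.0);  // __promote<float, int, double>::type is double,
+//                       // so this calls ::fma(1.0, 2.0, 3.0)
+//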
+#if __cplusplus >= 201103L
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__ __CONSTEXPR__ typename __hip_enable_if<
+    __hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value &&
+        __hip::is_arithmetic<__T3>::value,
+    typename __hip::__promote<__T1, __T2, __T3>::type>::type
+fma(__T1 __x, __T2 __y, __T3 __z) {
+  typedef typename __hip::__promote<__T1, __T2, __T3>::type __result_type;
+  return ::fma((__result_type)__x, (__result_type)__y, (__result_type)__z);
+}
+#else
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
+                                 __hip::is_arithmetic<__T2>::value &&
+                                 __hip::is_arithmetic<__T3>::value,
+                             double>::type
+    fma(__T1 __x, __T2 __y, __T3 __z) {
+  return ::fma((double)__x, (double)__y, (double)__z);
+}
+#endif
+
+template <typename __T>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
+    frexp(__T __x, int *__exp) {
+  return ::frexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
+    ldexp(__T __x, int __exp) {
+  return ::ldexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
+    modf(__T __x, double *__exp) {
+  return ::modf((double)__x, __exp);
+}
+
+#if __cplusplus >= 201103L
+template <typename __T1, typename __T2>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
+                                 __hip::is_arithmetic<__T2>::value,
+                             typename __hip::__promote<__T1, __T2>::type>::type
+    remquo(__T1 __x, __T2 __y, int *__quo) {
+  typedef typename __hip::__promote<__T1, __T2>::type __result_type;
+  return ::remquo((__result_type)__x, (__result_type)__y, __quo);
+}
+#else
+template <typename __T1, typename __T2>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
+                                 __hip::is_arithmetic<__T2>::value,
+                             double>::type
+    remquo(__T1 __x, __T2 __y, int *__quo) {
+  return ::remquo((double)__x, (double)__y, __quo);
+}
+#endif
+
+template <typename __T>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
+    scalbln(__T __x, long int __exp) {
+  return ::scalbln((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ __CONSTEXPR__
+    typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
+    scalbn(__T __x, int __exp) {
+  return ::scalbn((double)__x, __exp);
+}
+
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+
+// END HIP_OVERLOAD
+
+// END DEF_FUN and HIP_OVERLOAD
+
+#endif // ifndef __OPENMP_AMDGCN__
+#endif // defined(__cplusplus)
+
+#ifndef __OPENMP_AMDGCN__
+// Define these overloads inside the namespace our standard library uses.
+#if !defined(__HIPCC_RTC__)
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif // _LIBCPP_BEGIN_NAMESPACE_STD
+
+// Pull the new overloads we defined above into namespace std.
+// using ::abs; - This may be considered for C++.
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnormal;
+using ::isunordered;
+using ::ldexp;
+using ::lgamma;
+using ::llrint;
+using ::llround;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::modf;
+// using ::nan; - This may be considered for C++.
+// using ::nanf; - This may be considered for C++.
+// using ::nanl; - This is not yet defined.
+using ::nearbyint;
+using ::nextafter;
+// using ::nexttoward; - Omit this since we do not have a definition.
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+
+// Well this is fun: We need to pull these symbols in for libc++, but we can't
+// pull them in with libstdc++, because its ::isinf and ::isnan are different
+// than its std::isinf and std::isnan.
+#ifndef __GLIBCXX__
+using ::isinf;
+using ::isnan;
+#endif
+
+// Finally, pull the "foobarf" functions that HIP defines into std.
+using ::acosf;
+using ::acoshf;
+using ::asinf;
+using ::asinhf;
+using ::atan2f;
+using ::atanf;
+using ::atanhf;
+using ::cbrtf;
+using ::ceilf;
+using ::copysignf;
+using ::cosf;
+using ::coshf;
+using ::erfcf;
+using ::erff;
+using ::exp2f;
+using ::expf;
+using ::expm1f;
+using ::fabsf;
+using ::fdimf;
+using ::floorf;
+using ::fmaf;
+using ::fmaxf;
+using ::fminf;
+using ::fmodf;
+using ::frexpf;
+using ::hypotf;
+using ::ilogbf;
+using ::ldexpf;
+using ::lgammaf;
+using ::llrintf;
+using ::llroundf;
+using ::log10f;
+using ::log1pf;
+using ::log2f;
+using ::logbf;
+using ::logf;
+using ::lrintf;
+using ::lroundf;
+using ::modff;
+using ::nearbyintf;
+using ::nextafterf;
+// using ::nexttowardf; - Omit this since we do not have a definition.
+using ::powf;
+using ::remainderf;
+using ::remquof;
+using ::rintf;
+using ::roundf;
+using ::scalblnf;
+using ::scalbnf;
+using ::sinf;
+using ::sinhf;
+using ::sqrtf;
+using ::tanf;
+using ::tanhf;
+using ::tgammaf;
+using ::truncf;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
+} // namespace std
+#endif // _LIBCPP_END_NAMESPACE_STD
+#endif // !defined(__HIPCC_RTC__)
+
+// Define device-side math functions from <ymath.h> on MSVC.
+#if !defined(__HIPCC_RTC__)
+#if defined(_MSC_VER)
+
+// Before VS2019, `<ymath.h>` is also included in `<limits>` and other headers.
+// But, from VS2019, it's only included in `<complex>`. Need to include
+// `<ymath.h>` here to ensure C functions declared there won't be marked as
+// `__host__` and `__device__` through `<complex>` wrapper.
+#include <ymath.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif // defined(__cplusplus)
+__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Cosh(double x,
+                                                                    double y) {
+  return cosh(x) * y;
+}
+__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FCosh(float x,
+                                                                    float y) {
+  return coshf(x) * y;
+}
+__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _Dtest(double *p) {
+  return fpclassify(*p);
+}
+__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _FDtest(float *p) {
+  return fpclassify(*p);
+}
+__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Sinh(double x,
+                                                                    double y) {
+  return sinh(x) * y;
+}
+__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FSinh(float x,
+                                                                    float y) {
+  return sinhf(x) * y;
+}
+#if defined(__cplusplus)
+}
+#endif // defined(__cplusplus)
+#endif // defined(_MSC_VER)
+#endif // !defined(__HIPCC_RTC__)
+#endif // ifndef __OPENMP_AMDGCN__
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__CONSTEXPR__")
+
+#endif // __CLANG_HIP_CMATH_H__
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_libdevice_declares.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_libdevice_declares.h
new file mode 100644
index 0000000..8be848b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_libdevice_declares.h
@@ -0,0 +1,346 @@
+/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// BEGIN FLOAT
+__device__ __attribute__((const)) float __ocml_acos_f32(float);
+__device__ __attribute__((pure)) float __ocml_acosh_f32(float);
+__device__ __attribute__((const)) float __ocml_asin_f32(float);
+__device__ __attribute__((pure)) float __ocml_asinh_f32(float);
+__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);
+__device__ __attribute__((const)) float __ocml_atan_f32(float);
+__device__ __attribute__((pure)) float __ocml_atanh_f32(float);
+__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_ceil_f32(float);
+__device__ __attribute__((const)) __device__ float __ocml_copysign_f32(float,
+                                                                        float);
+__device__ float __ocml_cos_f32(float);
+__device__ float __ocml_native_cos_f32(float);
+__device__ __attribute__((pure)) __device__ float __ocml_cosh_f32(float);
+__device__ float __ocml_cospi_f32(float);
+__device__ float __ocml_i0_f32(float);
+__device__ float __ocml_i1_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfc_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);
+__device__ __attribute__((pure)) float __ocml_erf_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp2_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp_f32(float);
+__device__ __attribute__((pure))
float __ocml_native_exp_f32(float); +__device__ __attribute__((pure)) float __ocml_expm1_f32(float); +__device__ __attribute__((const)) float __ocml_fabs_f32(float); +__device__ __attribute__((const)) float __ocml_fdim_f32(float, float); +__device__ __attribute__((const)) float __ocml_floor_f32(float); +__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float); +__device__ __attribute__((const)) float __ocml_fmax_f32(float, float); +__device__ __attribute__((const)) float __ocml_fmin_f32(float, float); +__device__ __attribute__((const)) __device__ float __ocml_fmod_f32(float, + float); +__device__ float __ocml_frexp_f32(float, + __attribute__((address_space(5))) int *); +__device__ __attribute__((const)) float __ocml_hypot_f32(float, float); +__device__ __attribute__((const)) int __ocml_ilogb_f32(float); +__device__ __attribute__((const)) int __ocml_isfinite_f32(float); +__device__ __attribute__((const)) int __ocml_isinf_f32(float); +__device__ __attribute__((const)) int __ocml_isnan_f32(float); +__device__ float __ocml_j0_f32(float); +__device__ float __ocml_j1_f32(float); +__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int); +__device__ float __ocml_lgamma_f32(float); +__device__ __attribute__((pure)) float __ocml_log10_f32(float); +__device__ __attribute__((pure)) float __ocml_native_log10_f32(float); +__device__ __attribute__((pure)) float __ocml_log1p_f32(float); +__device__ __attribute__((pure)) float __ocml_log2_f32(float); +__device__ __attribute__((pure)) float __ocml_native_log2_f32(float); +__device__ __attribute__((const)) float __ocml_logb_f32(float); +__device__ __attribute__((pure)) float __ocml_log_f32(float); +__device__ __attribute__((pure)) float __ocml_native_log_f32(float); +__device__ float __ocml_modf_f32(float, + __attribute__((address_space(5))) float *); +__device__ __attribute__((const)) float __ocml_nearbyint_f32(float); +__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float); +__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float); +__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float, + float); +__device__ __attribute__((pure)) float __ocml_ncdf_f32(float); +__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float); +__device__ __attribute__((pure)) float __ocml_pow_f32(float, float); +__device__ __attribute__((pure)) float __ocml_pown_f32(float, int); +__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float); +__device__ __attribute__((const)) float __ocml_remainder_f32(float, float); +__device__ float __ocml_remquo_f32(float, float, + __attribute__((address_space(5))) int *); +__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float); +__device__ __attribute__((const)) float __ocml_rint_f32(float); +__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float); +__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float, + float); +__device__ __attribute__((const)) float __ocml_round_f32(float); +__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float); +__device__ __attribute__((const)) float __ocml_scalb_f32(float, float); +__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int); +__device__ __attribute__((const)) int __ocml_signbit_f32(float); +__device__ float __ocml_sincos_f32(float, + __attribute__((address_space(5))) float *); +__device__ float __ocml_sincospi_f32(float, + __attribute__((address_space(5))) float *); +__device__ float __ocml_sin_f32(float); +__device__ float 
__ocml_native_sin_f32(float); +__device__ __attribute__((pure)) float __ocml_sinh_f32(float); +__device__ float __ocml_sinpi_f32(float); +__device__ __attribute__((const)) float __ocml_sqrt_f32(float); +__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float); +__device__ float __ocml_tan_f32(float); +__device__ __attribute__((pure)) float __ocml_tanh_f32(float); +__device__ float __ocml_tgamma_f32(float); +__device__ __attribute__((const)) float __ocml_trunc_f32(float); +__device__ float __ocml_y0_f32(float); +__device__ float __ocml_y1_f32(float); + +// BEGIN INTRINSICS +__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float); +__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float); +__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float); +__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float); +__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float); +__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float); +__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float); +__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float); +__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float); +__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float); +__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float); +__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float); +__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float); +__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float); +__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float); +__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float); +__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float); +__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float); +__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float); +__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float); +__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float); +__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float); +__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float); +__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float); + +__device__ inline __attribute__((const)) float +__llvm_amdgcn_cos_f32(float __x) { + return __builtin_amdgcn_cosf(__x); +} +__device__ inline __attribute__((const)) float +__llvm_amdgcn_rcp_f32(float __x) { + return __builtin_amdgcn_rcpf(__x); +} +__device__ inline __attribute__((const)) float +__llvm_amdgcn_rsq_f32(float __x) { + return __builtin_amdgcn_rsqf(__x); +} +__device__ inline __attribute__((const)) float +__llvm_amdgcn_sin_f32(float __x) { + return __builtin_amdgcn_sinf(__x); +} +// END INTRINSICS +// END FLOAT + +// BEGIN DOUBLE +__device__ __attribute__((const)) double __ocml_acos_f64(double); +__device__ __attribute__((pure)) double __ocml_acosh_f64(double); +__device__ __attribute__((const)) double __ocml_asin_f64(double); +__device__ __attribute__((pure)) double __ocml_asinh_f64(double); +__device__ __attribute__((const)) double __ocml_atan2_f64(double, double); +__device__ __attribute__((const)) double __ocml_atan_f64(double); +__device__ __attribute__((pure)) double __ocml_atanh_f64(double); +__device__ __attribute__((pure)) double __ocml_cbrt_f64(double); +__device__ __attribute__((const)) double 
__ocml_ceil_f64(double); +__device__ __attribute__((const)) double __ocml_copysign_f64(double, double); +__device__ double __ocml_cos_f64(double); +__device__ __attribute__((pure)) double __ocml_cosh_f64(double); +__device__ double __ocml_cospi_f64(double); +__device__ double __ocml_i0_f64(double); +__device__ double __ocml_i1_f64(double); +__device__ __attribute__((pure)) double __ocml_erfc_f64(double); +__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double); +__device__ __attribute__((pure)) double __ocml_erfcx_f64(double); +__device__ __attribute__((pure)) double __ocml_erf_f64(double); +__device__ __attribute__((pure)) double __ocml_erfinv_f64(double); +__device__ __attribute__((pure)) double __ocml_exp10_f64(double); +__device__ __attribute__((pure)) double __ocml_exp2_f64(double); +__device__ __attribute__((pure)) double __ocml_exp_f64(double); +__device__ __attribute__((pure)) double __ocml_expm1_f64(double); +__device__ __attribute__((const)) double __ocml_fabs_f64(double); +__device__ __attribute__((const)) double __ocml_fdim_f64(double, double); +__device__ __attribute__((const)) double __ocml_floor_f64(double); +__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double); +__device__ __attribute__((const)) double __ocml_fmax_f64(double, double); +__device__ __attribute__((const)) double __ocml_fmin_f64(double, double); +__device__ __attribute__((const)) double __ocml_fmod_f64(double, double); +__device__ double __ocml_frexp_f64(double, + __attribute__((address_space(5))) int *); +__device__ __attribute__((const)) double __ocml_hypot_f64(double, double); +__device__ __attribute__((const)) int __ocml_ilogb_f64(double); +__device__ __attribute__((const)) int __ocml_isfinite_f64(double); +__device__ __attribute__((const)) int __ocml_isinf_f64(double); +__device__ __attribute__((const)) int __ocml_isnan_f64(double); +__device__ double __ocml_j0_f64(double); +__device__ double __ocml_j1_f64(double); +__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int); +__device__ double __ocml_lgamma_f64(double); +__device__ __attribute__((pure)) double __ocml_log10_f64(double); +__device__ __attribute__((pure)) double __ocml_log1p_f64(double); +__device__ __attribute__((pure)) double __ocml_log2_f64(double); +__device__ __attribute__((const)) double __ocml_logb_f64(double); +__device__ __attribute__((pure)) double __ocml_log_f64(double); +__device__ double __ocml_modf_f64(double, + __attribute__((address_space(5))) double *); +__device__ __attribute__((const)) double __ocml_nearbyint_f64(double); +__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double); +__device__ __attribute__((const)) double __ocml_len3_f64(double, double, + double); +__device__ __attribute__((const)) double __ocml_len4_f64(double, double, double, + double); +__device__ __attribute__((pure)) double __ocml_ncdf_f64(double); +__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double); +__device__ __attribute__((pure)) double __ocml_pow_f64(double, double); +__device__ __attribute__((pure)) double __ocml_pown_f64(double, int); +__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double); +__device__ __attribute__((const)) double __ocml_remainder_f64(double, double); +__device__ double __ocml_remquo_f64(double, double, + __attribute__((address_space(5))) int *); +__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double); +__device__ __attribute__((const)) double __ocml_rint_f64(double); +__device__ __attribute__((const)) double 
__ocml_rlen3_f64(double, double, + double); +__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double, + double, double); +__device__ __attribute__((const)) double __ocml_round_f64(double); +__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double); +__device__ __attribute__((const)) double __ocml_scalb_f64(double, double); +__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int); +__device__ __attribute__((const)) int __ocml_signbit_f64(double); +__device__ double __ocml_sincos_f64(double, + __attribute__((address_space(5))) double *); +__device__ double +__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *); +__device__ double __ocml_sin_f64(double); +__device__ __attribute__((pure)) double __ocml_sinh_f64(double); +__device__ double __ocml_sinpi_f64(double); +__device__ __attribute__((const)) double __ocml_sqrt_f64(double); +__device__ double __ocml_tan_f64(double); +__device__ __attribute__((pure)) double __ocml_tanh_f64(double); +__device__ double __ocml_tgamma_f64(double); +__device__ __attribute__((const)) double __ocml_trunc_f64(double); +__device__ double __ocml_y0_f64(double); +__device__ double __ocml_y1_f64(double); + +// BEGIN INTRINSICS +__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double); +__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double); +__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double); +__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double); +__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double); +__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double); +__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double); +__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double); +__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double); +__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double); +__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double); +__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double); +__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double); +__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double); +__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double); +__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double); +__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double); +__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double); +__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double); +__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double); +__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double, + double); +__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double, + double); +__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double, + double); +__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double, + double); + +__device__ inline __attribute__((const)) double +__llvm_amdgcn_rcp_f64(double __x) { + return __builtin_amdgcn_rcp(__x); +} +__device__ inline __attribute__((const)) double +__llvm_amdgcn_rsq_f64(double __x) { + return __builtin_amdgcn_rsq(__x); +} + +__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16); +__device__ _Float16 __ocml_cos_f16(_Float16); +__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16); 
+__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16); +__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16, + _Float16); +__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16); +__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16); +__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16); +__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16); +__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16); +__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16); +__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16); +__device__ _Float16 __ocml_sin_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16); +__device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int); + +typedef _Float16 __2f16 __attribute__((ext_vector_type(2))); +typedef short __2i16 __attribute__((ext_vector_type(2))); + +__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b, + float c, bool s); +__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16); +__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16); +__device__ __2f16 __ocml_cos_2f16(__2f16); +__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16); +__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16); +__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16); +__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16); +__device__ __attribute__((const)) +__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16); +__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16); +__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16); +__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16); +__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16); +__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16); +__device__ inline __2f16 +__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL. +{ + return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)); +} +__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16); +__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16); +__device__ __2f16 __ocml_sin_2f16(__2f16); +__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16); +__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16); +__device__ __attribute__((const)) __2f16 __ocml_pown_2f16(__2f16, __2i16); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_math.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_math.h new file mode 100644 index 0000000..9effaa1 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_math.h @@ -0,0 +1,1288 @@ +/*===---- __clang_hip_math.h - Device-side HIP math support ----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if !defined(__HIPCC_RTC__)
+#if defined(__cplusplus)
+#include <algorithm>
+#endif
+#include <limits.h>
+#include <stdint.h>
+#endif // !defined(__HIPCC_RTC__)
+
+#pragma push_macro("__DEVICE__")
+
+#ifdef __OPENMP_AMDGCN__
+#define __DEVICE__ static inline __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+#endif
+
+// A few functions return bool type starting only in C++11.
+#pragma push_macro("__RETURN_TYPE")
+#ifdef __OPENMP_AMDGCN__
+#define __RETURN_TYPE int
+#else
+#if defined(__cplusplus)
+#define __RETURN_TYPE bool
+#else
+#define __RETURN_TYPE int
+#endif
+#endif // __OPENMP_AMDGCN__
+
+#if defined (__cplusplus) && __cplusplus < 201103L
+// emulate static_assert on type sizes
+template<bool>
+struct __compare_result{};
+template<>
+struct __compare_result<true> {
+  static const __device__ bool valid;
+};
+
+__DEVICE__
+void __suppress_unused_warning(bool b){};
+template <unsigned int S, unsigned int T>
+__DEVICE__ void __static_assert_equal_size() {
+  __suppress_unused_warning(__compare_result<S == T>::valid);
+}
+
+#define __static_assert_type_size_equal(A, B) \
+  __static_assert_equal_size<A,B>()
+
+#else
+#define __static_assert_type_size_equal(A,B) \
+  static_assert((A) == (B), "")
+
+#endif
+
+__DEVICE__
+uint64_t __make_mantissa_base8(const char *__tagp) {
+  uint64_t __r = 0;
+  while (__tagp) {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '7')
+      __r = (__r * 8u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa_base10(const char *__tagp) {
+  uint64_t __r = 0;
+  while (__tagp) {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 10u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa_base16(const char *__tagp) {
+  uint64_t __r = 0;
+  while (__tagp) {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 16u) + __tmp - '0';
+    else if (__tmp >= 'a' && __tmp <= 'f')
+      __r = (__r * 16u) + __tmp - 'a' + 10;
+    else if (__tmp >= 'A' && __tmp <= 'F')
+      __r = (__r * 16u) + __tmp - 'A' + 10;
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa(const char *__tagp) {
+  if (!__tagp)
+    return 0u;
+
+  if (*__tagp == '0') {
+    ++__tagp;
+
+    if (*__tagp == 'x' || *__tagp == 'X')
+      return __make_mantissa_base16(__tagp);
+    else
+      return __make_mantissa_base8(__tagp);
+  }
+
+  return __make_mantissa_base10(__tagp);
+}
+
+// BEGIN FLOAT
+#if defined(__cplusplus)
+__DEVICE__
+int abs(int __x) {
+  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long labs(long __x) {
+  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long long llabs(long long __x) {
+  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+#endif
+
+__DEVICE__
+float acosf(float __x) { return __ocml_acos_f32(__x); }
+
+__DEVICE__
+float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+
+__DEVICE__
+float asinf(float __x) { return __ocml_asin_f32(__x); }
+
+__DEVICE__
+float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+
+__DEVICE__
+float
atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); } + +__DEVICE__ +float atanf(float __x) { return __ocml_atan_f32(__x); } + +__DEVICE__ +float atanhf(float __x) { return __ocml_atanh_f32(__x); } + +__DEVICE__ +float cbrtf(float __x) { return __ocml_cbrt_f32(__x); } + +__DEVICE__ +float ceilf(float __x) { return __ocml_ceil_f32(__x); } + +__DEVICE__ +float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); } + +__DEVICE__ +float cosf(float __x) { return __ocml_cos_f32(__x); } + +__DEVICE__ +float coshf(float __x) { return __ocml_cosh_f32(__x); } + +__DEVICE__ +float cospif(float __x) { return __ocml_cospi_f32(__x); } + +__DEVICE__ +float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); } + +__DEVICE__ +float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); } + +__DEVICE__ +float erfcf(float __x) { return __ocml_erfc_f32(__x); } + +__DEVICE__ +float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); } + +__DEVICE__ +float erfcxf(float __x) { return __ocml_erfcx_f32(__x); } + +__DEVICE__ +float erff(float __x) { return __ocml_erf_f32(__x); } + +__DEVICE__ +float erfinvf(float __x) { return __ocml_erfinv_f32(__x); } + +__DEVICE__ +float exp10f(float __x) { return __ocml_exp10_f32(__x); } + +__DEVICE__ +float exp2f(float __x) { return __ocml_exp2_f32(__x); } + +__DEVICE__ +float expf(float __x) { return __ocml_exp_f32(__x); } + +__DEVICE__ +float expm1f(float __x) { return __ocml_expm1_f32(__x); } + +__DEVICE__ +float fabsf(float __x) { return __ocml_fabs_f32(__x); } + +__DEVICE__ +float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); } + +__DEVICE__ +float fdividef(float __x, float __y) { return __x / __y; } + +__DEVICE__ +float floorf(float __x) { return __ocml_floor_f32(__x); } + +__DEVICE__ +float fmaf(float __x, float __y, float __z) { + return __ocml_fma_f32(__x, __y, __z); +} + +__DEVICE__ +float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); } + +__DEVICE__ +float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); } + +__DEVICE__ +float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); } + +__DEVICE__ +float frexpf(float __x, int *__nptr) { + int __tmp; + float __r = + __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp); + *__nptr = __tmp; + + return __r; +} + +__DEVICE__ +float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); } + +__DEVICE__ +int ilogbf(float __x) { return __ocml_ilogb_f32(__x); } + +__DEVICE__ +__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); } + +__DEVICE__ +__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); } + +__DEVICE__ +__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); } + +__DEVICE__ +float j0f(float __x) { return __ocml_j0_f32(__x); } + +__DEVICE__ +float j1f(float __x) { return __ocml_j1_f32(__x); } + +__DEVICE__ +float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication + // and the Miller & Brown algorithm + // for linear recurrences to get O(log n) steps, but it's unclear if + // it'd be beneficial in this case. 
+ if (__n == 0) + return j0f(__x); + if (__n == 1) + return j1f(__x); + + float __x0 = j0f(__x); + float __x1 = j1f(__x); + for (int __i = 1; __i < __n; ++__i) { + float __x2 = (2 * __i) / __x * __x1 - __x0; + __x0 = __x1; + __x1 = __x2; + } + + return __x1; +} + +__DEVICE__ +float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); } + +__DEVICE__ +float lgammaf(float __x) { return __ocml_lgamma_f32(__x); } + +__DEVICE__ +long long int llrintf(float __x) { return __ocml_rint_f32(__x); } + +__DEVICE__ +long long int llroundf(float __x) { return __ocml_round_f32(__x); } + +__DEVICE__ +float log10f(float __x) { return __ocml_log10_f32(__x); } + +__DEVICE__ +float log1pf(float __x) { return __ocml_log1p_f32(__x); } + +__DEVICE__ +float log2f(float __x) { return __ocml_log2_f32(__x); } + +__DEVICE__ +float logbf(float __x) { return __ocml_logb_f32(__x); } + +__DEVICE__ +float logf(float __x) { return __ocml_log_f32(__x); } + +__DEVICE__ +long int lrintf(float __x) { return __ocml_rint_f32(__x); } + +__DEVICE__ +long int lroundf(float __x) { return __ocml_round_f32(__x); } + +__DEVICE__ +float modff(float __x, float *__iptr) { + float __tmp; + float __r = + __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp); + *__iptr = __tmp; + return __r; +} + +__DEVICE__ +float nanf(const char *__tagp) { + union { + float val; + struct ieee_float { + unsigned int mantissa : 22; + unsigned int quiet : 1; + unsigned int exponent : 8; + unsigned int sign : 1; + } bits; + } __tmp; + __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits)); + + __tmp.bits.sign = 0u; + __tmp.bits.exponent = ~0u; + __tmp.bits.quiet = 1u; + __tmp.bits.mantissa = __make_mantissa(__tagp); + + return __tmp.val; +} + +__DEVICE__ +float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); } + +__DEVICE__ +float nextafterf(float __x, float __y) { + return __ocml_nextafter_f32(__x, __y); +} + +__DEVICE__ +float norm3df(float __x, float __y, float __z) { + return __ocml_len3_f32(__x, __y, __z); +} + +__DEVICE__ +float norm4df(float __x, float __y, float __z, float __w) { + return __ocml_len4_f32(__x, __y, __z, __w); +} + +__DEVICE__ +float normcdff(float __x) { return __ocml_ncdf_f32(__x); } + +__DEVICE__ +float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); } + +__DEVICE__ +float normf(int __dim, + const float *__a) { // TODO: placeholder until OCML adds support. 
+ float __r = 0; + while (__dim--) { + __r += __a[0] * __a[0]; + ++__a; + } + + return __ocml_sqrt_f32(__r); +} + +__DEVICE__ +float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); } + +__DEVICE__ +float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); } + +__DEVICE__ +float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); } + +__DEVICE__ +float remainderf(float __x, float __y) { + return __ocml_remainder_f32(__x, __y); +} + +__DEVICE__ +float remquof(float __x, float __y, int *__quo) { + int __tmp; + float __r = __ocml_remquo_f32( + __x, __y, (__attribute__((address_space(5))) int *)&__tmp); + *__quo = __tmp; + + return __r; +} + +__DEVICE__ +float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); } + +__DEVICE__ +float rintf(float __x) { return __ocml_rint_f32(__x); } + +__DEVICE__ +float rnorm3df(float __x, float __y, float __z) { + return __ocml_rlen3_f32(__x, __y, __z); +} + +__DEVICE__ +float rnorm4df(float __x, float __y, float __z, float __w) { + return __ocml_rlen4_f32(__x, __y, __z, __w); +} + +__DEVICE__ +float rnormf(int __dim, + const float *__a) { // TODO: placeholder until OCML adds support. + float __r = 0; + while (__dim--) { + __r += __a[0] * __a[0]; + ++__a; + } + + return __ocml_rsqrt_f32(__r); +} + +__DEVICE__ +float roundf(float __x) { return __ocml_round_f32(__x); } + +__DEVICE__ +float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); } + +__DEVICE__ +float scalblnf(float __x, long int __n) { + return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n) + : __ocml_scalb_f32(__x, __n); +} + +__DEVICE__ +float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); } + +__DEVICE__ +__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); } + +__DEVICE__ +void sincosf(float __x, float *__sinptr, float *__cosptr) { + float __tmp; + *__sinptr = + __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp); + *__cosptr = __tmp; +} + +__DEVICE__ +void sincospif(float __x, float *__sinptr, float *__cosptr) { + float __tmp; + *__sinptr = __ocml_sincospi_f32( + __x, (__attribute__((address_space(5))) float *)&__tmp); + *__cosptr = __tmp; +} + +__DEVICE__ +float sinf(float __x) { return __ocml_sin_f32(__x); } + +__DEVICE__ +float sinhf(float __x) { return __ocml_sinh_f32(__x); } + +__DEVICE__ +float sinpif(float __x) { return __ocml_sinpi_f32(__x); } + +__DEVICE__ +float sqrtf(float __x) { return __ocml_sqrt_f32(__x); } + +__DEVICE__ +float tanf(float __x) { return __ocml_tan_f32(__x); } + +__DEVICE__ +float tanhf(float __x) { return __ocml_tanh_f32(__x); } + +__DEVICE__ +float tgammaf(float __x) { return __ocml_tgamma_f32(__x); } + +__DEVICE__ +float truncf(float __x) { return __ocml_trunc_f32(__x); } + +__DEVICE__ +float y0f(float __x) { return __ocml_y0_f32(__x); } + +__DEVICE__ +float y1f(float __x) { return __ocml_y1_f32(__x); } + +__DEVICE__ +float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication + // and the Miller & Brown algorithm + // for linear recurrences to get O(log n) steps, but it's unclear if + // it'd be beneficial in this case. Placeholder until OCML adds + // support. 
+ if (__n == 0) + return y0f(__x); + if (__n == 1) + return y1f(__x); + + float __x0 = y0f(__x); + float __x1 = y1f(__x); + for (int __i = 1; __i < __n; ++__i) { + float __x2 = (2 * __i) / __x * __x1 - __x0; + __x0 = __x1; + __x1 = __x2; + } + + return __x1; +} + +// BEGIN INTRINSICS + +__DEVICE__ +float __cosf(float __x) { return __ocml_native_cos_f32(__x); } + +__DEVICE__ +float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); } + +__DEVICE__ +float __expf(float __x) { return __ocml_native_exp_f32(__x); } + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); } +__DEVICE__ +float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); } +__DEVICE__ +float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); } +__DEVICE__ +float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); } +#else +__DEVICE__ +float __fadd_rn(float __x, float __y) { return __x + __y; } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); } +__DEVICE__ +float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); } +__DEVICE__ +float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); } +__DEVICE__ +float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); } +#else +__DEVICE__ +float __fdiv_rn(float __x, float __y) { return __x / __y; } +#endif + +__DEVICE__ +float __fdividef(float __x, float __y) { return __x / __y; } + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __fmaf_rd(float __x, float __y, float __z) { + return __ocml_fma_rtn_f32(__x, __y, __z); +} +__DEVICE__ +float __fmaf_rn(float __x, float __y, float __z) { + return __ocml_fma_rte_f32(__x, __y, __z); +} +__DEVICE__ +float __fmaf_ru(float __x, float __y, float __z) { + return __ocml_fma_rtp_f32(__x, __y, __z); +} +__DEVICE__ +float __fmaf_rz(float __x, float __y, float __z) { + return __ocml_fma_rtz_f32(__x, __y, __z); +} +#else +__DEVICE__ +float __fmaf_rn(float __x, float __y, float __z) { + return __ocml_fma_f32(__x, __y, __z); +} +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); } +__DEVICE__ +float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); } +__DEVICE__ +float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); } +__DEVICE__ +float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); } +#else +__DEVICE__ +float __fmul_rn(float __x, float __y) { return __x * __y; } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); } +__DEVICE__ +float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); } +__DEVICE__ +float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); } +__DEVICE__ +float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); } +#else +__DEVICE__ +float __frcp_rn(float __x) { return 1.0f / __x; } +#endif + +__DEVICE__ +float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); } + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); } +__DEVICE__ +float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); } +__DEVICE__ +float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); } +__DEVICE__ +float 
__fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); } +#else +__DEVICE__ +float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); } +__DEVICE__ +float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); } +__DEVICE__ +float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); } +__DEVICE__ +float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); } +#else +__DEVICE__ +float __fsub_rn(float __x, float __y) { return __x - __y; } +#endif + +__DEVICE__ +float __log10f(float __x) { return __ocml_native_log10_f32(__x); } + +__DEVICE__ +float __log2f(float __x) { return __ocml_native_log2_f32(__x); } + +__DEVICE__ +float __logf(float __x) { return __ocml_native_log_f32(__x); } + +__DEVICE__ +float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); } + +__DEVICE__ +float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); } + +__DEVICE__ +void __sincosf(float __x, float *__sinptr, float *__cosptr) { + *__sinptr = __ocml_native_sin_f32(__x); + *__cosptr = __ocml_native_cos_f32(__x); +} + +__DEVICE__ +float __sinf(float __x) { return __ocml_native_sin_f32(__x); } + +__DEVICE__ +float __tanf(float __x) { return __ocml_tan_f32(__x); } +// END INTRINSICS +// END FLOAT + +// BEGIN DOUBLE +__DEVICE__ +double acos(double __x) { return __ocml_acos_f64(__x); } + +__DEVICE__ +double acosh(double __x) { return __ocml_acosh_f64(__x); } + +__DEVICE__ +double asin(double __x) { return __ocml_asin_f64(__x); } + +__DEVICE__ +double asinh(double __x) { return __ocml_asinh_f64(__x); } + +__DEVICE__ +double atan(double __x) { return __ocml_atan_f64(__x); } + +__DEVICE__ +double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); } + +__DEVICE__ +double atanh(double __x) { return __ocml_atanh_f64(__x); } + +__DEVICE__ +double cbrt(double __x) { return __ocml_cbrt_f64(__x); } + +__DEVICE__ +double ceil(double __x) { return __ocml_ceil_f64(__x); } + +__DEVICE__ +double copysign(double __x, double __y) { + return __ocml_copysign_f64(__x, __y); +} + +__DEVICE__ +double cos(double __x) { return __ocml_cos_f64(__x); } + +__DEVICE__ +double cosh(double __x) { return __ocml_cosh_f64(__x); } + +__DEVICE__ +double cospi(double __x) { return __ocml_cospi_f64(__x); } + +__DEVICE__ +double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); } + +__DEVICE__ +double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); } + +__DEVICE__ +double erf(double __x) { return __ocml_erf_f64(__x); } + +__DEVICE__ +double erfc(double __x) { return __ocml_erfc_f64(__x); } + +__DEVICE__ +double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); } + +__DEVICE__ +double erfcx(double __x) { return __ocml_erfcx_f64(__x); } + +__DEVICE__ +double erfinv(double __x) { return __ocml_erfinv_f64(__x); } + +__DEVICE__ +double exp(double __x) { return __ocml_exp_f64(__x); } + +__DEVICE__ +double exp10(double __x) { return __ocml_exp10_f64(__x); } + +__DEVICE__ +double exp2(double __x) { return __ocml_exp2_f64(__x); } + +__DEVICE__ +double expm1(double __x) { return __ocml_expm1_f64(__x); } + +__DEVICE__ +double fabs(double __x) { return __ocml_fabs_f64(__x); } + +__DEVICE__ +double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); } + +__DEVICE__ +double floor(double __x) { return __ocml_floor_f64(__x); } + +__DEVICE__ +double fma(double __x, double __y, double __z) { + return 
__ocml_fma_f64(__x, __y, __z); +} + +__DEVICE__ +double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); } + +__DEVICE__ +double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); } + +__DEVICE__ +double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); } + +__DEVICE__ +double frexp(double __x, int *__nptr) { + int __tmp; + double __r = + __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp); + *__nptr = __tmp; + return __r; +} + +__DEVICE__ +double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); } + +__DEVICE__ +int ilogb(double __x) { return __ocml_ilogb_f64(__x); } + +__DEVICE__ +__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); } + +__DEVICE__ +__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); } + +__DEVICE__ +__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); } + +__DEVICE__ +double j0(double __x) { return __ocml_j0_f64(__x); } + +__DEVICE__ +double j1(double __x) { return __ocml_j1_f64(__x); } + +__DEVICE__ +double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication + // and the Miller & Brown algorithm + // for linear recurrences to get O(log n) steps, but it's unclear if + // it'd be beneficial in this case. Placeholder until OCML adds + // support. + if (__n == 0) + return j0(__x); + if (__n == 1) + return j1(__x); + + double __x0 = j0(__x); + double __x1 = j1(__x); + for (int __i = 1; __i < __n; ++__i) { + double __x2 = (2 * __i) / __x * __x1 - __x0; + __x0 = __x1; + __x1 = __x2; + } + return __x1; +} + +__DEVICE__ +double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); } + +__DEVICE__ +double lgamma(double __x) { return __ocml_lgamma_f64(__x); } + +__DEVICE__ +long long int llrint(double __x) { return __ocml_rint_f64(__x); } + +__DEVICE__ +long long int llround(double __x) { return __ocml_round_f64(__x); } + +__DEVICE__ +double log(double __x) { return __ocml_log_f64(__x); } + +__DEVICE__ +double log10(double __x) { return __ocml_log10_f64(__x); } + +__DEVICE__ +double log1p(double __x) { return __ocml_log1p_f64(__x); } + +__DEVICE__ +double log2(double __x) { return __ocml_log2_f64(__x); } + +__DEVICE__ +double logb(double __x) { return __ocml_logb_f64(__x); } + +__DEVICE__ +long int lrint(double __x) { return __ocml_rint_f64(__x); } + +__DEVICE__ +long int lround(double __x) { return __ocml_round_f64(__x); } + +__DEVICE__ +double modf(double __x, double *__iptr) { + double __tmp; + double __r = + __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp); + *__iptr = __tmp; + + return __r; +} + +__DEVICE__ +double nan(const char *__tagp) { +#if !_WIN32 + union { + double val; + struct ieee_double { + uint64_t mantissa : 51; + uint32_t quiet : 1; + uint32_t exponent : 11; + uint32_t sign : 1; + } bits; + } __tmp; + __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits)); + + __tmp.bits.sign = 0u; + __tmp.bits.exponent = ~0u; + __tmp.bits.quiet = 1u; + __tmp.bits.mantissa = __make_mantissa(__tagp); + + return __tmp.val; +#else + __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double)); + uint64_t __val = __make_mantissa(__tagp); + __val |= 0xFFF << 51; + return *reinterpret_cast<double *>(&__val); +#endif +} + +__DEVICE__ +double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); } + +__DEVICE__ +double nextafter(double __x, double __y) { + return __ocml_nextafter_f64(__x, __y); +} + +__DEVICE__ +double norm(int __dim, + const double *__a) { // TODO: placeholder until OCML adds 
support. + double __r = 0; + while (__dim--) { + __r += __a[0] * __a[0]; + ++__a; + } + + return __ocml_sqrt_f64(__r); +} + +__DEVICE__ +double norm3d(double __x, double __y, double __z) { + return __ocml_len3_f64(__x, __y, __z); +} + +__DEVICE__ +double norm4d(double __x, double __y, double __z, double __w) { + return __ocml_len4_f64(__x, __y, __z, __w); +} + +__DEVICE__ +double normcdf(double __x) { return __ocml_ncdf_f64(__x); } + +__DEVICE__ +double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); } + +__DEVICE__ +double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); } + +__DEVICE__ +double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); } + +__DEVICE__ +double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); } + +__DEVICE__ +double remainder(double __x, double __y) { + return __ocml_remainder_f64(__x, __y); +} + +__DEVICE__ +double remquo(double __x, double __y, int *__quo) { + int __tmp; + double __r = __ocml_remquo_f64( + __x, __y, (__attribute__((address_space(5))) int *)&__tmp); + *__quo = __tmp; + + return __r; +} + +__DEVICE__ +double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); } + +__DEVICE__ +double rint(double __x) { return __ocml_rint_f64(__x); } + +__DEVICE__ +double rnorm(int __dim, + const double *__a) { // TODO: placeholder until OCML adds support. + double __r = 0; + while (__dim--) { + __r += __a[0] * __a[0]; + ++__a; + } + + return __ocml_rsqrt_f64(__r); +} + +__DEVICE__ +double rnorm3d(double __x, double __y, double __z) { + return __ocml_rlen3_f64(__x, __y, __z); +} + +__DEVICE__ +double rnorm4d(double __x, double __y, double __z, double __w) { + return __ocml_rlen4_f64(__x, __y, __z, __w); +} + +__DEVICE__ +double round(double __x) { return __ocml_round_f64(__x); } + +__DEVICE__ +double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); } + +__DEVICE__ +double scalbln(double __x, long int __n) { + return (__n < INT_MAX) ? 
__ocml_scalbn_f64(__x, __n) + : __ocml_scalb_f64(__x, __n); +} +__DEVICE__ +double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); } + +__DEVICE__ +__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); } + +__DEVICE__ +double sin(double __x) { return __ocml_sin_f64(__x); } + +__DEVICE__ +void sincos(double __x, double *__sinptr, double *__cosptr) { + double __tmp; + *__sinptr = __ocml_sincos_f64( + __x, (__attribute__((address_space(5))) double *)&__tmp); + *__cosptr = __tmp; +} + +__DEVICE__ +void sincospi(double __x, double *__sinptr, double *__cosptr) { + double __tmp; + *__sinptr = __ocml_sincospi_f64( + __x, (__attribute__((address_space(5))) double *)&__tmp); + *__cosptr = __tmp; +} + +__DEVICE__ +double sinh(double __x) { return __ocml_sinh_f64(__x); } + +__DEVICE__ +double sinpi(double __x) { return __ocml_sinpi_f64(__x); } + +__DEVICE__ +double sqrt(double __x) { return __ocml_sqrt_f64(__x); } + +__DEVICE__ +double tan(double __x) { return __ocml_tan_f64(__x); } + +__DEVICE__ +double tanh(double __x) { return __ocml_tanh_f64(__x); } + +__DEVICE__ +double tgamma(double __x) { return __ocml_tgamma_f64(__x); } + +__DEVICE__ +double trunc(double __x) { return __ocml_trunc_f64(__x); } + +__DEVICE__ +double y0(double __x) { return __ocml_y0_f64(__x); } + +__DEVICE__ +double y1(double __x) { return __ocml_y1_f64(__x); } + +__DEVICE__ +double yn(int __n, double __x) { // TODO: we could use Ahmes multiplication + // and the Miller & Brown algorithm + // for linear recurrences to get O(log n) steps, but it's unclear if + // it'd be beneficial in this case. Placeholder until OCML adds + // support. + if (__n == 0) + return y0(__x); + if (__n == 1) + return y1(__x); + + double __x0 = y0(__x); + double __x1 = y1(__x); + for (int __i = 1; __i < __n; ++__i) { + double __x2 = (2 * __i) / __x * __x1 - __x0; + __x0 = __x1; + __x1 = __x2; + } + + return __x1; +} + +// BEGIN INTRINSICS +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __dadd_rd(double __x, double __y) { + return __ocml_add_rtn_f64(__x, __y); +} +__DEVICE__ +double __dadd_rn(double __x, double __y) { + return __ocml_add_rte_f64(__x, __y); +} +__DEVICE__ +double __dadd_ru(double __x, double __y) { + return __ocml_add_rtp_f64(__x, __y); +} +__DEVICE__ +double __dadd_rz(double __x, double __y) { + return __ocml_add_rtz_f64(__x, __y); +} +#else +__DEVICE__ +double __dadd_rn(double __x, double __y) { return __x + __y; } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __ddiv_rd(double __x, double __y) { + return __ocml_div_rtn_f64(__x, __y); +} +__DEVICE__ +double __ddiv_rn(double __x, double __y) { + return __ocml_div_rte_f64(__x, __y); +} +__DEVICE__ +double __ddiv_ru(double __x, double __y) { + return __ocml_div_rtp_f64(__x, __y); +} +__DEVICE__ +double __ddiv_rz(double __x, double __y) { + return __ocml_div_rtz_f64(__x, __y); +} +#else +__DEVICE__ +double __ddiv_rn(double __x, double __y) { return __x / __y; } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __dmul_rd(double __x, double __y) { + return __ocml_mul_rtn_f64(__x, __y); +} +__DEVICE__ +double __dmul_rn(double __x, double __y) { + return __ocml_mul_rte_f64(__x, __y); +} +__DEVICE__ +double __dmul_ru(double __x, double __y) { + return __ocml_mul_rtp_f64(__x, __y); +} +__DEVICE__ +double __dmul_rz(double __x, double __y) { + return __ocml_mul_rtz_f64(__x, __y); +} +#else +__DEVICE__ +double __dmul_rn(double __x, double __y) { return __x * __y; } +#endif + +#if defined 
OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __drcp_rd(double __x) { return __ocml_div_rtn_f64(1.0, __x); } +__DEVICE__ +double __drcp_rn(double __x) { return __ocml_div_rte_f64(1.0, __x); } +__DEVICE__ +double __drcp_ru(double __x) { return __ocml_div_rtp_f64(1.0, __x); } +__DEVICE__ +double __drcp_rz(double __x) { return __ocml_div_rtz_f64(1.0, __x); } +#else +__DEVICE__ +double __drcp_rn(double __x) { return 1.0 / __x; } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); } +__DEVICE__ +double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); } +__DEVICE__ +double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); } +__DEVICE__ +double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); } +#else +__DEVICE__ +double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __dsub_rd(double __x, double __y) { + return __ocml_sub_rtn_f64(__x, __y); +} +__DEVICE__ +double __dsub_rn(double __x, double __y) { + return __ocml_sub_rte_f64(__x, __y); +} +__DEVICE__ +double __dsub_ru(double __x, double __y) { + return __ocml_sub_rtp_f64(__x, __y); +} +__DEVICE__ +double __dsub_rz(double __x, double __y) { + return __ocml_sub_rtz_f64(__x, __y); +} +#else +__DEVICE__ +double __dsub_rn(double __x, double __y) { return __x - __y; } +#endif + +#if defined OCML_BASIC_ROUNDED_OPERATIONS +__DEVICE__ +double __fma_rd(double __x, double __y, double __z) { + return __ocml_fma_rtn_f64(__x, __y, __z); +} +__DEVICE__ +double __fma_rn(double __x, double __y, double __z) { + return __ocml_fma_rte_f64(__x, __y, __z); +} +__DEVICE__ +double __fma_ru(double __x, double __y, double __z) { + return __ocml_fma_rtp_f64(__x, __y, __z); +} +__DEVICE__ +double __fma_rz(double __x, double __y, double __z) { + return __ocml_fma_rtz_f64(__x, __y, __z); +} +#else +__DEVICE__ +double __fma_rn(double __x, double __y, double __z) { + return __ocml_fma_f64(__x, __y, __z); +} +#endif +// END INTRINSICS +// END DOUBLE + +// C only macros +#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L +#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x) +#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x) +#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x) +#define signbit(__x) \ + _Generic((__x), float : __signbitf, double : __signbit)(__x) +#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L + +#if defined(__cplusplus) +template <class T> __DEVICE__ T min(T __arg1, T __arg2) { + return (__arg1 < __arg2) ? __arg1 : __arg2; +} + +template <class T> __DEVICE__ T max(T __arg1, T __arg2) { + return (__arg1 > __arg2) ? __arg1 : __arg2; +} + +__DEVICE__ int min(int __arg1, int __arg2) { + return (__arg1 < __arg2) ? __arg1 : __arg2; +} +__DEVICE__ int max(int __arg1, int __arg2) { + return (__arg1 > __arg2) ? 
__arg1 : __arg2; +} + +__DEVICE__ +float max(float __x, float __y) { return fmaxf(__x, __y); } + +__DEVICE__ +double max(double __x, double __y) { return fmax(__x, __y); } + +__DEVICE__ +float min(float __x, float __y) { return fminf(__x, __y); } + +__DEVICE__ +double min(double __x, double __y) { return fmin(__x, __y); } + +#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__) +__host__ inline static int min(int __arg1, int __arg2) { + return std::min(__arg1, __arg2); +} + +__host__ inline static int max(int __arg1, int __arg2) { + return std::max(__arg1, __arg2); +} +#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__) +#endif + +#pragma pop_macro("__DEVICE__") +#pragma pop_macro("__RETURN_TYPE") + +#endif // __CLANG_HIP_MATH_H__ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_runtime_wrapper.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_runtime_wrapper.h new file mode 100644 index 0000000..73021d2 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__clang_hip_runtime_wrapper.h @@ -0,0 +1,124 @@ +/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* + * WARNING: This header is intended to be directly -include'd by + * the compiler and is not supposed to be included by users. + * + */ + +#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__ +#define __CLANG_HIP_RUNTIME_WRAPPER_H__ + +#if __HIP__ + +#define __host__ __attribute__((host)) +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __shared__ __attribute__((shared)) +#define __constant__ __attribute__((constant)) +#define __managed__ __attribute__((managed)) + +#if !defined(__cplusplus) || __cplusplus < 201103L + #define nullptr NULL; +#endif + +#ifdef __cplusplus +extern "C" { + __attribute__((__visibility__("default"))) + __attribute__((weak)) + __attribute__((noreturn)) + __device__ void __cxa_pure_virtual(void) { + __builtin_trap(); + } + __attribute__((__visibility__("default"))) + __attribute__((weak)) + __attribute__((noreturn)) + __device__ void __cxa_deleted_virtual(void) { + __builtin_trap(); + } +} +#endif //__cplusplus + +#if !defined(__HIPCC_RTC__) +#include <cmath> +#include <cstdlib> +#include <stdlib.h> +#else +typedef __SIZE_TYPE__ size_t; +// Define macros which are needed to declare HIP device API's without standard +// C/C++ headers. This is for readability so that these API's can be written +// the same way as non-hipRTC use case. These macros need to be popped so that +// they do not pollute users' name space.
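+// For example, if a hipRTC user compiles with -DINT_MAX=42, the push_macro
+// below saves that definition, the #define shadows it for the remainder of
+// this header, and the matching pop_macro at the end of the file restores
+// the user's value.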
+#pragma push_macro("NULL") +#pragma push_macro("uint32_t") +#pragma push_macro("uint64_t") +#pragma push_macro("CHAR_BIT") +#pragma push_macro("INT_MAX") +#define NULL (void *)0 +#define uint32_t __UINT32_TYPE__ +#define uint64_t __UINT64_TYPE__ +#define CHAR_BIT __CHAR_BIT__ +#define INT_MAX __INTMAX_MAX__ +#endif // __HIPCC_RTC__ + +typedef __SIZE_TYPE__ __hip_size_t; + +#ifdef __cplusplus +extern "C" { +#endif //__cplusplus + +#if __HIP_ENABLE_DEVICE_MALLOC__ +__device__ void *__hip_malloc(__hip_size_t __size); +__device__ void *__hip_free(void *__ptr); +__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) { + return __hip_malloc(__size); +} +__attribute__((weak)) inline __device__ void *free(void *__ptr) { + return __hip_free(__ptr); +} +#else +__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) { + __builtin_trap(); + return (void *)0; +} +__attribute__((weak)) inline __device__ void *free(void *__ptr) { + __builtin_trap(); + return (void *)0; +} +#endif + +#ifdef __cplusplus +} // extern "C" +#endif //__cplusplus + +#include <__clang_hip_libdevice_declares.h> +#include <__clang_hip_math.h> + +#if defined(__HIPCC_RTC__) +#include <__clang_hip_cmath.h> +#else +#include <__clang_cuda_math_forward_declares.h> +#include <__clang_hip_cmath.h> +#include <__clang_cuda_complex_builtins.h> +#include <algorithm> +#include <complex> +#include <new> +#endif // __HIPCC_RTC__ + +#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1 +#if defined(__HIPCC_RTC__) +#pragma pop_macro("NULL") +#pragma pop_macro("uint32_t") +#pragma pop_macro("uint64_t") +#pragma pop_macro("CHAR_BIT") +#pragma pop_macro("INT_MAX") +#endif // __HIPCC_RTC__ +#endif // __HIP__ +#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__stddef_max_align_t.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__stddef_max_align_t.h new file mode 100644 index 0000000..e3b4392 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__stddef_max_align_t.h @@ -0,0 +1,27 @@ +/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_MAX_ALIGN_T_DEFINED +#define __CLANG_MAX_ALIGN_T_DEFINED + +#if defined(_MSC_VER) +typedef double max_align_t; +#elif defined(__APPLE__) +typedef long double max_align_t; +#else +// Define 'max_align_t' to match the GCC definition. +typedef struct { + long long __clang_max_align_nonce1 + __attribute__((__aligned__(__alignof__(long long)))); + long double __clang_max_align_nonce2 + __attribute__((__aligned__(__alignof__(long double)))); +} max_align_t; +#endif + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__wmmintrin_aes.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__wmmintrin_aes.h new file mode 100644 index 0000000..f540319 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__wmmintrin_aes.h @@ -0,0 +1,140 @@ +/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __WMMINTRIN_H +#error "Never use <__wmmintrin_aes.h> directly; include <wmmintrin.h> instead." +#endif + +#ifndef __WMMINTRIN_AES_H +#define __WMMINTRIN_AES_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes"), __min_vector_width__(128))) + +/// Performs a single round of AES encryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the VAESENC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the encrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesenc_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R); +} + +/// Performs the final round of AES encryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the VAESENCLAST instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the encrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesenclast_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R); +} + +/// Performs a single round of AES decryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the VAESDEC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the decrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesdec_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R); +} + +/// Performs the final round of AES decryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the VAESDECLAST instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the decrypted value. 
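+///
+/// A minimal AES-128 decryption sketch (the 11-entry __dk round-key array is
+/// an assumption for illustration; all keys after the first must already be
+/// in Equivalent Inverse Cipher form, e.g. via _mm_aesimc_si128):
+/// \code
+/// __m128i __s = _mm_xor_si128(__block, __dk[0]); // initial whitening
+/// for (int __i = 1; __i < 10; ++__i)
+///   __s = _mm_aesdec_si128(__s, __dk[__i]);      // rounds 1..9
+/// __s = _mm_aesdeclast_si128(__s, __dk[10]);     // final round
+/// \endcode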
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesdeclast_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R); +} + +/// Applies the AES InvMixColumns() transformation to an expanded key +/// contained in the source operand, and writes the result to the +/// destination. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the VAESIMC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the expanded key. +/// \returns A 128-bit integer vector containing the transformed value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesimc_si128(__m128i __V) +{ + return (__m128i)__builtin_ia32_aesimc128((__v2di)__V); +} + +/// Generates a round key for AES encryption, operating on 128-bit data +/// specified in the first source operand and using an 8-bit round constant +/// specified by the second source operand, and writes the result to the +/// destination. +/// +/// \headerfile <x86intrin.h> +/// +/// \code +/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R); +/// \endcode +/// +/// This intrinsic corresponds to the AESKEYGENASSIST instruction. +/// +/// \param C +/// A 128-bit integer vector that is used to generate the AES encryption key. +/// \param R +/// An 8-bit round constant used to generate the AES encryption key. +/// \returns A 128-bit round key for AES encryption. +#define _mm_aeskeygenassist_si128(C, R) \ + (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R)) + +#undef __DEFAULT_FN_ATTRS + +#endif /* __WMMINTRIN_AES_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__wmmintrin_pclmul.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__wmmintrin_pclmul.h new file mode 100644 index 0000000..fef4b93 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/__wmmintrin_pclmul.h @@ -0,0 +1,48 @@ +/*===---- __wmmintrin_pclmul.h - PCMUL intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __WMMINTRIN_H +#error "Never use <__wmmintrin_pclmul.h> directly; include <wmmintrin.h> instead." +#endif + +#ifndef __WMMINTRIN_PCLMUL_H +#define __WMMINTRIN_PCLMUL_H + +/// Multiplies two 64-bit integer values, which are selected from source +/// operands using the immediate-value operand. The multiplication is a +/// carry-less multiplication, and the 128-bit integer product is stored in +/// the destination. +/// +/// \headerfile <x86intrin.h> +/// +/// \code +/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I); +/// \endcode +/// +/// This intrinsic corresponds to the VPCLMULQDQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] containing one of the source operands. +/// \param __Y +/// A 128-bit vector of [2 x i64] containing one of the source operands. +/// \param __I +/// An immediate value specifying which 64-bit values to select from the +/// operands. Bit 0 is used to select a value from operand \a __X, and bit +/// 4 is used to select a value from operand \a __Y: \n +/// Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n +/// Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n +/// Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. 
\n +/// Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used. +/// \returns The 128-bit integer vector containing the result of the carry-less +/// multiplication of the selected 64-bit values. +#define _mm_clmulepi64_si128(X, Y, I) \ + ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (char)(I))) + +#endif /* __WMMINTRIN_PCLMUL_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/adxintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/adxintrin.h new file mode 100644 index 0000000..72b9ed0 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/adxintrin.h @@ -0,0 +1,72 @@ +/*===---- adxintrin.h - ADX intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <adxintrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __ADXINTRIN_H +#define __ADXINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) + +/* Intrinsics that are available only if __ADX__ defined */ +static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx"))) +_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y, + unsigned int *__p) +{ + return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx"))) +_addcarryx_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) +{ + return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); +} +#endif + +/* Intrinsics that are also available if __ADX__ undefined */ +static __inline unsigned char __DEFAULT_FN_ATTRS +_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y, + unsigned int *__p) +{ + return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +static __inline unsigned char __DEFAULT_FN_ATTRS +_addcarry_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) +{ + return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); +} +#endif + +static __inline unsigned char __DEFAULT_FN_ATTRS +_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y, + unsigned int *__p) +{ + return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +static __inline unsigned char __DEFAULT_FN_ATTRS +_subborrow_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) +{ + return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __ADXINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/altivec.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/altivec.h new file mode 100644 index 0000000..d548d8a --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/altivec.h @@ -0,0 +1,18974 @@ +/*===---- altivec.h - Standard header for type generic math ---------------===*\ + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * +\*===----------------------------------------------------------------------===*/ + +#ifndef __ALTIVEC_H +#define __ALTIVEC_H + +#ifndef __ALTIVEC__ +#error "AltiVec support not enabled" +#endif + +/* Constants for mapping CR6 bits to predicate result. */ + +#define __CR6_EQ 0 +#define __CR6_EQ_REV 1 +#define __CR6_LT 2 +#define __CR6_LT_REV 3 + +/* Constants for vec_test_data_class */ +#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0) +#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1) +#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \ + __VEC_CLASS_FP_SUBNORMAL_N) +#define __VEC_CLASS_FP_ZERO_N (1<<2) +#define __VEC_CLASS_FP_ZERO_P (1<<3) +#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | \ + __VEC_CLASS_FP_ZERO_N) +#define __VEC_CLASS_FP_INFINITY_N (1<<4) +#define __VEC_CLASS_FP_INFINITY_P (1<<5) +#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \ + __VEC_CLASS_FP_INFINITY_N) +#define __VEC_CLASS_FP_NAN (1<<6) +#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \ + __VEC_CLASS_FP_SUBNORMAL | \ + __VEC_CLASS_FP_ZERO | \ + __VEC_CLASS_FP_INFINITY) + +#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__)) + +#include <stddef.h> + +static __inline__ vector signed char __ATTRS_o_ai vec_perm( + vector signed char __a, vector signed char __b, vector unsigned char __c); + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_perm(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c); + +static __inline__ vector bool char __ATTRS_o_ai +vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c); + +static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a, + vector signed short __b, + vector unsigned char __c); + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_perm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c); + +static __inline__ vector bool short __ATTRS_o_ai vec_perm( + vector bool short __a, vector bool short __b, vector unsigned char __c); + +static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, + vector pixel __b, + vector unsigned char __c); + +static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a, + vector signed int __b, + vector unsigned char __c); + +static __inline__ vector unsigned int __ATTRS_o_ai vec_perm( + vector unsigned int __a, vector unsigned int __b, vector unsigned char __c); + +static __inline__ vector bool int __ATTRS_o_ai +vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c); + +static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a, + vector float __b, + vector unsigned char __c); + +#ifdef __VSX__ +static __inline__ vector long long __ATTRS_o_ai +vec_perm(vector signed long long __a, vector signed long long __b, + vector unsigned char __c); + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_perm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c); + +static __inline__ vector bool long long __ATTRS_o_ai +vec_perm(vector bool long long __a, vector bool long long __b, + vector unsigned char __c); + +static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a, + vector double __b, + vector unsigned char __c); +#endif + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char __a, vector unsigned char __b); + +/* vec_abs */ + +#define 
__builtin_altivec_abs_v16qi vec_abs +#define __builtin_altivec_abs_v8hi vec_abs +#define __builtin_altivec_abs_v4si vec_abs + +static __inline__ vector signed char __ATTRS_o_ai +vec_abs(vector signed char __a) { + return __builtin_altivec_vmaxsb(__a, -__a); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_abs(vector signed short __a) { + return __builtin_altivec_vmaxsh(__a, -__a); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_abs(vector signed int __a) { + return __builtin_altivec_vmaxsw(__a, -__a); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_abs(vector signed long long __a) { + return __builtin_altivec_vmaxsd(__a, -__a); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvabssp(__a); +#else + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF); + return (vector float)__res; +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) { + return __builtin_vsx_xvabsdp(__a); +} +#endif + +/* vec_abss */ +#define __builtin_altivec_abss_v16qi vec_abss +#define __builtin_altivec_abss_v8hi vec_abss +#define __builtin_altivec_abss_v4si vec_abss + +static __inline__ vector signed char __ATTRS_o_ai +vec_abss(vector signed char __a) { + return __builtin_altivec_vmaxsb( + __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a)); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_abss(vector signed short __a) { + return __builtin_altivec_vmaxsh( + __a, __builtin_altivec_vsubshs((vector signed short)(0), __a)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_abss(vector signed int __a) { + return __builtin_altivec_vmaxsw( + __a, __builtin_altivec_vsubsws((vector signed int)(0), __a)); +} + +/* vec_absd */ +#if defined(__POWER9_VECTOR__) + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_absd(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vabsdub(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_absd(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vabsduh(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_absd(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vabsduw(__a, __b); +} + +#endif /* End __POWER9_VECTOR__ */ + +/* vec_add */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_add(vector signed char __a, vector signed char __b) { + return __a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_add(vector bool char __a, vector signed char __b) { + return (vector signed char)__a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_add(vector signed char __a, vector bool char __b) { + return __a + (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_add(vector unsigned char __a, vector unsigned char __b) { + return __a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_add(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_add(vector unsigned char __a, vector bool char __b) { + return __a + (vector unsigned char)__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a, + vector short __b) { + return __a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_add(vector bool 
short __a, + vector short __b) { + return (vector short)__a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a, + vector bool short __b) { + return __a + (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_add(vector unsigned short __a, vector unsigned short __b) { + return __a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_add(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_add(vector unsigned short __a, vector bool short __b) { + return __a + (vector unsigned short)__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a, + vector int __b) { + return __a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a, + vector int __b) { + return (vector int)__a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a, + vector bool int __b) { + return __a + (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_add(vector unsigned int __a, vector unsigned int __b) { + return __a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_add(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_add(vector unsigned int __a, vector bool int __b) { + return __a + (vector unsigned int)__b; +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_add(vector signed long long __a, vector signed long long __b) { + return __a + __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_add(vector unsigned long long __a, vector unsigned long long __b) { + return __a + __b; +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_add(vector signed __int128 __a, vector signed __int128 __b) { + return __a + __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a + __b; +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_add_u128(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vadduqm(__a, __b); +} +#elif defined(__VSX__) +static __inline__ vector signed long long __ATTRS_o_ai +vec_add(vector signed long long __a, vector signed long long __b) { +#ifdef __LITTLE_ENDIAN__ + // Little endian systems on CPU's prior to Power8 don't really exist + // so scalarizing is fine. 
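+ // Plain vector addition; the compiler scalarizes the 64-bit element add
+ // here. The big-endian path below instead emulates it with 32-bit adds
+ // plus explicit carry propagation via vaddcuw.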
+ return __a + __b; +#else + vector unsigned int __res = + (vector unsigned int)__a + (vector unsigned int)__b; + vector unsigned int __carry = __builtin_altivec_vaddcuw( + (vector unsigned int)__a, (vector unsigned int)__b); + __carry = __builtin_shufflevector((vector unsigned char)__carry, + (vector unsigned char)__carry, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0); + return (vector signed long long)(__res + __carry); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_add(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)vec_add((vector signed long long)__a, + (vector signed long long)__b); +} +#endif // __POWER8_VECTOR__ + +static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a, + vector float __b) { + return __a + __b; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a, + vector double __b) { + return __a + __b; +} +#endif // __VSX__ + +/* vec_adde */ + +#ifdef __POWER8_VECTOR__ +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_adde(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_adde_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c); +} +#endif + +static __inline__ vector signed int __ATTRS_o_ai +vec_adde(vector signed int __a, vector signed int __b, + vector signed int __c) { + vector signed int __mask = {1, 1, 1, 1}; + vector signed int __carry = __c & __mask; + return vec_add(vec_add(__a, __b), __carry); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adde(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + vector unsigned int __mask = {1, 1, 1, 1}; + vector unsigned int __carry = __c & __mask; + return vec_add(vec_add(__a, __b), __carry); +} + +/* vec_addec */ + +#ifdef __POWER8_VECTOR__ +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_addec(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_addec_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c); +} + +#ifdef __powerpc64__ +static __inline__ vector signed int __ATTRS_o_ai +vec_addec(vector signed int __a, vector signed int __b, + vector signed int __c) { + + signed int __result[4]; + for (int i = 0; i < 4; i++) { + unsigned int __tempa = (unsigned int) __a[i]; + unsigned int __tempb = (unsigned int) __b[i]; + unsigned int __tempc = (unsigned int) __c[i]; + __tempc = __tempc & 0x00000001; + unsigned long long __longa = (unsigned long long) __tempa; + unsigned long long __longb 
= (unsigned long long) __tempb; + unsigned long long __longc = (unsigned long long) __tempc; + unsigned long long __sum = __longa + __longb + __longc; + unsigned long long __res = (__sum >> 32) & 0x01; + unsigned long long __tempres = (unsigned int) __res; + __result[i] = (signed int) __tempres; + } + + vector signed int ret = { __result[0], __result[1], __result[2], __result[3] }; + return ret; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_addec(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + + unsigned int __result[4]; + for (int i = 0; i < 4; i++) { + unsigned int __tempc = __c[i] & 1; + unsigned long long __longa = (unsigned long long) __a[i]; + unsigned long long __longb = (unsigned long long) __b[i]; + unsigned long long __longc = (unsigned long long) __tempc; + unsigned long long __sum = __longa + __longb + __longc; + unsigned long long __res = (__sum >> 32) & 0x01; + unsigned long long __tempres = (unsigned int) __res; + __result[i] = (signed int) __tempres; + } + + vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] }; + return ret; +} +#endif // __powerpc64__ +#endif // __POWER8_VECTOR__ + +/* vec_vaddubm */ + +#define __builtin_altivec_vaddubm vec_vaddubm + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddubm(vector signed char __a, vector signed char __b) { + return __a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddubm(vector bool char __a, vector signed char __b) { + return (vector signed char)__a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddubm(vector signed char __a, vector bool char __b) { + return __a + (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector unsigned char __a, vector unsigned char __b) { + return __a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector unsigned char __a, vector bool char __b) { + return __a + (vector unsigned char)__b; +} + +/* vec_vadduhm */ + +#define __builtin_altivec_vadduhm vec_vadduhm + +static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a, + vector short __b) { + return __a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a, + vector short __b) { + return (vector short)__a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a, + vector bool short __b) { + return __a + (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector unsigned short __a, vector unsigned short __b) { + return __a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector unsigned short __a, vector bool short __b) { + return __a + (vector unsigned short)__b; +} + +/* vec_vadduwm */ + +#define __builtin_altivec_vadduwm vec_vadduwm + +static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a, + vector int __b) { + return __a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a, + vector int __b) { + return (vector int)__a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a, + vector bool int 
__b) { + return __a + (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector unsigned int __a, vector unsigned int __b) { + return __a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector unsigned int __a, vector bool int __b) { + return __a + (vector unsigned int)__b; +} + +/* vec_vaddfp */ + +#define __builtin_altivec_vaddfp vec_vaddfp + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vaddfp(vector float __a, vector float __b) { + return __a + __b; +} + +/* vec_addc */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_addc(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_addc(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vaddcuw(__a, __b); +} + +#ifdef __POWER8_VECTOR__ +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_addc(vector signed __int128 __a, vector signed __int128 __b) { + return (vector signed __int128)__builtin_altivec_vaddcuq( + (vector unsigned __int128)__a, (vector unsigned __int128)__b); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_addc_u128(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +/* vec_vaddcuw */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vaddcuw(__a, __b); +} + +/* vec_adds */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_adds(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_adds(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_adds(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vaddsbs(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_adds(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_adds(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_adds(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a, + vector short __b) { + return __builtin_altivec_vaddshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a, + vector short __b) { + return __builtin_altivec_vaddshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_adds(vector 
short __a, + vector bool short __b) { + return __builtin_altivec_vaddshs(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_adds(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_adds(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_adds(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a, + vector int __b) { + return __builtin_altivec_vaddsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a, + vector int __b) { + return __builtin_altivec_vaddsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a, + vector bool int __b) { + return __builtin_altivec_vaddsws(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adds(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adds(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adds(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vadduws(__a, (vector unsigned int)__b); +} + +/* vec_vaddsbs */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddsbs(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddsbs(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddsbs(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vaddsbs(__a, (vector signed char)__b); +} + +/* vec_vaddubs */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubs(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubs(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubs(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b); +} + +/* vec_vaddshs */ + +static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a, + vector short __b) { + return __builtin_altivec_vaddshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a, + vector short __b) { + return __builtin_altivec_vaddshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a, + vector bool short __b) { + return __builtin_altivec_vaddshs(__a, (vector short)__b); +} + +/* vec_vadduhs */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhs(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhs(vector bool short __a, vector 
unsigned short __b) { + return __builtin_altivec_vadduhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhs(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b); +} + +/* vec_vaddsws */ + +static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a, + vector int __b) { + return __builtin_altivec_vaddsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a, + vector int __b) { + return __builtin_altivec_vaddsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a, + vector bool int __b) { + return __builtin_altivec_vaddsws(__a, (vector int)__b); +} + +/* vec_vadduws */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduws(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduws(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduws(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vadduws(__a, (vector unsigned int)__b); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \ + defined(__SIZEOF_INT128__) +/* vec_vadduqm */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) { + return __a + __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a + __b; +} + +/* vec_vaddeuqm */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +/* vec_vaddcuq */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} + +/* vec_vaddecuq */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +/* vec_and */ + +#define __builtin_altivec_vand vec_and + +static __inline__ vector signed char __ATTRS_o_ai +vec_and(vector signed char __a, vector signed char __b) { + return __a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_and(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_and(vector signed char __a, vector bool 
char __b) { + return __a & (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_and(vector unsigned char __a, vector unsigned char __b) { + return __a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_and(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_and(vector unsigned char __a, vector bool char __b) { + return __a & (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a, + vector bool char __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a, + vector short __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a, + vector short __b) { + return (vector short)__a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a, + vector bool short __b) { + return __a & (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_and(vector unsigned short __a, vector unsigned short __b) { + return __a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_and(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_and(vector unsigned short __a, vector bool short __b) { + return __a & (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_and(vector bool short __a, vector bool short __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a, + vector int __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a, + vector int __b) { + return (vector int)__a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a, + vector bool int __b) { + return __a & (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_and(vector unsigned int __a, vector unsigned int __b) { + return __a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_and(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_and(vector unsigned int __a, vector bool int __b) { + return __a & (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a, + vector bool int __b) { + return __a & __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai +vec_and(vector 
double __a, vector bool long long __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_and(vector signed long long __a, vector signed long long __b) { + return __a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_and(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_and(vector signed long long __a, vector bool long long __b) { + return __a & (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_and(vector unsigned long long __a, vector unsigned long long __b) { + return __a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_and(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_and(vector unsigned long long __a, vector bool long long __b) { + return __a & (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_and(vector bool long long __a, vector bool long long __b) { + return __a & __b; +} +#endif + +/* vec_vand */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vand(vector signed char __a, vector signed char __b) { + return __a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vand(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vand(vector signed char __a, vector bool char __b) { + return __a & (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vand(vector unsigned char __a, vector unsigned char __b) { + return __a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vand(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vand(vector unsigned char __a, vector bool char __b) { + return __a & (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a, + vector bool char __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a, + vector short __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a, + vector short __b) { + return (vector short)__a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a, + vector bool short __b) { + return __a & (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vand(vector unsigned short __a, vector unsigned short __b) { + return __a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vand(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vand(vector unsigned short __a, vector bool short __b) { + return __a & (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai 
+vec_vand(vector bool short __a, vector bool short __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a, + vector int __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a, + vector int __b) { + return (vector int)__a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a, + vector bool int __b) { + return __a & (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vand(vector unsigned int __a, vector unsigned int __b) { + return __a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vand(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vand(vector unsigned int __a, vector bool int __b) { + return __a & (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a, + vector bool int __b) { + return __a & __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vand(vector signed long long __a, vector signed long long __b) { + return __a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vand(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vand(vector signed long long __a, vector bool long long __b) { + return __a & (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vand(vector unsigned long long __a, vector unsigned long long __b) { + return __a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vand(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vand(vector unsigned long long __a, vector bool long long __b) { + return __a & (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vand(vector bool long long __a, vector bool long long __b) { + return __a & __b; +} +#endif + +/* vec_andc */ + +#define __builtin_altivec_vandc vec_andc + +static __inline__ vector signed char __ATTRS_o_ai +vec_andc(vector signed char __a, vector signed char __b) { + return __a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_andc(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_andc(vector signed char __a, vector bool char __b) { + return __a & ~(vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_andc(vector unsigned char __a, vector unsigned char __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned 
char __ATTRS_o_ai +vec_andc(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_andc(vector unsigned char __a, vector bool char __b) { + return __a & ~(vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a, + vector bool char __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a, + vector short __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a, + vector short __b) { + return (vector short)__a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a, + vector bool short __b) { + return __a & ~(vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_andc(vector unsigned short __a, vector unsigned short __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_andc(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_andc(vector unsigned short __a, vector bool short __b) { + return __a & ~(vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_andc(vector bool short __a, vector bool short __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a, + vector int __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a, + vector int __b) { + return (vector int)__a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a, + vector bool int __b) { + return __a & ~(vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_andc(vector unsigned int __a, vector unsigned int __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_andc(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_andc(vector unsigned int __a, vector bool int __b) { + return __a & ~(vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a, + vector bool int __b) { + return __a & ~__b; +} + +static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & ~(vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai +vec_andc(vector double __a, vector bool long long __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & ~(vector unsigned long long)__b; + return (vector double)__res; +} + +static 
__inline__ vector double __ATTRS_o_ai vec_andc(vector double __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & ~(vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_andc(vector signed long long __a, vector signed long long __b) { + return __a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_andc(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_andc(vector signed long long __a, vector bool long long __b) { + return __a & ~(vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_andc(vector unsigned long long __a, vector unsigned long long __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_andc(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_andc(vector unsigned long long __a, vector bool long long __b) { + return __a & ~(vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_andc(vector bool long long __a, vector bool long long __b) { + return __a & ~__b; +} +#endif + +/* vec_vandc */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vandc(vector signed char __a, vector signed char __b) { + return __a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vandc(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vandc(vector signed char __a, vector bool char __b) { + return __a & ~(vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vandc(vector unsigned char __a, vector unsigned char __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vandc(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vandc(vector unsigned char __a, vector bool char __b) { + return __a & ~(vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vandc(vector bool char __a, vector bool char __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a, + vector short __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a, + vector short __b) { + return (vector short)__a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a, + vector bool short __b) { + return __a & ~(vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vandc(vector unsigned short __a, vector unsigned short __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vandc(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vandc(vector unsigned short __a, vector bool short __b) { + return __a & ~(vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vandc(vector bool short __a, vector bool short __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int 
__a, + vector int __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a, + vector int __b) { + return (vector int)__a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a, + vector bool int __b) { + return __a & ~(vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vandc(vector unsigned int __a, vector unsigned int __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vandc(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vandc(vector unsigned int __a, vector bool int __b) { + return __a & ~(vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a, + vector bool int __b) { + return __a & ~__b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vandc(vector signed long long __a, vector signed long long __b) { + return __a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vandc(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vandc(vector signed long long __a, vector bool long long __b) { + return __a & ~(vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vandc(vector unsigned long long __a, vector unsigned long long __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vandc(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vandc(vector unsigned long long __a, vector bool long long __b) { + return __a & ~(vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vandc(vector bool long long __a, vector bool long long __b) { + return __a & ~__b; +} +#endif + +/* vec_avg */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_avg(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vavgsb(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_avg(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vavgub(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a, + vector short __b) { + return __builtin_altivec_vavgsh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_avg(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vavguh(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a, + vector int __b) { + return __builtin_altivec_vavgsw(__a, __b); +} + +static 
__inline__ vector unsigned int __ATTRS_o_ai +vec_avg(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vavguw(__a, __b); +} + +/* vec_vavgsb */ + +static __inline__ vector signed char __attribute__((__always_inline__)) +vec_vavgsb(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vavgsb(__a, __b); +} + +/* vec_vavgub */ + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_vavgub(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vavgub(__a, __b); +} + +/* vec_vavgsh */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vavgsh(vector short __a, vector short __b) { + return __builtin_altivec_vavgsh(__a, __b); +} + +/* vec_vavguh */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vavguh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vavguh(__a, __b); +} + +/* vec_vavgsw */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vavgsw(vector int __a, vector int __b) { + return __builtin_altivec_vavgsw(__a, __b); +} + +/* vec_vavguw */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vavguw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vavguw(__a, __b); +} + +/* vec_ceil */ + +static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrspip(__a); +#else + return __builtin_altivec_vrfip(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) { + return __builtin_vsx_xvrdpip(__a); +} +#endif + +/* vec_roundp */ +static __inline__ vector float __ATTRS_o_ai vec_roundp(vector float __a) { + return vec_ceil(__a); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_roundp(vector double __a) { + return vec_ceil(__a); +} +#endif + +/* vec_vrfip */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfip(vector float __a) { + return __builtin_altivec_vrfip(__a); +} + +/* vec_cmpb */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_cmpb(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp(__a, __b); +} + +/* vec_vcmpbfp */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vcmpbfp(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp(__a, __b); +} + +/* vec_cmpeq */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpeq(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpeq(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpeq(vector bool char __a, vector bool char __b) { + return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a, + vector short __b) { + return (vector bool short)__builtin_altivec_vcmpequh(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpeq(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool 
short __ATTRS_o_ai +vec_cmpeq(vector bool short __a, vector bool short __b) { + return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a, + vector int __b) { + return (vector bool int)__builtin_altivec_vcmpequw(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpeq(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a, + vector bool int __b) { + return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a, + (vector int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)__builtin_altivec_vcmpequd( + (vector long long)__a, (vector long long)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)__builtin_altivec_vcmpequd( + (vector long long)__a, (vector long long)__b); +} +#elif defined(__VSX__) +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector signed long long __a, vector signed long long __b) { + vector bool int __wordcmp = + vec_cmpeq((vector signed int)__a, (vector signed int)__b); +#ifdef __LITTLE_ENDIAN__ + __wordcmp &= __builtin_shufflevector(__wordcmp, __wordcmp, 3, 0, 1, 2); + return (vector bool long long)__builtin_shufflevector(__wordcmp, __wordcmp, 1, + 1, 3, 3); +#else + __wordcmp &= __builtin_shufflevector(__wordcmp, __wordcmp, 1, 2, 3, 0); + return (vector bool long long)__builtin_shufflevector(__wordcmp, __wordcmp, 0, + 0, 2, 2); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) { + return vec_cmpeq((vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector bool long long __a, vector bool long long __b) { + return vec_cmpeq((vector signed long long)__a, (vector signed long long)__b); +} +#endif + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a, + vector float __b) { +#ifdef __VSX__ + return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b); +#else + return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector double __a, vector double __b) { + return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) { + return (vector bool __int128)__builtin_altivec_vcmpequq( + (vector bool __int128)__a, (vector bool __int128)__b); +} + +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return (vector bool __int128)__builtin_altivec_vcmpequq( + (vector bool __int128)__a, (vector bool __int128)__b); +} +#endif + +#ifdef __POWER9_VECTOR__ +/* 
vec_cmpne */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector bool char __a, vector bool char __b) { + return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector bool short __a, vector bool short __b) { + return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector signed short __a, vector signed short __b) { + return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector bool int __a, vector bool int __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector signed int __a, vector signed int __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector float __a, vector float __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return (vector bool __int128) ~(__builtin_altivec_vcmpequq( + (vector bool __int128)__a, (vector bool __int128)__b)); +} + +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) { + return (vector bool __int128) ~(__builtin_altivec_vcmpequq( + (vector bool __int128)__a, (vector bool __int128)__b)); +} +#endif + +/* vec_cmpnez */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpnez(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpnez(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpnez(vector signed short __a, vector signed short __b) { + return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpnez(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool int 
__ATTRS_o_ai +vec_cmpnez(vector signed int __a, vector signed int __b) { + return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpnez(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a, + (vector int)__b); +} + +static __inline__ signed int __ATTRS_o_ai +vec_cntlz_lsbb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vctzlsbb(__a); +#else + return __builtin_altivec_vclzlsbb(__a); +#endif +} + +static __inline__ signed int __ATTRS_o_ai +vec_cntlz_lsbb(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vctzlsbb(__a); +#else + return __builtin_altivec_vclzlsbb(__a); +#endif +} + +static __inline__ signed int __ATTRS_o_ai +vec_cnttz_lsbb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclzlsbb(__a); +#else + return __builtin_altivec_vctzlsbb(__a); +#endif +} + +static __inline__ signed int __ATTRS_o_ai +vec_cnttz_lsbb(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclzlsbb(__a); +#else + return __builtin_altivec_vctzlsbb(__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_parity_lsbb(vector unsigned int __a) { + return __builtin_altivec_vprtybw(__a); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_parity_lsbb(vector signed int __a) { + return __builtin_altivec_vprtybw(__a); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_parity_lsbb(vector unsigned __int128 __a) { + return __builtin_altivec_vprtybq(__a); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_parity_lsbb(vector signed __int128 __a) { + return __builtin_altivec_vprtybq(__a); +} +#endif + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_parity_lsbb(vector unsigned long long __a) { + return __builtin_altivec_vprtybd(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_parity_lsbb(vector signed long long __a) { + return __builtin_altivec_vprtybd(__a); +} + +#else +/* vec_cmpne */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector bool char __a, vector bool char __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector signed char __a, vector signed char __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector unsigned char __a, vector unsigned char __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector bool short __a, vector bool short __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector signed short __a, vector signed short __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector unsigned short __a, vector unsigned short __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector bool int __a, vector bool int __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector signed int __a, vector signed int __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector unsigned int __a, vector unsigned int __b) { + return ~(vec_cmpeq(__a, __b)); +} + +static __inline__ vector bool 
int __ATTRS_o_ai +vec_cmpne(vector float __a, vector float __b) { + return ~(vec_cmpeq(__a, __b)); +} +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector bool long long __a, vector bool long long __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector signed long long __a, vector signed long long __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} +#elif defined(__VSX__) +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)~( + vec_cmpeq((vector signed long long)__a, (vector signed long long)__b)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)~( + vec_cmpeq((vector signed long long)__a, (vector signed long long)__b)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)~( + vec_cmpeq((vector signed long long)__a, (vector signed long long)__b)); +} +#endif + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector double __a, vector double __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} +#endif + +/* vec_cmpgt */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpgt(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpgt(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a, + vector short __b) { + return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpgt(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a, + vector int __b) { + return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpgt(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b); +} +#elif defined(__VSX__) +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector signed long long __a, vector signed long long __b) { + vector signed int __sgtw = 
(vector signed int)vec_cmpgt( + (vector signed int)__a, (vector signed int)__b); + vector unsigned int __ugtw = (vector unsigned int)vec_cmpgt( + (vector unsigned int)__a, (vector unsigned int)__b); + vector unsigned int __eqw = (vector unsigned int)vec_cmpeq( + (vector signed int)__a, (vector signed int)__b); +#ifdef __LITTLE_ENDIAN__ + __ugtw = __builtin_shufflevector(__ugtw, __ugtw, 3, 0, 1, 2) & __eqw; + __sgtw |= (vector signed int)__ugtw; + return (vector bool long long)__builtin_shufflevector(__sgtw, __sgtw, 1, 1, 3, + 3); +#else + __ugtw = __builtin_shufflevector(__ugtw, __ugtw, 1, 2, 3, 0) & __eqw; + __sgtw |= (vector signed int)__ugtw; + return (vector bool long long)__builtin_shufflevector(__sgtw, __sgtw, 0, 0, 2, + 2); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) { + vector unsigned int __ugtw = (vector unsigned int)vec_cmpgt( + (vector unsigned int)__a, (vector unsigned int)__b); + vector unsigned int __eqw = (vector unsigned int)vec_cmpeq( + (vector signed int)__a, (vector signed int)__b); +#ifdef __LITTLE_ENDIAN__ + __eqw = __builtin_shufflevector(__ugtw, __ugtw, 3, 0, 1, 2) & __eqw; + __ugtw |= __eqw; + return (vector bool long long)__builtin_shufflevector(__ugtw, __ugtw, 1, 1, 3, + 3); +#else + __eqw = __builtin_shufflevector(__ugtw, __ugtw, 1, 2, 3, 0) & __eqw; + __ugtw |= __eqw; + return (vector bool long long)__builtin_shufflevector(__ugtw, __ugtw, 0, 0, 2, + 2); +#endif +} +#endif + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b); +#else + return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector double __a, vector double __b) { + return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) { + return (vector bool __int128)__builtin_altivec_vcmpgtsq( + (vector bool __int128)__a, (vector bool __int128)__b); +} + +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return (vector bool __int128)__builtin_altivec_vcmpgtuq( + (vector bool __int128)__a, (vector bool __int128)__b); +} +#endif + +/* vec_cmpge */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpge(vector signed char __a, vector signed char __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpge(vector unsigned char __a, vector unsigned char __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpge(vector signed short __a, vector signed short __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpge(vector unsigned short __a, vector unsigned short __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpge(vector signed int __a, vector signed int __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpge(vector unsigned int __a, vector unsigned int __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a, + vector float 
__b) { +#ifdef __VSX__ + return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b); +#else + return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpge(vector double __a, vector double __b) { + return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpge(vector signed long long __a, vector signed long long __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) { + return ~(vec_cmpgt(__b, __a)); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpge(vector signed __int128 __a, vector signed __int128 __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmpge(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return ~(vec_cmpgt(__b, __a)); +} +#endif + +/* vec_vcmpgefp */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgefp(vector float __a, vector float __b) { + return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b); +} + +/* vec_vcmpgtsb */ + +static __inline__ vector bool char __attribute__((__always_inline__)) +vec_vcmpgtsb(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b); +} + +/* vec_vcmpgtub */ + +static __inline__ vector bool char __attribute__((__always_inline__)) +vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b); +} + +/* vec_vcmpgtsh */ + +static __inline__ vector bool short __attribute__((__always_inline__)) +vec_vcmpgtsh(vector short __a, vector short __b) { + return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b); +} + +/* vec_vcmpgtuh */ + +static __inline__ vector bool short __attribute__((__always_inline__)) +vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b); +} + +/* vec_vcmpgtsw */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgtsw(vector int __a, vector int __b) { + return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b); +} + +/* vec_vcmpgtuw */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b); +} + +/* vec_vcmpgtfp */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgtfp(vector float __a, vector float __b) { + return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b); +} + +/* vec_cmple */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmple(vector signed char __a, vector signed char __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmple(vector unsigned char __a, vector unsigned char __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmple(vector signed short __a, vector signed short __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmple(vector unsigned short __a, vector unsigned short __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmple(vector signed 
int __a, vector signed int __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmple(vector unsigned int __a, vector unsigned int __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a, + vector float __b) { + return vec_cmpge(__b, __a); +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmple(vector double __a, vector double __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmple(vector signed long long __a, vector signed long long __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmple(vector unsigned long long __a, vector unsigned long long __b) { + return vec_cmpge(__b, __a); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmple(vector signed __int128 __a, vector signed __int128 __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmple(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return vec_cmpge(__b, __a); +} +#endif + +/* vec_cmplt */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmplt(vector signed char __a, vector signed char __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmplt(vector unsigned char __a, vector unsigned char __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a, + vector short __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmplt(vector unsigned short __a, vector unsigned short __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, + vector int __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmplt(vector unsigned int __a, vector unsigned int __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a, + vector float __b) { + return vec_cmpgt(__b, __a); +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmplt(vector double __a, vector double __b) { + return vec_cmpgt(__b, __a); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmplt(vector signed __int128 __a, vector signed __int128 __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool __int128 __ATTRS_o_ai +vec_cmplt(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return vec_cmpgt(__b, __a); +} +#endif + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmplt(vector signed long long __a, vector signed long long __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) { + return vec_cmpgt(__b, __a); +} +#endif + +#ifdef __POWER8_VECTOR__ +/* vec_popcnt */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_popcnt(vector signed char __a) { + return __builtin_altivec_vpopcntb(__a); +} +static __inline__ vector unsigned char __ATTRS_o_ai +vec_popcnt(vector unsigned char __a) { + return __builtin_altivec_vpopcntb(__a); +} +static __inline__ vector signed short __ATTRS_o_ai +vec_popcnt(vector signed short __a) { + return 
__builtin_altivec_vpopcnth(__a); +} +static __inline__ vector unsigned short __ATTRS_o_ai +vec_popcnt(vector unsigned short __a) { + return __builtin_altivec_vpopcnth(__a); +} +static __inline__ vector signed int __ATTRS_o_ai +vec_popcnt(vector signed int __a) { + return __builtin_altivec_vpopcntw(__a); +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_popcnt(vector unsigned int __a) { + return __builtin_altivec_vpopcntw(__a); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_popcnt(vector signed long long __a) { + return __builtin_altivec_vpopcntd(__a); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_popcnt(vector unsigned long long __a) { + return __builtin_altivec_vpopcntd(__a); +} + +#define vec_vclz vec_cntlz +/* vec_cntlz */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_cntlz(vector signed char __a) { + return __builtin_altivec_vclzb(__a); +} +static __inline__ vector unsigned char __ATTRS_o_ai +vec_cntlz(vector unsigned char __a) { + return __builtin_altivec_vclzb(__a); +} +static __inline__ vector signed short __ATTRS_o_ai +vec_cntlz(vector signed short __a) { + return __builtin_altivec_vclzh(__a); +} +static __inline__ vector unsigned short __ATTRS_o_ai +vec_cntlz(vector unsigned short __a) { + return __builtin_altivec_vclzh(__a); +} +static __inline__ vector signed int __ATTRS_o_ai +vec_cntlz(vector signed int __a) { + return __builtin_altivec_vclzw(__a); +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_cntlz(vector unsigned int __a) { + return __builtin_altivec_vclzw(__a); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_cntlz(vector signed long long __a) { + return __builtin_altivec_vclzd(__a); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cntlz(vector unsigned long long __a) { + return __builtin_altivec_vclzd(__a); +} +#endif + +#ifdef __POWER9_VECTOR__ + +/* vec_cnttz */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_cnttz(vector signed char __a) { + return __builtin_altivec_vctzb(__a); +} +static __inline__ vector unsigned char __ATTRS_o_ai +vec_cnttz(vector unsigned char __a) { + return __builtin_altivec_vctzb(__a); +} +static __inline__ vector signed short __ATTRS_o_ai +vec_cnttz(vector signed short __a) { + return __builtin_altivec_vctzh(__a); +} +static __inline__ vector unsigned short __ATTRS_o_ai +vec_cnttz(vector unsigned short __a) { + return __builtin_altivec_vctzh(__a); +} +static __inline__ vector signed int __ATTRS_o_ai +vec_cnttz(vector signed int __a) { + return __builtin_altivec_vctzw(__a); +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_cnttz(vector unsigned int __a) { + return __builtin_altivec_vctzw(__a); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_cnttz(vector signed long long __a) { + return __builtin_altivec_vctzd(__a); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cnttz(vector unsigned long long __a) { + return __builtin_altivec_vctzd(__a); +} + +/* vec_first_match_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector signed char __a, vector signed char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector unsigned char __a, vector unsigned char __b) { + vector 
unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector signed short __a, vector signed short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector unsigned short __a, vector unsigned short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector signed int __a, vector signed int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector unsigned int __a, vector unsigned int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +/* vec_first_match_or_eos_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) { + /* OR the equality comparison of the two vectors with a comparison of each + vector against the equality result itself. A lane of the combined mask is + set when the corresponding elements are equal, or when either element is + zero (an end-of-string terminator), because a zero element compares equal + to the all-zero lanes of the miscompare result.
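 + + For example (editor's illustration, not upstream text): with __a = + { 'a', 'b', 'c', 0, ... } and __b = { 'x', 'b', 'y', 'z', ... }, lane 1 is + the first lane set in the combined mask ('b' == 'b'), so the returned + index is 1; if no lanes are equal and neither vector contains a zero byte, + both 64-bit halves of the mask are zero and the result is 16.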
+ */ + vector bool char __tmp1 = vec_cmpeq(__a, __b); + vector bool char __tmp2 = __tmp1 | + vec_cmpeq((vector signed char)__tmp1, __a) | + vec_cmpeq((vector signed char)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector unsigned char __a, + vector unsigned char __b) { + vector bool char __tmp1 = vec_cmpeq(__a, __b); + vector bool char __tmp2 = __tmp1 | + vec_cmpeq((vector unsigned char)__tmp1, __a) | + vec_cmpeq((vector unsigned char)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) { + vector bool short __tmp1 = vec_cmpeq(__a, __b); + vector bool short __tmp2 = __tmp1 | + vec_cmpeq((vector signed short)__tmp1, __a) | + vec_cmpeq((vector signed short)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector unsigned short __a, + vector unsigned short __b) { + vector bool short __tmp1 = vec_cmpeq(__a, __b); + vector bool short __tmp2 = __tmp1 | + vec_cmpeq((vector unsigned short)__tmp1, __a) | + vec_cmpeq((vector unsigned short)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) { + vector bool int __tmp1 = vec_cmpeq(__a, __b); + vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) | + vec_cmpeq((vector signed int)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) { + vector bool int __tmp1 = vec_cmpeq(__a, __b); + vector bool int __tmp2 = __tmp1 | + vec_cmpeq((vector unsigned int)__tmp1, __a) | + vec_cmpeq((vector unsigned int)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +/* vec_first_mismatch_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector signed char __a, vector signed char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, 
__b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector signed short __a, vector signed short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector signed int __a, vector signed int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +/* vec_first_mismatch_or_eos_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector signed char __a, + vector signed char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector unsigned char __a, + vector unsigned char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector signed short __a, + vector signed short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai 
+vec_first_mismatch_or_eos_index(vector unsigned short __a, + vector unsigned short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector unsigned int __a, + vector unsigned int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ vector double __ATTRS_o_ai +vec_insert_exp(vector double __a, vector unsigned long long __b) { + return __builtin_vsx_xviexpdp((vector unsigned long long)__a,__b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_vsx_xviexpdp(__a,__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_insert_exp(vector float __a, vector unsigned int __b) { + return __builtin_vsx_xviexpsp((vector unsigned int)__a,__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_insert_exp(vector unsigned int __a, vector unsigned int __b) { + return __builtin_vsx_xviexpsp(__a,__b); +} + +#if defined(__powerpc64__) +static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(const signed char *__a, + size_t __b) { + return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xl_len(const unsigned char *__a, size_t __b) { + return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(const signed short *__a, + size_t __b) { + return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xl_len(const unsigned short *__a, size_t __b) { + return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(const signed int *__a, + size_t __b) { + return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(const unsigned int *__a, + size_t __b) { + return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t __b) { + return (vector float)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_xl_len(const signed __int128 *__a, size_t __b) { + return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_xl_len(const unsigned __int128 *__a, size_t __b) { + return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56)); +} +#endif + 
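+// Note on the (__b << 56) shifts used throughout the vec_xl_len/vec_xst_len
+// families: the underlying lxvl/stxvl instructions take the byte count in
+// the high-order byte of the 64-bit length operand, so the count is shifted
+// into bits 0:7 here. A minimal usage sketch (the buffer name and length
+// below are hypothetical, not part of this header):
+//
+//   unsigned char buf[16] = "hello world";
+//   vector unsigned char v = vec_xl_len(buf, 11); // load 11 bytes; the
+//                                                 // remaining bytes are zero
+//   vec_xst_len(v, buf, 11);                      // store 11 bytes back
+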
+static __inline__ vector signed long long __ATTRS_o_ai +vec_xl_len(const signed long long *__a, size_t __b) { + return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xl_len(const unsigned long long *__a, size_t __b) { + return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector double __ATTRS_o_ai vec_xl_len(const double *__a, + size_t __b) { + return (vector double)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xl_len_r(const unsigned char *__a, size_t __b) { + vector unsigned char __res = + (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56)); + vector unsigned char __mask = + (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL); + return (vector unsigned char)__builtin_altivec_vperm_4si( + (vector int)__res, (vector int)__res, __mask); +} + +// vec_xst_len +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a, + unsigned char *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a, + signed char *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a, + signed short *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a, + unsigned short *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a, + signed int *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a, + unsigned int *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a, + signed __int128 *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a, + unsigned __int128 *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} +#endif + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a, + signed long long *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a, + unsigned long long *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a, + unsigned char *__b, + size_t __c) { + vector unsigned char __mask = + (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL); + vector unsigned char __res = + __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask); + return __builtin_vsx_stxvll((vector int)__res, __b, 
(__c << 56)); +} +#endif +#endif + +#if defined(__POWER9_VECTOR__) && defined(__powerpc64__) +#define __vec_ldrmb(PTR, CNT) vec_xl_len_r((const unsigned char *)(PTR), (CNT)) +#define __vec_strmb(PTR, CNT, VAL) \ + vec_xst_len_r((VAL), (unsigned char *)(PTR), (CNT)) +#else +#define __vec_ldrmb __builtin_vsx_ldrmb +#define __vec_strmb __builtin_vsx_strmb +#endif + +/* vec_cpsgn */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a, + vector float __b) { + return __builtin_vsx_xvcpsgnsp(__b, __a); +} + +static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, + vector double __b) { + return __builtin_vsx_xvcpsgndp(__b, __a); +} +#endif + +/* vec_ctf */ + +#ifdef __VSX__ +// There are some functions that have different signatures with the XL compiler +// from those in Clang/GCC and documented in the PVIPR. This macro ensures that +// the XL-compatible signatures are used for those functions. +#ifdef __XL_COMPAT_ALTIVEC__ +#define vec_ctf(__a, __b) \ + _Generic((__a), vector int \ + : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \ + vector unsigned int \ + : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \ + (__b)), \ + vector unsigned long long \ + : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) * \ + (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \ + vector signed long long \ + : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) * \ + (vector float)(vector unsigned)((0x7f - (__b)) << 23))) +#else // __XL_COMPAT_ALTIVEC__ +#define vec_ctf(__a, __b) \ + _Generic((__a), vector int \ + : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \ + vector unsigned int \ + : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \ + (__b)), \ + vector unsigned long long \ + : (__builtin_convertvector((vector unsigned long long)(__a), \ + vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ + << 52)), \ + vector signed long long \ + : (__builtin_convertvector((vector signed long long)(__a), \ + vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ + << 52))) +#endif // __XL_COMPAT_ALTIVEC__ +#else +#define vec_ctf(__a, __b) \ + _Generic((__a), vector int \ + : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \ + vector unsigned int \ + : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \ + (__b))) +#endif + +/* vec_ctd */ +#ifdef __VSX__ +#define vec_ctd(__a, __b) \ + _Generic((__a), vector signed int \ + : (vec_doublee((vector signed int)(__a)) * \ + (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ + << 52)), \ + vector unsigned int \ + : (vec_doublee((vector unsigned int)(__a)) * \ + (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ + << 52)), \ + vector unsigned long long \ + : (__builtin_convertvector((vector unsigned long long)(__a), \ + vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ + << 52)), \ + vector signed long long \ + : (__builtin_convertvector((vector signed long long)(__a), \ + vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ + << 52))) +#endif // __VSX__ + +/* vec_vcfsx */ + +#define vec_vcfux __builtin_altivec_vcfux +/* vec_vcfux */ + +#define vec_vcfsx(__a, __b) __builtin_altivec_vcfsx((vector int)(__a), (__b)) + +/* vec_cts */ + +#ifdef __VSX__ +#ifdef __XL_COMPAT_ALTIVEC__ +#define vec_cts(__a, __b) \ + _Generic((__a), vector float \ + : 
__builtin_altivec_vctsxs((vector float)(__a), (__b)), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \ + << 52); \ + __builtin_vsx_xvcvdpsxws(__ret); \ + })) +#else // __XL_COMPAT_ALTIVEC__ +#define vec_cts(__a, __b) \ + _Generic((__a), vector float \ + : __builtin_altivec_vctsxs((vector float)(__a), (__b)), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \ + << 52); \ + __builtin_convertvector(__ret, vector signed long long); \ + })) +#endif // __XL_COMPAT_ALTIVEC__ +#else +#define vec_cts __builtin_altivec_vctsxs +#endif + +/* vec_vctsxs */ + +#define vec_vctsxs __builtin_altivec_vctsxs + +/* vec_ctu */ + +#ifdef __VSX__ +#ifdef __XL_COMPAT_ALTIVEC__ +#define vec_ctu(__a, __b) \ + _Generic((__a), vector float \ + : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + << 52); \ + __builtin_vsx_xvcvdpuxws(__ret); \ + })) +#else // __XL_COMPAT_ALTIVEC__ +#define vec_ctu(__a, __b) \ + _Generic((__a), vector float \ + : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + << 52); \ + __builtin_convertvector(__ret, vector unsigned long long); \ + })) +#endif // __XL_COMPAT_ALTIVEC__ +#else +#define vec_ctu __builtin_altivec_vctuxs +#endif + +#ifdef __LITTLE_ENDIAN__ +/* vec_ctsl */ + +#ifdef __VSX__ +#define vec_ctsl(__a, __b) \ + _Generic((__a), vector float \ + : __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ + __builtin_vsx_xvcvspsxds( \ + __builtin_vsx_xxsldwi(__ret, __ret, 1)); \ + }), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + << 52); \ + __builtin_convertvector(__ret, vector signed long long); \ + })) + +/* vec_ctul */ + +#define vec_ctul(__a, __b) \ + _Generic((__a), vector float \ + : __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ + __builtin_vsx_xvcvspuxds( \ + __builtin_vsx_xxsldwi(__ret, __ret, 1)); \ + }), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + << 52); \ + __builtin_convertvector(__ret, vector unsigned long long); \ + })) +#endif +#else // __LITTLE_ENDIAN__ +/* vec_ctsl */ + +#ifdef __VSX__ +#define vec_ctsl(__a, __b) \ + _Generic((__a), vector float \ + : __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ + __builtin_vsx_xvcvspsxds(__ret); \ + }), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + << 52); \ + __builtin_convertvector(__ret, vector signed long long); \ + })) + +/* vec_ctul */ + +#define vec_ctul(__a, __b) \ + _Generic((__a), vector float \ + : __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f 
+ (__b)) << 23); \ + __builtin_vsx_xvcvspuxds(__ret); \ + }), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + << 52); \ + __builtin_convertvector(__ret, vector unsigned long long); \ + })) +#endif +#endif // __LITTLE_ENDIAN__ + +/* vec_vctuxs */ + +#define vec_vctuxs __builtin_altivec_vctuxs + +/* vec_signext */ + +#ifdef __POWER9_VECTOR__ +static __inline__ vector signed int __ATTRS_o_ai +vec_signexti(vector signed char __a) { + return __builtin_altivec_vextsb2w(__a); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_signexti(vector signed short __a) { + return __builtin_altivec_vextsh2w(__a); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_signextll(vector signed char __a) { + return __builtin_altivec_vextsb2d(__a); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_signextll(vector signed short __a) { + return __builtin_altivec_vextsh2d(__a); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_signextll(vector signed int __a) { + return __builtin_altivec_vextsw2d(__a); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_signextq(vector signed long long __a) { + return __builtin_altivec_vextsd2q(__a); +} +#endif + +/* vec_signed */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_sld(vector signed int, vector signed int, unsigned const int __c); + +static __inline__ vector signed int __ATTRS_o_ai +vec_signed(vector float __a) { + return __builtin_convertvector(__a, vector signed int); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_signed(vector double __a) { + return __builtin_convertvector(__a, vector signed long long); +} + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_signed2(vector double __a, vector double __b) { + return (vector signed int) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_signede(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvdpsxws(__a); +#endif +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_signedo(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvdpsxws(__a); +#else + vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a); + return vec_sld(__ret, __ret, 12); +#endif +} +#endif + +/* vec_unsigned */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sld(vector unsigned int, vector unsigned int, unsigned const int __c); + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unsigned(vector float __a) { + return __builtin_convertvector(__a, vector unsigned int); +} + +#ifdef __VSX__ +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_unsigned(vector double __a) { + return __builtin_convertvector(__a, vector unsigned long long); +} + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_unsigned2(vector double __a, vector double __b) { + return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unsignede(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvdpuxws(__a); +#endif +} + +static 
__inline__ vector unsigned int __ATTRS_o_ai +vec_unsignedo(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvdpuxws(__a); +#else + vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a); + return vec_sld(__ret, __ret, 12); +#endif +} +#endif + +/* vec_float */ + +static __inline__ vector float __ATTRS_o_ai +vec_sld(vector float, vector float, unsigned const int __c); + +static __inline__ vector float __ATTRS_o_ai +vec_float(vector signed int __a) { + return __builtin_convertvector(__a, vector float); +} + +static __inline__ vector float __ATTRS_o_ai +vec_float(vector unsigned int __a) { + return __builtin_convertvector(__a, vector float); +} + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai +vec_float2(vector signed long long __a, vector signed long long __b) { + return (vector float) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector float __ATTRS_o_ai +vec_float2(vector unsigned long long __a, vector unsigned long long __b) { + return (vector float) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector float __ATTRS_o_ai +vec_float2(vector double __a, vector double __b) { + return (vector float) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector float __ATTRS_o_ai +vec_floate(vector signed long long __a) { +#ifdef __LITTLE_ENDIAN__ + vector float __ret = __builtin_vsx_xvcvsxdsp(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvsxdsp(__a); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floate(vector unsigned long long __a) { +#ifdef __LITTLE_ENDIAN__ + vector float __ret = __builtin_vsx_xvcvuxdsp(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvuxdsp(__a); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floate(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + vector float __ret = __builtin_vsx_xvcvdpsp(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvdpsp(__a); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floato(vector signed long long __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvsxdsp(__a); +#else + vector float __ret = __builtin_vsx_xvcvsxdsp(__a); + return vec_sld(__ret, __ret, 12); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floato(vector unsigned long long __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvuxdsp(__a); +#else + vector float __ret = __builtin_vsx_xvcvuxdsp(__a); + return vec_sld(__ret, __ret, 12); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floato(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvdpsp(__a); +#else + vector float __ret = __builtin_vsx_xvcvdpsp(__a); + return vec_sld(__ret, __ret, 12); +#endif +} +#endif + +/* vec_double */ + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai +vec_double(vector signed long long __a) { + return __builtin_convertvector(__a, vector double); +} + +static __inline__ vector double __ATTRS_o_ai +vec_double(vector unsigned long long __a) { + return __builtin_convertvector(__a, vector double); +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublee(vector signed int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4)); +#else + return __builtin_vsx_xvcvsxwdp(__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublee(vector unsigned int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4)); +#else + return 
__builtin_vsx_xvcvuxwdp(__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublee(vector float __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4)); +#else + return __builtin_vsx_xvcvspdp(__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleh(vector signed int __a) { + vector double __ret = {__a[0], __a[1]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleh(vector unsigned int __a) { + vector double __ret = {__a[0], __a[1]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleh(vector float __a) { + vector double __ret = {__a[0], __a[1]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublel(vector signed int __a) { + vector double __ret = {__a[2], __a[3]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublel(vector unsigned int __a) { + vector double __ret = {__a[2], __a[3]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublel(vector float __a) { + vector double __ret = {__a[2], __a[3]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleo(vector signed int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvsxwdp(__a); +#else + return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleo(vector unsigned int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvuxwdp(__a); +#else + return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleo(vector float __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvspdp(__a); +#else + return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4)); +#endif +} + +/* vec_cvf */ +static __inline__ vector double __ATTRS_o_ai vec_cvf(vector float __a) { + return vec_doublee(__a); +} + +static __inline__ vector float __ATTRS_o_ai vec_cvf(vector double __a) { + return vec_floate(__a); +} +#endif + +/* vec_div */ + +/* Integer vector divides (vectors are scalarized, elements divided + and the vectors reassembled). 
+*/ +static __inline__ vector signed char __ATTRS_o_ai +vec_div(vector signed char __a, vector signed char __b) { + return __a / __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_div(vector unsigned char __a, vector unsigned char __b) { + return __a / __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_div(vector signed short __a, vector signed short __b) { + return __a / __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_div(vector unsigned short __a, vector unsigned short __b) { + return __a / __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_div(vector signed int __a, vector signed int __b) { + return __a / __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_div(vector unsigned int __a, vector unsigned int __b) { + return __a / __b; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_div(vector signed long long __a, vector signed long long __b) { + return __a / __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_div(vector unsigned long long __a, vector unsigned long long __b) { + return __a / __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a, + vector float __b) { + return __a / __b; +} + +static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a, + vector double __b) { + return __a / __b; +} +#endif + +/* vec_dive */ + +#ifdef __POWER10_VECTOR__ +static __inline__ vector signed int __ATTRS_o_ai +vec_dive(vector signed int __a, vector signed int __b) { + return __builtin_altivec_vdivesw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_dive(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vdiveuw(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_dive(vector signed long long __a, vector signed long long __b) { + return __builtin_altivec_vdivesd(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_dive(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vdiveud(__a, __b); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_dive(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vdiveuq(__a, __b); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_dive(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vdivesq(__a, __b); +} +#endif +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_div(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a / __b; +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_div(vector signed __int128 __a, vector signed __int128 __b) { + return __a / __b; +} +#endif /* __POWER10_VECTOR__ */ + +/* vec_xvtdiv */ + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_test_swdiv(vector double __a, + vector double __b) { + return __builtin_vsx_xvtdivdp(__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_test_swdivs(vector float __a, + vector float __b) { + return __builtin_vsx_xvtdivsp(__a, __b); +} +#endif + +/* vec_dss */ + +#define vec_dss __builtin_altivec_dss + +/* vec_dssall */ + +static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) { + __builtin_altivec_dssall(); +} + +/* vec_dst */ +#define vec_dst(__PTR, __CW, __STR) \ + __builtin_altivec_dst((const void *)(__PTR), (__CW), 
(__STR)) + +/* vec_dstst */ +#define vec_dstst(__PTR, __CW, __STR) \ + __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)) + +/* vec_dststt */ +#define vec_dststt(__PTR, __CW, __STR) \ + __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)) + +/* vec_dstt */ +#define vec_dstt(__PTR, __CW, __STR) \ + __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)) + +/* vec_eqv */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed char __ATTRS_o_ai +vec_eqv(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_eqv(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a, + vector bool char __b) { + return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_eqv(vector signed short __a, vector signed short __b) { + return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_eqv(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_eqv(vector bool short __a, vector bool short __b) { + return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_eqv(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_eqv(vector unsigned int __a, vector unsigned int __b) { + return __builtin_vsx_xxleqv(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a, + vector bool int __b) { + return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_eqv(vector signed long long __a, vector signed long long __b) { + return (vector signed long long)__builtin_vsx_xxleqv( + (vector unsigned int)__a, (vector unsigned int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_eqv(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__builtin_vsx_xxleqv( + (vector unsigned int)__a, (vector unsigned int)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_eqv(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a, + vector float __b) { + return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a, + vector double __b) { + return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} +#endif + +/* vec_expte */ + +static __inline__ vector float 
__attribute__((__always_inline__)) +vec_expte(vector float __a) { + return __builtin_altivec_vexptefp(__a); +} + +/* vec_vexptefp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vexptefp(vector float __a) { + return __builtin_altivec_vexptefp(__a); +} + +/* vec_floor */ + +static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrspim(__a); +#else + return __builtin_altivec_vrfim(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) { + return __builtin_vsx_xvrdpim(__a); +} +#endif + +/* vec_roundm */ +static __inline__ vector float __ATTRS_o_ai vec_roundm(vector float __a) { + return vec_floor(__a); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_roundm(vector double __a) { + return vec_floor(__a); +} +#endif + +/* vec_vrfim */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfim(vector float __a) { + return __builtin_altivec_vrfim(__a); +} + +/* vec_ld */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_ld(long __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_ld(long __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ld(long __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ld(long __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_ld(long __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ld(long __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ld(long __a, const short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ld(long __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ld(long __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_ld(long __a, const vector bool short *__b) { + return (vector bool short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_ld(long __a, + const vector pixel *__b) { + return (vector pixel)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_ld(long __a, + const vector int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_ld(long __a, const int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_ld(long __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_ld(long __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_ld(long __a, 
const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ld(long __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ld(long __a, const float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +/* vec_lvx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvx(long __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvx(long __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvx(long __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvx(long __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvx(long __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvx(long __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvx(long __a, const short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvx(long __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvx(long __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvx(long __a, const vector bool short *__b) { + return (vector bool short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvx(long __a, + const vector pixel *__b) { + return (vector pixel)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvx(long __a, + const vector int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvx(long __a, const int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvx(long __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvx(long __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvx(long __a, const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvx(long __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvx(long __a, const float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +/* vec_lde */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lde(long __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvebx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lde(long __a, 
const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvebx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lde(long __a, const short *__b) { + return (vector short)__builtin_altivec_lvehx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lde(long __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvehx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lde(long __a, const int *__b) { + return (vector int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lde(long __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lde(long __a, const float *__b) { + return (vector float)__builtin_altivec_lvewx(__a, __b); +} + +/* vec_lvebx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvebx(long __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvebx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvebx(long __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvebx(__a, __b); +} + +/* vec_lvehx */ + +static __inline__ vector short __ATTRS_o_ai vec_lvehx(long __a, + const short *__b) { + return (vector short)__builtin_altivec_lvehx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvehx(long __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvehx(__a, __b); +} + +/* vec_lvewx */ + +static __inline__ vector int __ATTRS_o_ai vec_lvewx(long __a, const int *__b) { + return (vector int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvewx(long __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvewx(long __a, + const float *__b) { + return (vector float)__builtin_altivec_lvewx(__a, __b); +} + +/* vec_ldl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_ldl(long __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_ldl(long __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ldl(long __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ldl(long __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_ldl(long __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ldl(long __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ldl(long __a, const short *__b) { + return (vector short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ldl(long __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ldl(long __a, const unsigned short *__b) { + return (vector unsigned 
short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_ldl(long __a, const vector bool short *__b) {
+  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_ldl(long __a,
+                                                    const vector pixel *__b) {
+  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_ldl(long __a,
+                                                  const vector int *__b) {
+  return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_ldl(long __a, const int *__b) {
+  return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ldl(long __a, const vector unsigned int *__b) {
+  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_ldl(long __a, const unsigned int *__b) {
+  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_ldl(long __a, const vector bool int *__b) {
+  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ldl(long __a,
+                                                    const vector float *__b) {
+  return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_ldl(long __a, const float *__b) {
+  return (vector float)__builtin_altivec_lvxl(__a, __b);
+}
+
+/* vec_lvxl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvxl(long __a, const vector signed char *__b) {
+  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_lvxl(long __a, const signed char *__b) {
+  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvxl(long __a, const vector unsigned char *__b) {
+  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_lvxl(long __a, const unsigned char *__b) {
+  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_lvxl(long __a, const vector bool char *__b) {
+  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(long __a,
+                                                     const vector short *__b) {
+  return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(long __a,
+                                                     const short *__b) {
+  return (vector short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvxl(long __a, const vector unsigned short *__b) {
+  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_lvxl(long __a, const unsigned short *__b) {
+  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_lvxl(long __a, const vector bool short *__b) {
+  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(long __a,
+                                                     const vector pixel *__b) {
+  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(long __a,
+                                                   const vector int *__b) {
+  return (vector int)__builtin_altivec_lvxl(__a, __b);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(long __a, const int *__b) {
+  return 
(vector int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvxl(long __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvxl(long __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvxl(long __a, const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvxl(long __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvxl(long __a, + const float *__b) { + return (vector float)__builtin_altivec_lvxl(__a, __b); +} + +/* vec_loge */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_loge(vector float __a) { + return __builtin_altivec_vlogefp(__a); +} + +/* vec_vlogefp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vlogefp(vector float __a) { + return __builtin_altivec_vlogefp(__a); +} + +/* vec_lvsl */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const signed char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const signed char *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, + const short *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector 
unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const unsigned short *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, + const int *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const unsigned int *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const float *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, + const float *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +/* vec_lvsr */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const signed char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const signed char *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const short *__b) { + vector unsigned char mask = + (vector unsigned 
char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, + const short *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const unsigned short *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, + const int *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const unsigned int *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const float *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, + const float *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +/* vec_madd */ +static __inline__ vector signed short __ATTRS_o_ai +vec_mladd(vector signed short, vector signed short, vector signed short); +static __inline__ vector signed short __ATTRS_o_ai +vec_mladd(vector signed short, vector unsigned short, vector unsigned short); +static __inline__ vector signed short __ATTRS_o_ai +vec_mladd(vector unsigned short, vector signed short, vector signed short); +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short); + +static __inline__ vector signed short __ATTRS_o_ai vec_madd( + vector signed short __a, vector signed short __b, vector signed short __c) { + return vec_mladd(__a, __b, __c); +} + 
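+// The integer vec_madd overloads here forward to vec_mladd, i.e. a modular
+// (non-saturating) elementwise multiply-add on 16-bit elements; the
+// saturating multiply-high-and-add variant is vec_madds below. A minimal
+// usage sketch (the values are hypothetical):
+//
+//   vector signed short a = vec_splats((signed short)3);
+//   vector signed short b = vec_splats((signed short)4);
+//   vector signed short c = vec_splats((signed short)5);
+//   vector signed short r = vec_madd(a, b, c); // every element is 3*4+5 = 17
+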
+static __inline__ vector signed short __ATTRS_o_ai +vec_madd(vector signed short __a, vector unsigned short __b, + vector unsigned short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_madd(vector unsigned short __a, vector signed short __b, + vector signed short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_madd(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a, + vector float __b, + vector float __c) { +#ifdef __VSX__ + return __builtin_vsx_xvmaddasp(__a, __b, __c); +#else + return __builtin_altivec_vmaddfp(__a, __b, __c); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvmaddadp(__a, __b, __c); +} +#endif + +/* vec_vmaddfp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vmaddfp(vector float __a, vector float __b, vector float __c) { + return __builtin_altivec_vmaddfp(__a, __b, __c); +} + +/* vec_madds */ + +static __inline__ vector signed short __attribute__((__always_inline__)) +vec_madds(vector signed short __a, vector signed short __b, + vector signed short __c) { + return __builtin_altivec_vmhaddshs(__a, __b, __c); +} + +/* vec_vmhaddshs */ +static __inline__ vector signed short __attribute__((__always_inline__)) +vec_vmhaddshs(vector signed short __a, vector signed short __b, + vector signed short __c) { + return __builtin_altivec_vmhaddshs(__a, __b, __c); +} + +/* vec_msub */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a, + vector float __b, + vector float __c) { + return __builtin_vsx_xvmsubasp(__a, __b, __c); +} + +static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvmsubadp(__a, __b, __c); +} +#endif + +/* vec_max */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_max(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_max(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_max(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vmaxsb(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_max(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_max(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_max(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a, + vector short __b) { + return __builtin_altivec_vmaxsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a, + vector short __b) { + return __builtin_altivec_vmaxsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a, + vector bool short __b) 
{ + return __builtin_altivec_vmaxsh(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_max(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_max(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_max(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a, + vector int __b) { + return __builtin_altivec_vmaxsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a, + vector int __b) { + return __builtin_altivec_vmaxsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a, + vector bool int __b) { + return __builtin_altivec_vmaxsw(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_max(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_max(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_max(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_max(vector signed long long __a, vector signed long long __b) { + return __builtin_altivec_vmaxsd(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_max(vector bool long long __a, vector signed long long __b) { + return __builtin_altivec_vmaxsd((vector signed long long)__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_max(vector signed long long __a, vector bool long long __b) { + return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_max(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vmaxud(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_max(vector bool long long __a, vector unsigned long long __b) { + return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_max(vector unsigned long long __a, vector bool long long __b) { + return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvmaxsp(__a, __b); +#else + return __builtin_altivec_vmaxfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a, + vector double __b) { + return __builtin_vsx_xvmaxdp(__a, __b); +} +#endif + +/* vec_vmaxsb */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmaxsb(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmaxsb(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb((vector signed char)__a, __b); +} + +static __inline__ vector signed 
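+/* Editorial note: the vec_vmax* and, further below, vec_vmin* entry points
+ * are the type-specific names from the AltiVec PIM; they call the same
+ * builtins as the generic vec_max/vec_min overloads. */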
char __ATTRS_o_ai +vec_vmaxsb(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vmaxsb(__a, (vector signed char)__b); +} + +/* vec_vmaxub */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmaxub(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmaxub(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmaxub(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b); +} + +/* vec_vmaxsh */ + +static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a, + vector short __b) { + return __builtin_altivec_vmaxsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a, + vector short __b) { + return __builtin_altivec_vmaxsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a, + vector bool short __b) { + return __builtin_altivec_vmaxsh(__a, (vector short)__b); +} + +/* vec_vmaxuh */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmaxuh(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmaxuh(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b); +} + +/* vec_vmaxsw */ + +static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, + vector int __b) { + return __builtin_altivec_vmaxsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a, + vector int __b) { + return __builtin_altivec_vmaxsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, + vector bool int __b) { + return __builtin_altivec_vmaxsw(__a, (vector int)__b); +} + +/* vec_vmaxuw */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmaxuw(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmaxuw(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b); +} + +/* vec_vmaxfp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vmaxfp(vector float __a, vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvmaxsp(__a, __b); +#else + return __builtin_altivec_vmaxfp(__a, __b); +#endif +} + +/* vec_mergeh */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_mergeh(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_mergeh(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 
0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_mergeh(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mergeh(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_mergeh(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergeh(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeh(vector signed long long __a, vector signed long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeh(vector signed long long __a, vector bool long long __b) { + return vec_perm(__a, (vector signed long long)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector signed long long __b) { + return vec_perm((vector signed long long)__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) { + return vec_perm(__a, __b, 
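+                  /* Editorial clarification: indices 0x00-0x07 take bytes
+                     0-7 of __a (element 0) and 0x10-0x17 take bytes 0-7 of
+                     __b, giving the doubleword result (__a[0], __b[0]). */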
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeh(vector unsigned long long __a, vector bool long long __b) { + return vec_perm(__a, (vector unsigned long long)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector unsigned long long __b) { + return vec_perm((vector unsigned long long)__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector bool long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a, + vector double __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergeh(vector double __a, vector bool long long __b) { + return vec_perm(__a, (vector double)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector double __b) { + return vec_perm((vector double)__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} +#endif + +/* vec_vmrghb */ + +#define __builtin_altivec_vmrghb vec_vmrghb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmrghb(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmrghb(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vmrghb(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +/* vec_vmrghh */ + +#define __builtin_altivec_vmrghh vec_vmrghh + +static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmrghh(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vmrghh(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 
0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +/* vec_vmrghw */ + +#define __builtin_altivec_vmrghw vec_vmrghw + +static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmrghw(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +/* vec_mergel */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_mergel(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_mergel(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_mergel(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mergel(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_mergel(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 
0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergel(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergel(vector signed long long __a, vector signed long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergel(vector signed long long __a, vector bool long long __b) { + return vec_perm(__a, (vector signed long long)__b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector signed long long __b) { + return vec_perm((vector signed long long)__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergel(vector unsigned long long __a, vector unsigned long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergel(vector unsigned long long __a, vector bool long long __b) { + return vec_perm(__a, (vector unsigned long long)__b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector unsigned long long __b) { + return vec_perm((vector unsigned long long)__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector bool long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a, + vector double __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergel(vector double __a, vector bool long long __b) { + return vec_perm(__a, (vector double)__b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector double __ATTRS_o_ai 
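+/* Editorial note: the 64-bit vec_mergel variants mirror vec_mergeh with the
+ * low elements: 0x08-0x0F select bytes 8-15 of __a (element 1) and 0x18-0x1F
+ * select bytes 8-15 of __b, giving (__a[1], __b[1]). */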
+vec_mergel(vector bool long long __a, vector double __b) { + return vec_perm((vector double)__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +#endif + +/* vec_vmrglb */ + +#define __builtin_altivec_vmrglb vec_vmrglb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmrglb(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmrglb(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vmrglb(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +/* vec_vmrglh */ + +#define __builtin_altivec_vmrglh vec_vmrglh + +static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmrglh(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vmrglh(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +/* vec_vmrglw */ + +#define __builtin_altivec_vmrglw vec_vmrglw + +static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmrglw(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +#ifdef __POWER8_VECTOR__ +/* vec_mergee */ + +static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + 
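+                  /* Editorial clarification: the even-word merge keeps words
+                     0 and 2 of each input, interleaved: (__a[0], __b[0],
+                     __a[2], __b[2]). */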
(vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_mergee(vector signed int __a, vector signed int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergee(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergee(vector bool long long __a, vector bool long long __b) { + return vec_mergeh(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergee(vector signed long long __a, vector signed long long __b) { + return vec_mergeh(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergee(vector unsigned long long __a, vector unsigned long long __b) { + return vec_mergeh(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_mergee(vector float __a, vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector double __ATTRS_o_ai +vec_mergee(vector double __a, vector double __b) { + return vec_mergeh(__a, __b); +} + +/* vec_mergeo */ + +static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_mergeo(vector signed int __a, vector signed int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergeo(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergeo(vector bool long long __a, vector bool long long __b) { + return vec_mergel(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeo(vector signed long long __a, vector signed long long __b) { + return vec_mergel(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) { + return vec_mergel(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_mergeo(vector float __a, vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector double __ATTRS_o_ai +vec_mergeo(vector double __a, vector double __b) { + return vec_mergel(__a, __b); +} + +#endif + +/* vec_mfvscr */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_mfvscr(void) { + return __builtin_altivec_mfvscr(); +} + +/* vec_min */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_min(vector signed char __a, vector signed 
char __b) { + return __builtin_altivec_vminsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_min(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vminsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_min(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vminsb(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_min(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vminub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_min(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vminub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_min(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vminub(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a, + vector short __b) { + return __builtin_altivec_vminsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a, + vector short __b) { + return __builtin_altivec_vminsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a, + vector bool short __b) { + return __builtin_altivec_vminsh(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_min(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_min(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_min(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vminuh(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a, + vector int __b) { + return __builtin_altivec_vminsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a, + vector int __b) { + return __builtin_altivec_vminsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a, + vector bool int __b) { + return __builtin_altivec_vminsw(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_min(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_min(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_min(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vminuw(__a, (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_min(vector signed long long __a, vector signed long long __b) { + return __builtin_altivec_vminsd(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_min(vector bool long long __a, vector signed long long __b) { + return __builtin_altivec_vminsd((vector signed long long)__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_min(vector signed long long __a, vector bool long long __b) { + return __builtin_altivec_vminsd(__a, (vector signed long long)__b); +} + +static __inline__ vector 
unsigned long long __ATTRS_o_ai +vec_min(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vminud(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_min(vector bool long long __a, vector unsigned long long __b) { + return __builtin_altivec_vminud((vector unsigned long long)__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_min(vector unsigned long long __a, vector bool long long __b) { + return __builtin_altivec_vminud(__a, (vector unsigned long long)__b); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvminsp(__a, __b); +#else + return __builtin_altivec_vminfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a, + vector double __b) { + return __builtin_vsx_xvmindp(__a, __b); +} +#endif + +/* vec_vminsb */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vminsb(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vminsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vminsb(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vminsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vminsb(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vminsb(__a, (vector signed char)__b); +} + +/* vec_vminub */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vminub(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vminub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vminub(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vminub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vminub(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vminub(__a, (vector unsigned char)__b); +} + +/* vec_vminsh */ + +static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a, + vector short __b) { + return __builtin_altivec_vminsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a, + vector short __b) { + return __builtin_altivec_vminsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a, + vector bool short __b) { + return __builtin_altivec_vminsh(__a, (vector short)__b); +} + +/* vec_vminuh */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vminuh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vminuh(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vminuh(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vminuh(__a, (vector unsigned short)__b); +} + +/* vec_vminsw */ + +static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a, + vector int __b) { + return __builtin_altivec_vminsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a, + vector int __b) { + return __builtin_altivec_vminsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a, + vector bool int __b) { + return 
__builtin_altivec_vminsw(__a, (vector int)__b); +} + +/* vec_vminuw */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vminuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vminuw(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vminuw(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vminuw(__a, (vector unsigned int)__b); +} + +/* vec_vminfp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vminfp(vector float __a, vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvminsp(__a, __b); +#else + return __builtin_altivec_vminfp(__a, __b); +#endif +} + +/* vec_mladd */ + +#define __builtin_altivec_vmladduhm vec_mladd + +static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a, + vector short __b, + vector short __c) { + return __a * __b + __c; +} + +static __inline__ vector short __ATTRS_o_ai vec_mladd( + vector short __a, vector unsigned short __b, vector unsigned short __c) { + return __a * (vector short)__b + (vector short)__c; +} + +static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a, + vector short __b, + vector short __c) { + return (vector short)__a * __b + __c; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mladd(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __a * __b + __c; +} + +/* vec_vmladduhm */ + +static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a, + vector short __b, + vector short __c) { + return __a * __b + __c; +} + +static __inline__ vector short __ATTRS_o_ai vec_vmladduhm( + vector short __a, vector unsigned short __b, vector unsigned short __c) { + return __a * (vector short)__b + (vector short)__c; +} + +static __inline__ vector short __ATTRS_o_ai +vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) { + return (vector short)__a * __b + __c; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmladduhm(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __a * __b + __c; +} + +/* vec_mradds */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_mradds(vector short __a, vector short __b, vector short __c) { + return __builtin_altivec_vmhraddshs(__a, __b, __c); +} + +/* vec_vmhraddshs */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vmhraddshs(vector short __a, vector short __b, vector short __c) { + return __builtin_altivec_vmhraddshs(__a, __b, __c); +} + +/* vec_msum */ + +static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a, + vector unsigned char __b, + vector int __c) { + return __builtin_altivec_vmsummbm(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_msum(vector unsigned char __a, vector unsigned char __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumubm(__a, __b, __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a, + vector short __b, + vector int __c) { + return __builtin_altivec_vmsumshm(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_msum(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhm(__a, __b, __c); +} + +/* 
vec_msumc */ + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_msumc(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vmsumcud(__a, __b, __c); +} +#endif + +/* vec_vmsummbm */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) { + return __builtin_altivec_vmsummbm(__a, __b, __c); +} + +/* vec_vmsumubm */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmsumubm(vector unsigned char __a, vector unsigned char __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumubm(__a, __b, __c); +} + +/* vec_vmsumshm */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmsumshm(vector short __a, vector short __b, vector int __c) { + return __builtin_altivec_vmsumshm(__a, __b, __c); +} + +/* vec_vmsumuhm */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhm(__a, __b, __c); +} + +/* vec_msums */ + +static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a, + vector short __b, + vector int __c) { + return __builtin_altivec_vmsumshs(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_msums(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhs(__a, __b, __c); +} + +/* vec_vmsumshs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmsumshs(vector short __a, vector short __b, vector int __c) { + return __builtin_altivec_vmsumshs(__a, __b, __c); +} + +/* vec_vmsumuhs */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhs(__a, __b, __c); +} + +/* vec_mtvscr */ + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +/* vec_mul */ + +/* Integer vector multiplication will involve multiplication of the odd/even + 
elements separately, then truncating the results and moving to the + result vector. +*/ +static __inline__ vector signed char __ATTRS_o_ai +vec_mul(vector signed char __a, vector signed char __b) { + return __a * __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_mul(vector unsigned char __a, vector unsigned char __b) { + return __a * __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_mul(vector signed short __a, vector signed short __b) { + return __a * __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mul(vector unsigned short __a, vector unsigned short __b) { + return __a * __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_mul(vector signed int __a, vector signed int __b) { + return __a * __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mul(vector unsigned int __a, vector unsigned int __b) { + return __a * __b; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mul(vector signed long long __a, vector signed long long __b) { + return __a * __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mul(vector unsigned long long __a, vector unsigned long long __b) { + return __a * __b; +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a, + vector float __b) { + return __a * __b; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a, + vector double __b) { + return __a * __b; +} +#endif + +/* The vmulos* and vmules* instructions have a big endian bias, so + we must reverse the meaning of "even" and "odd" for little endian. */ + +/* vec_mule */ + +static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a, + vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosb(__a, __b); +#else + return __builtin_altivec_vmulesb(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mule(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuloub(__a, __b); +#else + return __builtin_altivec_vmuleub(__a, __b); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a, + vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosh(__a, __b); +#else + return __builtin_altivec_vmulesh(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mule(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouh(__a, __b); +#else + return __builtin_altivec_vmuleuh(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mule(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosw(__a, __b); +#else + return __builtin_altivec_vmulesw(__a, __b); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mule(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouw(__a, __b); +#else + return __builtin_altivec_vmuleuw(__a, __b); +#endif +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_mule(vector signed long long __a, vector signed long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosd(__a, __b); +#else + return __builtin_altivec_vmulesd(__a, __b); +#endif +} + +static __inline__ vector 
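+/* Editorial illustration of the endian swap described above: with int
+ * elements, vec_mule must multiply program elements 0 and 2; under
+ * __LITTLE_ENDIAN__ those lanes are the ones the hardware numbers as odd,
+ * so the header emits vmulosw rather than vmulesw. */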
unsigned __int128 __ATTRS_o_ai +vec_mule(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuloud(__a, __b); +#else + return __builtin_altivec_vmuleud(__a, __b); +#endif +} +#endif + +/* vec_vmulesb */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vmulesb(vector signed char __a, vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosb(__a, __b); +#else + return __builtin_altivec_vmulesb(__a, __b); +#endif +} + +/* vec_vmuleub */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vmuleub(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuloub(__a, __b); +#else + return __builtin_altivec_vmuleub(__a, __b); +#endif +} + +/* vec_vmulesh */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmulesh(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosh(__a, __b); +#else + return __builtin_altivec_vmulesh(__a, __b); +#endif +} + +/* vec_vmuleuh */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouh(__a, __b); +#else + return __builtin_altivec_vmuleuh(__a, __b); +#endif +} + +/* vec_mulh */ + +#ifdef __POWER10_VECTOR__ +static __inline__ vector signed int __ATTRS_o_ai +vec_mulh(vector signed int __a, vector signed int __b) { + return __builtin_altivec_vmulhsw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mulh(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vmulhuw(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mulh(vector signed long long __a, vector signed long long __b) { + return __builtin_altivec_vmulhsd(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mulh(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vmulhud(__a, __b); +} +#endif + +/* vec_mulo */ + +static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a, + vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesb(__a, __b); +#else + return __builtin_altivec_vmulosb(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mulo(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleub(__a, __b); +#else + return __builtin_altivec_vmuloub(__a, __b); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a, + vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesh(__a, __b); +#else + return __builtin_altivec_vmulosh(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mulo(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuh(__a, __b); +#else + return __builtin_altivec_vmulouh(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mulo(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesw(__a, __b); +#else + return __builtin_altivec_vmulosw(__a, __b); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mulo(vector unsigned int __a, vector unsigned int 
__b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuw(__a, __b); +#else + return __builtin_altivec_vmulouw(__a, __b); +#endif +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_mulo(vector signed long long __a, vector signed long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesd(__a, __b); +#else + return __builtin_altivec_vmulosd(__a, __b); +#endif +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_mulo(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleud(__a, __b); +#else + return __builtin_altivec_vmuloud(__a, __b); +#endif +} +#endif + +/* vec_vmulosb */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vmulosb(vector signed char __a, vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesb(__a, __b); +#else + return __builtin_altivec_vmulosb(__a, __b); +#endif +} + +/* vec_vmuloub */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vmuloub(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleub(__a, __b); +#else + return __builtin_altivec_vmuloub(__a, __b); +#endif +} + +/* vec_vmulosh */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmulosh(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesh(__a, __b); +#else + return __builtin_altivec_vmulosh(__a, __b); +#endif +} + +/* vec_vmulouh */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmulouh(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuh(__a, __b); +#else + return __builtin_altivec_vmulouh(__a, __b); +#endif +} + +/* vec_nand */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed char __ATTRS_o_ai +vec_nand(vector signed char __a, vector signed char __b) { + return ~(__a & __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_nand(vector signed char __a, vector bool char __b) { + return ~(__a & __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_nand(vector bool char __a, vector signed char __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nand(vector unsigned char __a, vector unsigned char __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nand(vector unsigned char __a, vector bool char __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nand(vector bool char __a, vector unsigned char __b) { + return ~(__a & __b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a, + vector bool char __b) { + return ~(__a & __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_nand(vector signed short __a, vector signed short __b) { + return ~(__a & __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_nand(vector signed short __a, vector bool short __b) { + return ~(__a & __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_nand(vector bool short __a, vector signed short __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_nand(vector unsigned short __a, vector unsigned short __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned short 
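+/* Editorial note: every vec_nand overload computes ~(__a & __b); on POWER8
+ * VSX the compiler can fold this pattern into a single xxlnand. */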
__ATTRS_o_ai +vec_nand(vector unsigned short __a, vector bool short __b) { + return ~(__a & __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_nand(vector bool short __a, vector bool short __b) { + return ~(__a & __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_nand(vector signed int __a, vector signed int __b) { + return ~(__a & __b); +} + +static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a, + vector bool int __b) { + return ~(__a & __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_nand(vector bool int __a, vector signed int __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nand(vector unsigned int __a, vector unsigned int __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nand(vector unsigned int __a, vector bool int __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nand(vector bool int __a, vector unsigned int __b) { + return ~(__a & __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a, + vector bool int __b) { + return ~(__a & __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_nand(vector float __a, vector float __b) { + return (vector float)(~((vector unsigned int)__a & + (vector unsigned int)__b)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_nand(vector signed long long __a, vector signed long long __b) { + return ~(__a & __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_nand(vector signed long long __a, vector bool long long __b) { + return ~(__a & __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_nand(vector bool long long __a, vector signed long long __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_nand(vector unsigned long long __a, vector unsigned long long __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_nand(vector unsigned long long __a, vector bool long long __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_nand(vector bool long long __a, vector unsigned long long __b) { + return ~(__a & __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_nand(vector bool long long __a, vector bool long long __b) { + return ~(__a & __b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_nand(vector double __a, vector double __b) { + return (vector double)(~((vector unsigned long long)__a & + (vector unsigned long long)__b)); +} + +#endif + +/* vec_nmadd */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a, + vector float __b, + vector float __c) { + return __builtin_vsx_xvnmaddasp(__a, __b, __c); +} + +static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvnmaddadp(__a, __b, __c); +} +#endif + +/* vec_nmsub */ + +static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a, + vector float __b, + vector float __c) { +#ifdef __VSX__ + return __builtin_vsx_xvnmsubasp(__a, __b, __c); +#else + return __builtin_altivec_vnmsubfp(__a, __b, __c); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvnmsubadp(__a, __b, __c); +} +#endif + +/* vec_vnmsubfp */ + +static 
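+/* Editorial note: vec_nmadd negates the fused multiply-add, -(__a * __b +
+ * __c), while vec_nmsub and vec_vnmsubfp compute -(__a * __b - __c), i.e.
+ * __c - __a * __b up to rounding. */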
__inline__ vector float __attribute__((__always_inline__)) +vec_vnmsubfp(vector float __a, vector float __b, vector float __c) { + return __builtin_altivec_vnmsubfp(__a, __b, __c); +} + +/* vec_nor */ + +#define __builtin_altivec_vnor vec_nor + +static __inline__ vector signed char __ATTRS_o_ai +vec_nor(vector signed char __a, vector signed char __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nor(vector unsigned char __a, vector unsigned char __b) { + return ~(__a | __b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a, + vector bool char __b) { + return ~(__a | __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a, + vector short __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_nor(vector unsigned short __a, vector unsigned short __b) { + return ~(__a | __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_nor(vector bool short __a, vector bool short __b) { + return ~(__a | __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a, + vector int __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nor(vector unsigned int __a, vector unsigned int __b) { + return ~(__a | __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a, + vector bool int __b) { + return ~(__a | __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a, + vector float __b) { + vector unsigned int __res = + ~((vector unsigned int)__a | (vector unsigned int)__b); + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a, + vector double __b) { + vector unsigned long long __res = + ~((vector unsigned long long)__a | (vector unsigned long long)__b); + return (vector double)__res; +} +#endif + +/* vec_vnor */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vnor(vector signed char __a, vector signed char __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vnor(vector unsigned char __a, vector unsigned char __b) { + return ~(__a | __b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a, + vector bool char __b) { + return ~(__a | __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a, + vector short __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vnor(vector unsigned short __a, vector unsigned short __b) { + return ~(__a | __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vnor(vector bool short __a, vector bool short __b) { + return ~(__a | __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a, + vector int __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vnor(vector unsigned int __a, vector unsigned int __b) { + return ~(__a | __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a, + vector bool int __b) { + return ~(__a | __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a, + vector float __b) { + vector unsigned int __res = + ~((vector unsigned int)__a | (vector unsigned int)__b); + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_nor(vector signed long long __a, vector signed long long __b) { + return ~(__a | __b); +} + +static 
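+/* Editorial note: as with vec_nand, the compiler can fold ~(__a | __b) into
+ * a single xxlnor on VSX targets. */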
__inline__ vector unsigned long long __ATTRS_o_ai +vec_nor(vector unsigned long long __a, vector unsigned long long __b) { + return ~(__a | __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_nor(vector bool long long __a, vector bool long long __b) { + return ~(__a | __b); +} +#endif + +/* vec_or */ + +#define __builtin_altivec_vor vec_or + +static __inline__ vector signed char __ATTRS_o_ai +vec_or(vector signed char __a, vector signed char __b) { + return __a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_or(vector bool char __a, vector signed char __b) { + return (vector signed char)__a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a, + vector bool char __b) { + return __a | (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_or(vector unsigned char __a, vector unsigned char __b) { + return __a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_or(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_or(vector unsigned char __a, vector bool char __b) { + return __a | (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a, + vector bool char __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a, + vector short __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a, + vector short __b) { + return (vector short)__a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a, + vector bool short __b) { + return __a | (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_or(vector unsigned short __a, vector unsigned short __b) { + return __a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_or(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_or(vector unsigned short __a, vector bool short __b) { + return __a | (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a, + vector bool short __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a, + vector int __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a, + vector int __b) { + return (vector int)__a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a, + vector bool int __b) { + return __a | (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_or(vector unsigned int __a, vector unsigned int __b) { + return __a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_or(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_or(vector unsigned int __a, vector bool int __b) { + return __a | (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a, + vector bool int __b) { + return __a | __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector 
float __ATTRS_o_ai vec_or(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a, + vector double __b) { + return (vector double)((vector unsigned long long)__a | + (vector unsigned long long)__b); +} + +static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a, + vector bool long long __b) { + return (vector double)((vector unsigned long long)__a | + (vector unsigned long long)__b); +} + +static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a, + vector double __b) { + return (vector double)((vector unsigned long long)__a | + (vector unsigned long long)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_or(vector signed long long __a, vector signed long long __b) { + return __a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_or(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_or(vector signed long long __a, vector bool long long __b) { + return __a | (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_or(vector unsigned long long __a, vector unsigned long long __b) { + return __a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_or(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_or(vector unsigned long long __a, vector bool long long __b) { + return __a | (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_or(vector bool long long __a, vector bool long long __b) { + return __a | __b; +} +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed char __ATTRS_o_ai +vec_orc(vector signed char __a, vector signed char __b) { + return __a | ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_orc(vector signed char __a, vector bool char __b) { + return __a | ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_orc(vector bool char __a, vector signed char __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_orc(vector unsigned char __a, vector unsigned char __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_orc(vector unsigned char __a, vector bool char __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_orc(vector bool char __a, vector unsigned char __b) { + return __a | ~__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a, + vector bool char __b) { + return __a | ~__b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_orc(vector signed short __a, vector signed short __b) { + return __a | ~__b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_orc(vector signed short __a, vector bool short __b) { + return __a | ~__b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_orc(vector bool short __a, vector signed short __b) { + return __a | 
~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_orc(vector unsigned short __a, vector unsigned short __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_orc(vector unsigned short __a, vector bool short __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_orc(vector bool short __a, vector unsigned short __b) { + return __a | ~__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_orc(vector bool short __a, vector bool short __b) { + return __a | ~__b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_orc(vector signed int __a, vector signed int __b) { + return __a | ~__b; +} + +static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a, + vector bool int __b) { + return __a | ~__b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_orc(vector bool int __a, vector signed int __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_orc(vector unsigned int __a, vector unsigned int __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_orc(vector unsigned int __a, vector bool int __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_orc(vector bool int __a, vector unsigned int __b) { + return __a | ~__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a, + vector bool int __b) { + return __a | ~__b; +} + +static __inline__ vector float __ATTRS_o_ai +vec_orc(vector bool int __a, vector float __b) { + return (vector float)(__a | ~(vector unsigned int)__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_orc(vector float __a, vector bool int __b) { + return (vector float)((vector unsigned int)__a | ~__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_orc(vector signed long long __a, vector signed long long __b) { + return __a | ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_orc(vector signed long long __a, vector bool long long __b) { + return __a | ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_orc(vector bool long long __a, vector signed long long __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_orc(vector unsigned long long __a, vector unsigned long long __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_orc(vector unsigned long long __a, vector bool long long __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_orc(vector bool long long __a, vector unsigned long long __b) { + return __a | ~__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_orc(vector bool long long __a, vector bool long long __b) { + return __a | ~__b; +} + +static __inline__ vector double __ATTRS_o_ai +vec_orc(vector double __a, vector bool long long __b) { + return (vector double)((vector unsigned long long)__a | ~__b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_orc(vector bool long long __a, vector double __b) { + return (vector double)(__a | ~(vector unsigned long long)__b); +} +#endif + +/* vec_vor */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vor(vector signed char __a, vector signed char __b) { + return __a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vor(vector bool char __a, vector signed char __b) { + return (vector signed char)__a | __b; +} + +static __inline__ 
vector signed char __ATTRS_o_ai +vec_vor(vector signed char __a, vector bool char __b) { + return __a | (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vor(vector unsigned char __a, vector unsigned char __b) { + return __a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vor(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vor(vector unsigned char __a, vector bool char __b) { + return __a | (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a, + vector bool char __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a, + vector short __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a, + vector short __b) { + return (vector short)__a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a, + vector bool short __b) { + return __a | (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vor(vector unsigned short __a, vector unsigned short __b) { + return __a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vor(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vor(vector unsigned short __a, vector bool short __b) { + return __a | (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vor(vector bool short __a, vector bool short __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a, + vector int __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a, + vector int __b) { + return (vector int)__a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a, + vector bool int __b) { + return __a | (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vor(vector unsigned int __a, vector unsigned int __b) { + return __a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vor(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vor(vector unsigned int __a, vector bool int __b) { + return __a | (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a, + vector bool int __b) { + return __a | __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vor(vector signed long long __a, vector signed long long __b) { + return __a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vor(vector bool long 
long __a, vector signed long long __b) { + return (vector signed long long)__a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vor(vector signed long long __a, vector bool long long __b) { + return __a | (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vor(vector unsigned long long __a, vector unsigned long long __b) { + return __a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vor(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vor(vector unsigned long long __a, vector bool long long __b) { + return __a | (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vor(vector bool long long __a, vector bool long long __b) { + return __a | __b; +} +#endif + +/* vec_pack */ + +/* The various vector pack instructions have a big-endian bias, so for + little endian we must handle reversed element numbering. */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_pack(vector signed short __a, vector signed short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_pack(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_pack(vector bool short __a, vector bool short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a, + vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_pack(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a, + vector bool int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +#ifdef __VSX__ +static __inline__ vector signed int __ATTRS_o_ai +vec_pack(vector signed long long __a, vector signed long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector signed int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector signed int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_pack(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_pack(vector bool long long __a, vector bool long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector bool int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_pack(vector double __a, vector double __b) { + return (vector float) (__a[0], __a[1], __b[0], __b[1]); +} +#endif + +#ifdef __POWER9_VECTOR__ +static __inline__ vector unsigned short __ATTRS_o_ai +vec_pack_to_short_fp32(vector float __a, vector float __b) { + vector float __resa = __builtin_vsx_xvcvsphp(__a); + vector float __resb = __builtin_vsx_xvcvsphp(__b); +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned short)vec_mergee(__resa, __resb); +#else + return (vector unsigned short)vec_mergeo(__resa, __resb); +#endif +} + +#endif +/* vec_vpkuhum */ + +#define __builtin_altivec_vpkuhum vec_vpkuhum + +static __inline__ vector signed char __ATTRS_o_ai +vec_vpkuhum(vector signed short __a, vector signed short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 
0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vpkuhum(vector bool short __a, vector bool short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +/* vec_vpkuwum */ + +#define __builtin_altivec_vpkuwum vec_vpkuwum + +static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a, + vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vpkuwum(vector bool int __a, vector bool int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +/* vec_vpkudum */ + +#ifdef __POWER8_VECTOR__ +#define __builtin_altivec_vpkudum vec_vpkudum + +static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a, + vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vpkudum(vector bool long long __a, vector 
bool long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)vec_perm( + (vector long long)__a, (vector long long)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector bool int)vec_perm( + (vector long long)__a, (vector long long)__b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} +#endif + +/* vec_packpx */ + +static __inline__ vector pixel __attribute__((__always_inline__)) +vec_packpx(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector pixel)__builtin_altivec_vpkpx(__b, __a); +#else + return (vector pixel)__builtin_altivec_vpkpx(__a, __b); +#endif +} + +/* vec_vpkpx */ + +static __inline__ vector pixel __attribute__((__always_inline__)) +vec_vpkpx(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector pixel)__builtin_altivec_vpkpx(__b, __a); +#else + return (vector pixel)__builtin_altivec_vpkpx(__a, __b); +#endif +} + +/* vec_packs */ + +static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a, + vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshss(__b, __a); +#else + return __builtin_altivec_vpkshss(__a, __b); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_packs(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a, + vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswss(__b, __a); +#else + return __builtin_altivec_vpkswss(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_packs(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a, + vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdss(__b, __a); +#else + return __builtin_altivec_vpksdss(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_packs(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + +/* vec_vpkshss */ + +static __inline__ vector signed char __attribute__((__always_inline__)) +vec_vpkshss(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshss(__b, __a); +#else + return __builtin_altivec_vpkshss(__a, __b); +#endif +} + +/* vec_vpksdss */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a, + vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdss(__b, __a); +#else + return __builtin_altivec_vpksdss(__a, __b); +#endif +} +#endif + +/* vec_vpkuhus */ + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +/* 
vec_vpkudus */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + +/* vec_vpkswss */ + +static __inline__ vector signed short __attribute__((__always_inline__)) +vec_vpkswss(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswss(__b, __a); +#else + return __builtin_altivec_vpkswss(__a, __b); +#endif +} + +/* vec_vpkuwus */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +/* vec_packsu */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_packsu(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshus(__b, __a); +#else + return __builtin_altivec_vpkshus(__a, __b); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_packsu(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_packsu(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswus(__b, __a); +#else + return __builtin_altivec_vpkswus(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_packsu(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_packsu(vector long long __a, vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdus(__b, __a); +#else + return __builtin_altivec_vpksdus(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_packsu(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + +/* vec_vpkshus */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vpkshus(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshus(__b, __a); +#else + return __builtin_altivec_vpkshus(__a, __b); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vpkshus(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +/* vec_vpkswus */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vpkswus(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswus(__b, __a); +#else + return __builtin_altivec_vpkswus(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vpkswus(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +/* vec_vpksdus */ + +#ifdef 
__POWER8_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vpksdus(vector long long __a, vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdus(__b, __a); +#else + return __builtin_altivec_vpksdus(__a, __b); +#endif +} +#endif + +/* vec_perm */ + +// The vperm instruction is defined architecturally with a big-endian bias. +// For little endian, we swap the input operands and invert the permute +// control vector. Only the rightmost 5 bits matter, so we could use +// a vector of all 31s instead of all 255s to perform the inversion. +// However, when the PCV is not a constant, using 255 has an advantage +// in that the vec_xor can be recognized as a vec_nor (and for P8 and +// later, possibly a vec_nand). + +static __inline__ vector signed char __ATTRS_o_ai vec_perm( + vector signed char __a, vector signed char __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_perm(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned char)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector unsigned char)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a, + vector signed short __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_perm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned short)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector unsigned short)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai vec_perm( + vector bool short __a, vector bool short __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ 
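+  // As in the overloads above, XOR-ing the PCV with 0xFF complements each +  // byte (0xFF ^ x == 255 - x); after vperm keeps only the low 5 bits this +  // is 31 - (x % 32), i.e. the mirrored byte index for the swapped b:a pair.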
+ vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, + vector pixel __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a, + vector signed int __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d); +#else + return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_perm(vector unsigned int __a, vector unsigned int __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a, + vector float __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector float)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector float)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +#ifdef __VSX__ +static __inline__ vector long long __ATTRS_o_ai +vec_perm(vector signed long long __a, vector signed long long __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed long long)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector signed long long)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_perm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c) 
{ +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned long long)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector unsigned long long)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_perm(vector bool long long __a, vector bool long long __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool long long)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector bool long long)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_perm(vector double __a, vector double __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector double)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector double)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} +#endif + +/* vec_vperm */ + +static __inline__ vector signed char __ATTRS_o_ai vec_vperm( + vector signed char __a, vector signed char __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vperm(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vperm( + vector bool char __a, vector bool char __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector short __ATTRS_o_ai +vec_vperm(vector short __a, vector short __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vperm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_vperm( + vector bool short __a, vector bool short __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector pixel __ATTRS_o_ai +vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a, + vector int __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vperm(vector unsigned int __a, vector unsigned int __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector float __ATTRS_o_ai +vec_vperm(vector float __a, vector float __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +#ifdef __VSX__ +static __inline__ vector long long __ATTRS_o_ai vec_vperm( + vector long long __a, vector long long __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static 
__inline__ vector unsigned long long __ATTRS_o_ai +vec_vperm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector double __ATTRS_o_ai +vec_vperm(vector double __a, vector double __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} +#endif + +/* vec_re */ + +static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvresp(__a); +#else + return __builtin_altivec_vrefp(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) { + return __builtin_vsx_xvredp(__a); +} +#endif + +/* vec_vrefp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrefp(vector float __a) { + return __builtin_altivec_vrefp(__a); +} + +/* vec_rl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_rl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_rl(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a, + vector unsigned short __b) { + return __builtin_altivec_vrlh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_rl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vrlw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_rl(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_rl(vector signed long long __a, vector unsigned long long __b) { + return __builtin_altivec_vrld(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_rl(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vrld(__a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) { + // Rotate each element of __a left by __b bits, matching the narrower + // overloads above; shift the unsigned value so that no sign bits are + // smeared into the rotated-in half. + vector unsigned __int128 __ua = (vector unsigned __int128)__a; + return (vector signed __int128)((__ua << __b)|(__ua >> ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __b))); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_rl(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return (__a << __b)|(__a >> ((__CHAR_BIT__ * sizeof(vector unsigned __int128)) - __b)); +} +#endif + +/* vec_rlmi */ +#ifdef __POWER9_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_rlmi(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_altivec_vrlwmi(__a, __c, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_rlmi(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned long long __c) { + return __builtin_altivec_vrldmi(__a, __c, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_rlmi(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return 
__builtin_altivec_vrlqmi(__a, __c, __b); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_rlmi(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vrlqmi(__a, __c, __b); +} +#endif + +/* vec_rlnm */ +#ifdef __POWER9_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_rlnm(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + vector unsigned int OneByte = { 0x8, 0x8, 0x8, 0x8 }; + return __builtin_altivec_vrlwnm(__a, ((__c << OneByte) | __b)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_rlnm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned long long __c) { + vector unsigned long long OneByte = { 0x8, 0x8 }; + return __builtin_altivec_vrldnm(__a, ((__c << OneByte) | __b)); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_rlnm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + // Merge __b and __c using an appropriate shuffle. + vector unsigned char TmpB = (vector unsigned char)__b; + vector unsigned char TmpC = (vector unsigned char)__c; + vector unsigned char MaskAndShift = +#ifdef __LITTLE_ENDIAN__ + __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, -1, -1, -1, 16, 0, + 1, -1, -1, -1, -1, -1); +#else + __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1, + -1, -1, -1, -1, -1, -1, -1); +#endif + return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_rlnm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + // Merge __b and __c using an appropriate shuffle. 
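+  // Same packing as in the unsigned variant above; shuffle lanes written as +  // -1 in __builtin_shufflevector are unspecified filler, and only the +  // control bytes the instruction consumes are populated.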
+ vector unsigned char TmpB = (vector unsigned char)__b; + vector unsigned char TmpC = (vector unsigned char)__c; + vector unsigned char MaskAndShift = +#ifdef __LITTLE_ENDIAN__ + __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, -1, -1, -1, 16, 0, + 1, -1, -1, -1, -1, -1); +#else + __builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1, + -1, -1, -1, -1, -1, -1, -1); +#endif + return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift); +} +#endif + +/* vec_vrlb */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vrlb(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vrlb(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +/* vec_vrlh */ + +static __inline__ vector short __ATTRS_o_ai +vec_vrlh(vector short __a, vector unsigned short __b) { + return __builtin_altivec_vrlh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vrlh(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b); +} + +/* vec_vrlw */ + +static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vrlw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vrlw(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b); +} + +/* vec_round */ + +static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) { + return __builtin_altivec_vrfin(__a); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) { + return __builtin_vsx_xvrdpi(__a); +} + +/* vec_rint */ + +static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) { + return __builtin_vsx_xvrspic(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) { + return __builtin_vsx_xvrdpic(__a); +} + +/* vec_roundc */ + +static __inline__ vector float __ATTRS_o_ai vec_roundc(vector float __a) { + return __builtin_vsx_xvrspic(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_roundc(vector double __a) { + return __builtin_vsx_xvrdpic(__a); +} + +/* vec_nearbyint */ + +static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) { + return __builtin_vsx_xvrspi(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) { + return __builtin_vsx_xvrdpi(__a); +} +#endif + +/* vec_vrfin */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfin(vector float __a) { + return __builtin_altivec_vrfin(__a); +} + +/* vec_sqrt */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) { + return __builtin_vsx_xvsqrtsp(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) { + return __builtin_vsx_xvsqrtdp(__a); +} +#endif + +/* vec_rsqrte */ + +static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrsqrtesp(__a); +#else + return __builtin_altivec_vrsqrtefp(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) { + return __builtin_vsx_xvrsqrtedp(__a); +} +#endif + +static vector float __ATTRS_o_ai vec_rsqrt(vector float 
__a) { + return __builtin_ppc_rsqrtf(__a); +} + +#ifdef __VSX__ +static vector double __ATTRS_o_ai vec_rsqrt(vector double __a) { + return __builtin_ppc_rsqrtd(__a); +} +#endif + +/* vec_vrsqrtefp */ + +static __inline__ __vector float __attribute__((__always_inline__)) +vec_vrsqrtefp(vector float __a) { + return __builtin_altivec_vrsqrtefp(__a); +} + +/* vec_xvtsqrt */ + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_test_swsqrt(vector double __a) { + return __builtin_vsx_xvtsqrtdp(__a); +} + +static __inline__ int __ATTRS_o_ai vec_test_swsqrts(vector float __a) { + return __builtin_vsx_xvtsqrtsp(__a); +} +#endif + +/* vec_sel */ + +#define __builtin_altivec_vsel_4si vec_sel + +static __inline__ vector signed char __ATTRS_o_ai vec_sel( + vector signed char __a, vector signed char __b, vector unsigned char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sel(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai vec_sel( + vector unsigned char __a, vector unsigned char __b, vector bool char __c) { + return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) { + return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a, + vector bool char __b, + vector bool char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a, + vector short __b, + vector unsigned short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a, + vector short __b, + vector bool short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sel(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sel(vector unsigned short __a, vector unsigned short __b, + vector bool short __c) { + return (__a & ~(vector unsigned short)__c) | + (__b & (vector unsigned short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_sel( + vector bool short __a, vector bool short __b, vector unsigned short __c) { + return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a, + vector int __b, + vector unsigned int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a, + vector int __b, + vector bool int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sel( + vector unsigned int 
__a, vector unsigned int __b, vector unsigned int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) { + return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) { + return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a, + vector bool int __b, + vector bool int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a, + vector float __b, + vector unsigned int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a, + vector float __b, + vector bool int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai +vec_sel(vector double __a, vector double __b, vector bool long long __c) { + vector long long __res = ((vector long long)__a & ~(vector long long)__c) | + ((vector long long)__b & (vector long long)__c); + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai +vec_sel(vector double __a, vector double __b, vector unsigned long long __c) { + vector long long __res = ((vector long long)__a & ~(vector long long)__c) | + ((vector long long)__b & (vector long long)__c); + return (vector double)__res; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_sel(vector bool long long __a, vector bool long long __b, + vector bool long long __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_sel(vector bool long long __a, vector bool long long __b, + vector unsigned long long __c) { + return (__a & ~(vector bool long long)__c) | + (__b & (vector bool long long)__c); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sel(vector signed long long __a, vector signed long long __b, + vector bool long long __c) { + return (__a & ~(vector signed long long)__c) | + (__b & (vector signed long long)__c); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sel(vector signed long long __a, vector signed long long __b, + vector unsigned long long __c) { + return (__a & ~(vector signed long long)__c) | + (__b & (vector signed long long)__c); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sel(vector unsigned long long __a, vector unsigned long long __b, + vector bool long long __c) { + return (__a & ~(vector unsigned long long)__c) | + (__b & (vector unsigned long long)__c); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sel(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned long long __c) { + return (__a & ~__c) | (__b & __c); +} +#endif + +/* vec_vsel */ + +static __inline__ vector signed char __ATTRS_o_ai vec_vsel( + vector signed char __a, vector signed char __b, vector unsigned char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsel(vector signed char __a, vector signed char __b, vector bool char 
__c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsel(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel( + vector unsigned char __a, vector unsigned char __b, vector bool char __c) { + return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) { + return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a, + vector bool char __b, + vector bool char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector short __ATTRS_o_ai +vec_vsel(vector short __a, vector short __b, vector unsigned short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a, + vector short __b, + vector bool short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsel(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsel(vector unsigned short __a, vector unsigned short __b, + vector bool short __c) { + return (__a & ~(vector unsigned short)__c) | + (__b & (vector unsigned short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_vsel( + vector bool short __a, vector bool short __b, vector unsigned short __c) { + return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a, + vector int __b, + vector unsigned int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a, + vector int __b, + vector bool int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel( + vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel( + vector unsigned int __a, vector unsigned int __b, vector bool int __c) { + return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) { + return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a, + vector bool int __b, + vector bool int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a, + vector float __b, + vector unsigned int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a, + vector float 
__b, + vector bool int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +/* vec_sl */ + +// vec_sl does modulo arithmetic on __b first, so __b is allowed to be more +// than the length of __a. +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sl(vector unsigned char __a, vector unsigned char __b) { + return __a << (__b % + (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)vec_sl((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sl(vector unsigned short __a, vector unsigned short __b) { + return __a << (__b % (vector unsigned short)(sizeof(unsigned short) * + __CHAR_BIT__)); +} + +static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a, + vector unsigned short __b) { + return (vector short)vec_sl((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sl(vector unsigned int __a, vector unsigned int __b) { + return __a << (__b % + (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__)); +} + +static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a, + vector unsigned int __b) { + return (vector int)vec_sl((vector unsigned int)__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sl(vector unsigned long long __a, vector unsigned long long __b) { + return __a << (__b % (vector unsigned long long)(sizeof(unsigned long long) * + __CHAR_BIT__)); +} + +static __inline__ vector long long __ATTRS_o_ai +vec_sl(vector long long __a, vector unsigned long long __b) { + return (vector long long)vec_sl((vector unsigned long long)__a, __b); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vspltb(vector unsigned char __a, unsigned char __b); +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sl(vector unsigned long long __a, vector unsigned long long __b) { + __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__); + + // Big endian element one (the right doubleword) can be left shifted as-is. + // The other element needs to be swapped into the right doubleword and + // shifted. Then the right doublewords of the two result vectors are merged. 
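+// Put differently, after the modulo above the goal is
+//   result[i] = __a[i] << __b[i]   for each doubleword i,
+// but vslo (shift left by octets) and vsl (shift left by bits) shift the
+// full 128-bit register and take their count from the low-order byte of the
+// shift operand, hence the swap-and-merge sequence that follows.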
+ vector signed long long __rightelt = + (vector signed long long)__builtin_altivec_vslo((vector signed int)__a, + (vector signed int)__b); +#ifdef __LITTLE_ENDIAN__ + __rightelt = (vector signed long long)__builtin_altivec_vsl( + (vector signed int)__rightelt, + (vector signed int)vec_vspltb((vector unsigned char)__b, 0)); +#else + __rightelt = (vector signed long long)__builtin_altivec_vsl( + (vector signed int)__rightelt, + (vector signed int)vec_vspltb((vector unsigned char)__b, 15)); +#endif + __a = __builtin_shufflevector(__a, __a, 1, 0); + __b = __builtin_shufflevector(__b, __b, 1, 0); + vector signed long long __leftelt = + (vector signed long long)__builtin_altivec_vslo((vector signed int)__a, + (vector signed int)__b); +#ifdef __LITTLE_ENDIAN__ + __leftelt = (vector signed long long)__builtin_altivec_vsl( + (vector signed int)__leftelt, + (vector signed int)vec_vspltb((vector unsigned char)__b, 0)); + return (vector unsigned long long)__builtin_shufflevector(__rightelt, + __leftelt, 0, 2); +#else + __leftelt = (vector signed long long)__builtin_altivec_vsl( + (vector signed int)__leftelt, + (vector signed int)vec_vspltb((vector unsigned char)__b, 15)); + return (vector unsigned long long)__builtin_shufflevector(__leftelt, + __rightelt, 1, 3); +#endif +} + +static __inline__ vector long long __ATTRS_o_ai +vec_sl(vector long long __a, vector unsigned long long __b) { + return (vector long long)vec_sl((vector unsigned long long)__a, __b); +} +#endif + +/* vec_vslb */ + +#define __builtin_altivec_vslb vec_vslb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vslb(vector signed char __a, vector unsigned char __b) { + return vec_sl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vslb(vector unsigned char __a, vector unsigned char __b) { + return vec_sl(__a, __b); +} + +/* vec_vslh */ + +#define __builtin_altivec_vslh vec_vslh + +static __inline__ vector short __ATTRS_o_ai +vec_vslh(vector short __a, vector unsigned short __b) { + return vec_sl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vslh(vector unsigned short __a, vector unsigned short __b) { + return vec_sl(__a, __b); +} + +/* vec_vslw */ + +#define __builtin_altivec_vslw vec_vslw + +static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a, + vector unsigned int __b) { + return vec_sl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vslw(vector unsigned int __a, vector unsigned int __b) { + return vec_sl(__a, __b); +} + +/* vec_sld */ + +#define __builtin_altivec_vsldoi_4si vec_sld + +static __inline__ vector signed char __ATTRS_o_ai vec_sld( + vector signed char __a, vector signed char __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sld(vector unsigned char __a, vector unsigned char __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - 
__d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector signed short __ATTRS_o_ai vec_sld( + vector signed short __a, vector signed short __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sld(vector unsigned short __a, vector unsigned short __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, + vector pixel __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static 
__inline__ vector signed int __ATTRS_o_ai +vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sld( + vector unsigned int __a, vector unsigned int __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a, + vector bool int __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a, + vector float __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_sld(vector bool long long __a, vector bool long long __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sld(vector signed long long __a, vector signed long long __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - 
__d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sld(vector unsigned long long __a, vector unsigned long long __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a, + vector double __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} +#endif + +/* vec_sldw */ +static __inline__ vector signed char __ATTRS_o_ai vec_sldw( + vector signed char __a, vector signed char __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sldw(vector unsigned char __a, vector unsigned char __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector signed short __ATTRS_o_ai vec_sldw( + vector signed short __a, vector signed short __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sldw(vector unsigned short __a, vector unsigned short __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw( + vector unsigned int __a, vector unsigned int __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector float __ATTRS_o_ai vec_sldw( + vector float __a, vector float __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sldw(vector signed long long __a, vector signed long long __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sldw(vector unsigned long long __a, vector unsigned long long __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector double __ATTRS_o_ai vec_sldw( + vector double __a, vector double __b, unsigned const int __c) { + return 
vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} +#endif + +#ifdef __POWER9_VECTOR__ +/* vec_slv */ +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slv(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vslv(__a, __b); +} + +/* vec_srv */ +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srv(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vsrv(__a, __b); +} +#endif + +/* vec_vsldoi */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi( + vector unsigned char __a, vector unsigned char __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a, + vector short __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi( + vector unsigned short __a, vector unsigned short __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a, + vector pixel __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d 
+ 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a, + vector int __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi( + vector unsigned int __a, vector unsigned int __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a, + vector float __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +/* vec_sll */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sll(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sll(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sll(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sll(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sll(vector bool char __a, vector 
unsigned short __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sll(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sll(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sll(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sll(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned 
int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sll(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sll(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sll(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sll(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sll(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vsl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsl(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsl(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsl(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsl(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsl(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector 
int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsl(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsl(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsl(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsl(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsl(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector 
bool int __ATTRS_o_ai +vec_vsl(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +/* vec_slo */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_slo(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_slo(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slo(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slo(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_slo(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_slo(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_slo(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_slo(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_slo(vector signed long long __a, vector signed char __b) { + return (vector signed long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + 
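+// Note: vec_slo shifts the whole 128-bit vector left by octets; the byte
+// count comes from bits 121:124 of __b (the low-order byte of __b divided
+// by 8, modulo 16). For example, vec_slo(__a, vec_splat_u8(8)) is expected
+// to shift __a left by one byte.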
+static __inline__ vector signed long long __ATTRS_o_ai +vec_slo(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_slo(vector unsigned long long __a, vector signed char __b) { + return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_slo(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vslo */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vslo(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vslo(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vslo(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vslo(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vslo(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vslo(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vslo(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vslo(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a, + vector 
signed char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +/* vec_splat */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_splat(vector signed char __a, unsigned const int __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_splat(vector unsigned char __a, unsigned const int __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_splat(vector bool char __a, unsigned const int __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F)); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_splat(vector signed short __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_splat(vector unsigned short __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_splat(vector bool short __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a, + unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_splat(vector signed int __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_splat(vector unsigned int __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_splat(vector bool int __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a, + unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a, + unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + 
unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +static __inline__ vector bool long long __ATTRS_o_ai +vec_splat(vector bool long long __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_splat(vector signed long long __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_splat(vector unsigned long long __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +#endif + +/* vec_vspltb */ + +#define __builtin_altivec_vspltb vec_vspltb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vspltb(vector signed char __a, unsigned char __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vspltb(vector unsigned char __a, unsigned char __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a, + unsigned char __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b)); +} + +/* vec_vsplth */ + +#define __builtin_altivec_vsplth vec_vsplth + +static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a, + unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsplth(vector unsigned short __a, unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsplth(vector bool short __a, unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a, + unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +/* vec_vspltw */ + +#define __builtin_altivec_vspltw vec_vspltw + +static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a, + unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + 
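+// Note: unlike vec_splat, which masks the element index (__b & 0x03 for
+// words), the vec_vsplt* variants scale __b without masking, so __b should
+// be a valid element index for the type. For example, vec_vspltw(__a, 2)
+// replicates word element 2 of __a across all four lanes.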
+static __inline__ vector unsigned int __ATTRS_o_ai +vec_vspltw(vector unsigned int __a, unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a, + unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a, + unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +/* vec_splat_s8 */ + +#define __builtin_altivec_vspltisb vec_splat_s8 + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector signed char __ATTRS_o_ai +vec_splat_s8(signed char __a) { + return (vector signed char)(__a); +} + +/* vec_vspltisb */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector signed char __ATTRS_o_ai +vec_vspltisb(signed char __a) { + return (vector signed char)(__a); +} + +/* vec_splat_s16 */ + +#define __builtin_altivec_vspltish vec_splat_s16 + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) { + return (vector short)(__a); +} + +/* vec_vspltish */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) { + return (vector short)(__a); +} + +/* vec_splat_s32 */ + +#define __builtin_altivec_vspltisw vec_splat_s32 + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) { + return (vector int)(__a); +} + +/* vec_vspltisw */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) { + return (vector int)(__a); +} + +/* vec_splat_u8 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector unsigned char __ATTRS_o_ai +vec_splat_u8(unsigned char __a) { + return (vector unsigned char)(__a); +} + +/* vec_splat_u16 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector unsigned short __ATTRS_o_ai +vec_splat_u16(signed char __a) { + return (vector unsigned short)(__a); +} + +/* vec_splat_u32 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector unsigned int __ATTRS_o_ai +vec_splat_u32(signed char __a) { + return (vector unsigned int)(__a); +} + +/* vec_sr */ + +// vec_sr does modulo arithmetic on __b first, so __b is allowed to be more +// than the length of __a. 
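+// For example, with 32-bit word elements a shift count of 33 is reduced to
+// 33 % 32 == 1, so
+//   vec_sr((vector unsigned int){8, 8, 8, 8}, (vector unsigned int){33, 33, 33, 33})
+// yields (vector unsigned int){4, 4, 4, 4}.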
+static __inline__ vector unsigned char __ATTRS_o_ai +vec_sr(vector unsigned char __a, vector unsigned char __b) { + return __a >> + (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sr(vector signed char __a, vector unsigned char __b) { + return (vector signed char)vec_sr((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sr(vector unsigned short __a, vector unsigned short __b) { + return __a >> + (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__)); +} + +static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a, + vector unsigned short __b) { + return (vector short)vec_sr((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sr(vector unsigned int __a, vector unsigned int __b) { + return __a >> + (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__)); +} + +static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a, + vector unsigned int __b) { + return (vector int)vec_sr((vector unsigned int)__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sr(vector unsigned long long __a, vector unsigned long long __b) { + return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) * + __CHAR_BIT__)); +} + +static __inline__ vector long long __ATTRS_o_ai +vec_sr(vector long long __a, vector unsigned long long __b) { + return (vector long long)vec_sr((vector unsigned long long)__a, __b); +} +#else +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sr(vector unsigned long long __a, vector unsigned long long __b) { + __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__); + + // Big endian element zero (the left doubleword) can be right shifted as-is. + // However the shift amount must be in the right doubleword. + // The other element needs to be swapped into the left doubleword and + // shifted. Then the left doublewords of the two result vectors are merged. 
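+// As with vec_sl above, vsro and vsr take their shift count from the
+// low-order byte of the shift operand: the left doubleword is shifted using
+// __b with its doublewords swapped, the right doubleword is shifted after
+// swapping __a itself, and the usable half of each partial result is merged.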
+ vector unsigned long long __swapshift = + __builtin_shufflevector(__b, __b, 1, 0); + vector unsigned long long __leftelt = + (vector unsigned long long)__builtin_altivec_vsro( + (vector signed int)__a, (vector signed int)__swapshift); +#ifdef __LITTLE_ENDIAN__ + __leftelt = (vector unsigned long long)__builtin_altivec_vsr( + (vector signed int)__leftelt, + (vector signed int)vec_vspltb((vector unsigned char)__swapshift, 0)); +#else + __leftelt = (vector unsigned long long)__builtin_altivec_vsr( + (vector signed int)__leftelt, + (vector signed int)vec_vspltb((vector unsigned char)__swapshift, 15)); +#endif + __a = __builtin_shufflevector(__a, __a, 1, 0); + vector unsigned long long __rightelt = + (vector unsigned long long)__builtin_altivec_vsro((vector signed int)__a, + (vector signed int)__b); +#ifdef __LITTLE_ENDIAN__ + __rightelt = (vector unsigned long long)__builtin_altivec_vsr( + (vector signed int)__rightelt, + (vector signed int)vec_vspltb((vector unsigned char)__b, 0)); + return __builtin_shufflevector(__rightelt, __leftelt, 1, 3); +#else + __rightelt = (vector unsigned long long)__builtin_altivec_vsr( + (vector signed int)__rightelt, + (vector signed int)vec_vspltb((vector unsigned char)__b, 15)); + return __builtin_shufflevector(__leftelt, __rightelt, 0, 2); +#endif +} + +static __inline__ vector long long __ATTRS_o_ai +vec_sr(vector long long __a, vector unsigned long long __b) { + return (vector long long)vec_sr((vector unsigned long long)__a, __b); +} +#endif + +/* vec_vsrb */ + +#define __builtin_altivec_vsrb vec_vsrb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsrb(vector signed char __a, vector unsigned char __b) { + return vec_sr(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsrb(vector unsigned char __a, vector unsigned char __b) { + return vec_sr(__a, __b); +} + +/* vec_vsrh */ + +#define __builtin_altivec_vsrh vec_vsrh + +static __inline__ vector short __ATTRS_o_ai +vec_vsrh(vector short __a, vector unsigned short __b) { + return vec_sr(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsrh(vector unsigned short __a, vector unsigned short __b) { + return vec_sr(__a, __b); +} + +/* vec_vsrw */ + +#define __builtin_altivec_vsrw vec_vsrw + +static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a, + vector unsigned int __b) { + return vec_sr(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsrw(vector unsigned int __a, vector unsigned int __b) { + return vec_sr(__a, __b); +} + +/* vec_sra */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sra(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sra(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a, + vector unsigned short __b) { + return __builtin_altivec_vsrah(__a, (vector unsigned short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sra(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vsraw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sra(vector unsigned 
int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sra(vector signed long long __a, vector unsigned long long __b) { + return __a >> __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sra(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)((vector signed long long)__a >> __b); +} +#else +static __inline__ vector signed long long __ATTRS_o_ai +vec_sra(vector signed long long __a, vector unsigned long long __b) { + __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__); + return __a >> __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sra(vector unsigned long long __a, vector unsigned long long __b) { + __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__); + return (vector unsigned long long)((vector signed long long)__a >> __b); +} +#endif + +/* vec_vsrab */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsrab(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsrab(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +/* vec_vsrah */ + +static __inline__ vector short __ATTRS_o_ai +vec_vsrah(vector short __a, vector unsigned short __b) { + return __builtin_altivec_vsrah(__a, (vector unsigned short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsrah(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b); +} + +/* vec_vsraw */ + +static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vsraw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsraw(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b); +} + +/* vec_srl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_srl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_srl(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_srl(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai 
+vec_srl(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_srl(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_srl(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_srl(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_srl(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_srl(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int __a, vector unsigned char __b) { + return (vector 
unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_srl(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_srl(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_srl(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_srl(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_srl(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vsr */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsr(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsr(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsr(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsr(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsr(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsr(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a, + vector unsigned char __b) { + return (vector 
short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsr(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsr(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsr(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsr(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector 
int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsr(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsr(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +/* vec_sro */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sro(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sro(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sro(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sro(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sro(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sro(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sro(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sro(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + 
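+// vec_sro shifts the entire vector right by whole octets, filling with
+// zeros; the byte count is taken from bits 121:124 of __b, i.e. it is
+// encoded as N << 3. A minimal sketch (assumes a one-byte shift, N == 1,
+// with __v standing in for some vector unsigned char value):
+//
+//   vector unsigned char __n = vec_splat_u8(1 << 3);
+//   vector unsigned char __r = vec_sro(__v, __n);  // __v moved right one octet
+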
+#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sro(vector signed long long __a, vector signed char __b) { + return (vector signed long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sro(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sro(vector unsigned long long __a, vector signed char __b) { + return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sro(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vsro */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsro(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsro(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsro(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsro(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsro(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsro(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsro(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai 
+vec_vsro(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +/* vec_st */ + +static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, long __b, + vector signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, long __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, long __b, + vector unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b, + vector bool char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector short __a, long __b, + vector short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector short __a, long __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, long __b, + vector unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b, + vector bool short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b, + vector pixel *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector int __a, long __b, + vector int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector int __a, long __b, int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector 
unsigned int __a, long __b, + vector unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b, + int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b, + vector bool int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector float __a, long __b, + vector float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector float __a, long __b, + float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +/* vec_stvx */ + +static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, long __b, + vector signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, long __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, long __b, + vector unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b, + vector bool char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, long __b, + vector short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, long __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, long __b, + vector unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b, + vector bool short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b, + unsigned 
short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b, + vector pixel *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, long __b, + vector int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, long __b, + int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, long __b, + vector unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b, + int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b, + vector bool int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, long __b, + vector float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, long __b, + float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +/* vec_ste */ + +static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, long __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, long __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, long __b, + short *__c) { + __builtin_altivec_stvehx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, long __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, long __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, long __b, int *__c) { + __builtin_altivec_stvewx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector 
bool int __a, long __b, + int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, long __b, + float *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +/* vec_stvebx */ + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, long __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a, + long __b, unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, long __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, long __b, + unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +/* vec_stvehx */ + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, long __b, + short *__c) { + __builtin_altivec_stvehx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a, + long __b, unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, long __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, long __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, long __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +/* vec_stvewx */ + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, long __b, + int *__c) { + __builtin_altivec_stvewx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, long __b, + int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, long __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, long __b, + float *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +/* vec_stl */ + +static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b, + vector unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void 
__ATTRS_o_ai vec_stl(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b, + vector short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b, + vector unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, + vector pixel *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, + vector int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b, + vector float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +/* vec_stvxl */ + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_altivec_stvxl((vector 
int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b, + vector unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, + vector short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, + int __b, unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, + vector pixel *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, + vector int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, + int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai 
vec_stvxl(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, + vector float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +/* vec_sub */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sub(vector signed char __a, vector signed char __b) { + return __a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sub(vector bool char __a, vector signed char __b) { + return (vector signed char)__a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sub(vector signed char __a, vector bool char __b) { + return __a - (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sub(vector unsigned char __a, vector unsigned char __b) { + return __a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sub(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sub(vector unsigned char __a, vector bool char __b) { + return __a - (vector unsigned char)__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a, + vector short __b) { + return __a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a, + vector short __b) { + return (vector short)__a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a, + vector bool short __b) { + return __a - (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sub(vector unsigned short __a, vector unsigned short __b) { + return __a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sub(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sub(vector unsigned short __a, vector bool short __b) { + return __a - (vector unsigned short)__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a, + vector int __b) { + return __a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a, + vector int __b) { + return (vector int)__a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a, + vector bool int __b) { + return __a - (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sub(vector unsigned int __a, vector unsigned int __b) { + return __a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sub(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sub(vector unsigned int __a, vector bool int __b) { + return __a - (vector unsigned int)__b; +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \ + defined(__SIZEOF_INT128__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sub(vector signed __int128 __a, vector signed __int128 __b) { + return __a - __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a - __b; +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) && + // defined(__SIZEOF_INT128__) + +#ifdef __VSX__ +static __inline__ 
vector signed long long __ATTRS_o_ai +vec_sub(vector signed long long __a, vector signed long long __b) { + return __a - __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sub(vector unsigned long long __a, vector unsigned long long __b) { + return __a - __b; +} + +static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a, + vector double __b) { + return __a - __b; +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a, + vector float __b) { + return __a - __b; +} + +/* vec_vsububm */ + +#define __builtin_altivec_vsububm vec_vsububm + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsububm(vector signed char __a, vector signed char __b) { + return __a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsububm(vector bool char __a, vector signed char __b) { + return (vector signed char)__a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsububm(vector signed char __a, vector bool char __b) { + return __a - (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububm(vector unsigned char __a, vector unsigned char __b) { + return __a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububm(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububm(vector unsigned char __a, vector bool char __b) { + return __a - (vector unsigned char)__b; +} + +/* vec_vsubuhm */ + +#define __builtin_altivec_vsubuhm vec_vsubuhm + +static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a, + vector short __b) { + return __a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a, + vector short __b) { + return (vector short)__a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a, + vector bool short __b) { + return __a - (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) { + return __a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector unsigned short __a, vector bool short __b) { + return __a - (vector unsigned short)__b; +} + +/* vec_vsubuwm */ + +#define __builtin_altivec_vsubuwm vec_vsubuwm + +static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, + vector int __b) { + return __a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a, + vector int __b) { + return (vector int)__a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, + vector bool int __b) { + return __a - (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) { + return __a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector unsigned int __a, vector bool int __b) { + return __a - (vector unsigned int)__b; +} + +/* vec_vsubfp */ + +#define __builtin_altivec_vsubfp vec_vsubfp + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vsubfp(vector float 
__a, vector float __b) { + return __a - __b; +} + +/* vec_subc */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_subc(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a, + (vector unsigned int) __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subc(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubcuw(__a, __b); +} + +#ifdef __POWER8_VECTOR__ +#ifdef __SIZEOF_INT128__ +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_subc(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_subc_u128(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b); +} +#endif // __POWER8_VECTOR__ + +/* vec_vsubcuw */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubcuw(__a, __b); +} + +/* vec_subs */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_subs(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_subs(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_subs(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vsubsbs(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_subs(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_subs(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_subs(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vsububs(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a, + vector short __b) { + return __builtin_altivec_vsubshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a, + vector short __b) { + return __builtin_altivec_vsubshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a, + vector bool short __b) { + return __builtin_altivec_vsubshs(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_subs(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_subs(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_subs(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a, + vector int __b) { + return 
__builtin_altivec_vsubsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a, + vector int __b) { + return __builtin_altivec_vsubsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a, + vector bool int __b) { + return __builtin_altivec_vsubsws(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subs(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subs(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subs(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b); +} + +/* vec_vsubsbs */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsubsbs(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsubsbs(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsubsbs(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vsubsbs(__a, (vector signed char)__b); +} + +/* vec_vsububs */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububs(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububs(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububs(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vsububs(__a, (vector unsigned char)__b); +} + +/* vec_vsubshs */ + +static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a, + vector short __b) { + return __builtin_altivec_vsubshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a, + vector short __b) { + return __builtin_altivec_vsubshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a, + vector bool short __b) { + return __builtin_altivec_vsubshs(__a, (vector short)__b); +} + +/* vec_vsubuhs */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhs(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhs(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b); +} + +/* vec_vsubsws */ + +static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a, + vector int __b) { + return __builtin_altivec_vsubsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a, + vector int __b) { + return __builtin_altivec_vsubsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a, + vector bool int __b) { + return __builtin_altivec_vsubsws(__a, (vector 
int)__b); +} + +/* vec_vsubuws */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuws(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuws(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuws(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +/* vec_vsubuqm */ + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) { + return __a - __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a - __b; +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_sub_u128(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vsubuqm(__a, __b); +} + +/* vec_vsubeuqm */ + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sube(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_sube_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c); +} + +/* vec_vsubcuq */ + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} + +/* vec_vsubecuq */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} +#endif + +#ifdef __powerpc64__ +static __inline__ vector signed int __ATTRS_o_ai +vec_subec(vector signed int __a, vector signed int __b, + vector signed int __c) { + return vec_addec(__a, ~__b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subec(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return 
vec_addec(__a, ~__b, __c); +} +#endif + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_subec(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} +#endif + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_subec_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c); +} +#endif // __POWER8_VECTOR__ + +static __inline__ vector signed int __ATTRS_o_ai +vec_sube(vector signed int __a, vector signed int __b, + vector signed int __c) { + vector signed int __mask = {1, 1, 1, 1}; + vector signed int __carry = __c & __mask; + return vec_adde(__a, ~__b, __carry); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sube(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + vector unsigned int __mask = {1, 1, 1, 1}; + vector unsigned int __carry = __c & __mask; + return vec_adde(__a, ~__b, __carry); +} + +/* vec_sum4s */ + +static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a, + vector int __b) { + return __builtin_altivec_vsum4sbs(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sum4s(vector unsigned char __a, vector unsigned int __b) { + return __builtin_altivec_vsum4ubs(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a, + vector int __b) { + return __builtin_altivec_vsum4shs(__a, __b); +} + +/* vec_vsum4sbs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vsum4sbs(vector signed char __a, vector int __b) { + return __builtin_altivec_vsum4sbs(__a, __b); +} + +/* vec_vsum4ubs */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) { + return __builtin_altivec_vsum4ubs(__a, __b); +} + +/* vec_vsum4shs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vsum4shs(vector signed short __a, vector int __b) { + return __builtin_altivec_vsum4shs(__a, __b); +} + +/* vec_sum2s */ + +/* The vsum2sws instruction has a big-endian bias, so that the second + input vector and the result always reference big-endian elements + 1 and 3 (little-endian elements 0 and 2). For ease of porting the + programmer wants elements 1 and 3 in both cases, so for little + endian we must perform some permutes. 
*/ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_sum2s(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + vector int __c = (vector signed int)vec_perm( + __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); + __c = __builtin_altivec_vsum2sws(__a, __c); + return (vector signed int)vec_perm( + __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); +#else + return __builtin_altivec_vsum2sws(__a, __b); +#endif +} + +/* vec_vsum2sws */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_vsum2sws(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + vector int __c = (vector signed int)vec_perm( + __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); + __c = __builtin_altivec_vsum2sws(__a, __c); + return (vector signed int)vec_perm( + __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); +#else + return __builtin_altivec_vsum2sws(__a, __b); +#endif +} + +/* vec_sums */ + +/* The vsumsws instruction has a big-endian bias, so that the second + input vector and the result always reference big-endian element 3 + (little-endian element 0). For ease of porting the programmer + wants element 3 in both cases, so for little endian we must perform + some permutes. */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_sums(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + __b = (vector signed int)vec_splat(__b, 3); + __b = __builtin_altivec_vsumsws(__a, __b); + return (vector signed int)(0, 0, 0, __b[0]); +#else + return __builtin_altivec_vsumsws(__a, __b); +#endif +} + +/* vec_vsumsws */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_vsumsws(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + __b = (vector signed int)vec_splat(__b, 3); + __b = __builtin_altivec_vsumsws(__a, __b); + return (vector signed int)(0, 0, 0, __b[0]); +#else + return __builtin_altivec_vsumsws(__a, __b); +#endif +} + +/* vec_trunc */ + +static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrspiz(__a); +#else + return __builtin_altivec_vrfiz(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) { + return __builtin_vsx_xvrdpiz(__a); +} +#endif + +/* vec_roundz */ +static __inline__ vector float __ATTRS_o_ai vec_roundz(vector float __a) { + return vec_trunc(__a); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_roundz(vector double __a) { + return vec_trunc(__a); +} +#endif + +/* vec_vrfiz */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfiz(vector float __a) { + return __builtin_altivec_vrfiz(__a); +} + +/* vec_unpackh */ + +/* The vector unpack instructions all have a big-endian bias, so for + little endian we must reverse the meanings of "high" and "low." 
*/ +#ifdef __LITTLE_ENDIAN__ +#define vec_vupkhpx(__a) __builtin_altivec_vupklpx((vector short)(__a)) +#define vec_vupklpx(__a) __builtin_altivec_vupkhpx((vector short)(__a)) +#else +#define vec_vupkhpx(__a) __builtin_altivec_vupkhpx((vector short)(__a)) +#define vec_vupklpx(__a) __builtin_altivec_vupklpx((vector short)(__a)) +#endif + +static __inline__ vector short __ATTRS_o_ai +vec_unpackh(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsb((vector char)__a); +#else + return __builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_unpackh(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsh(__a); +#else + return __builtin_altivec_vupkhsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_unpackh(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unpackh(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsw(__a); +#else + return __builtin_altivec_vupkhsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_unpackh(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_unpackh(vector float __a) { + return (vector double)(__a[0], __a[1]); +} +#endif + +/* vec_vupkhsb */ + +static __inline__ vector short __ATTRS_o_ai +vec_vupkhsb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsb((vector char)__a); +#else + return __builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vupkhsb(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +/* vec_vupkhsh */ + +static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsh(__a); +#else + return __builtin_altivec_vupkhsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vupkhsh(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vupkhsh(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#else + return (vector unsigned 
int)__builtin_altivec_vupkhpx((vector short)__a); +#endif +} + +/* vec_vupkhsw */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsw(__a); +#else + return __builtin_altivec_vupkhsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vupkhsw(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#endif +} +#endif + +/* vec_unpackl */ + +static __inline__ vector short __ATTRS_o_ai +vec_unpackl(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsb((vector char)__a); +#else + return __builtin_altivec_vupklsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_unpackl(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsh(__a); +#else + return __builtin_altivec_vupklsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_unpackl(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unpackl(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsw(__a); +#else + return __builtin_altivec_vupklsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_unpackl(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_unpackl(vector float __a) { + return (vector double)(__a[2], __a[3]); +} +#endif + +/* vec_vupklsb */ + +static __inline__ vector short __ATTRS_o_ai +vec_vupklsb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsb((vector char)__a); +#else + return __builtin_altivec_vupklsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vupklsb(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#endif +} + +/* vec_vupklsh */ + +static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsh(__a); +#else + return __builtin_altivec_vupklsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vupklsh(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#else + return (vector bool 
int)__builtin_altivec_vupklsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vupklsh(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#endif +} + +/* vec_vupklsw */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsw(__a); +#else + return __builtin_altivec_vupklsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vupklsw(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#endif +} +#endif + +/* vec_vsx_ld */ + +#ifdef __VSX__ + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsx_ld(int __a, const vector bool int *__b) { + return (vector bool int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed int *__b) { + return (vector signed int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_vsx_ld(int __a, const signed int *__b) { + return (vector signed int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsx_ld(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_vsx_ld(int __a, const vector float *__b) { + return (vector float)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a, + const float *__b) { + return (vector float)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed long long *__b) { + return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned long long *__b) { + return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_vsx_ld(int __a, const vector double *__b) { + return (vector double)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_vsx_ld(int __a, const double *__b) { + return (vector double)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsx_ld(int __a, const vector bool short *__b) { + return (vector bool short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed short *__b) { + return (vector signed short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_vsx_ld(int __a, const signed short *__b) { + return (vector signed short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsx_ld(int __a, const unsigned short *__b) { + return 
(vector unsigned short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsx_ld(int __a, const vector bool char *__b) { + return (vector bool char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed char *__b) { + return (vector signed char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsx_ld(int __a, const signed char *__b) { + return (vector signed char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsx_ld(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b); +} + +#endif + +/* vec_vsx_st */ + +#ifdef __VSX__ + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b, + signed int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b, + vector signed int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b, + signed int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b, + vector float *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b, + float *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a, + int __b, + vector signed long long *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a, + int __b, + vector unsigned long long *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b, + vector double *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b, + double *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b, + signed short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} 
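+/* A minimal usage sketch, assuming a VSX-enabled target (POWER7 or
+   later); buf and v are illustrative names.  vec_vsx_ld and vec_vsx_st
+   load and store a 16-byte vector at a byte offset from a base
+   pointer.  Unlike vec_ld/vec_st, the effective address is not
+   truncated to a 16-byte boundary, so unaligned data is handled:
+
+     float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+     vector float v = vec_vsx_ld(0, buf);   // loads buf[0..3]
+     vec_vsx_st(v, 16, buf);                // stores v into buf[4..7]
+*/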
+static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b, + vector signed short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b, + signed short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a, + int __b, unsigned short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b, + signed char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b, + signed char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a, + int __b, + vector unsigned char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a, + int __b, unsigned char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +#endif + +#ifdef __VSX__ +#define vec_xxpermdi __builtin_vsx_xxpermdi +#define vec_xxsldwi __builtin_vsx_xxsldwi +#define vec_permi(__a, __b, __c) \ + _Generic((__a), vector signed long long \ + : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1), \ + (((__c)&0x1) + 2)), \ + vector unsigned long long \ + : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1), \ + (((__c)&0x1) + 2)), \ + vector double \ + : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1), \ + (((__c)&0x1) + 2))) +#endif + +/* vec_xor */ + +#define __builtin_altivec_vxor vec_xor + +static __inline__ vector signed char __ATTRS_o_ai +vec_xor(vector signed char __a, vector signed char __b) { + return __a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_xor(vector bool char __a, vector signed char __b) { + return (vector signed char)__a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_xor(vector signed char __a, vector bool char __b) { + return __a ^ (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char __a, vector unsigned char __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char __a, vector bool char __b) { + return __a ^ (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a, + vector bool char __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a, + vector short __b) { + return __a ^ __b; +} + +static 
__inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a, + vector short __b) { + return (vector short)__a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a, + vector bool short __b) { + return __a ^ (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xor(vector unsigned short __a, vector unsigned short __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xor(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xor(vector unsigned short __a, vector bool short __b) { + return __a ^ (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_xor(vector bool short __a, vector bool short __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a, + vector int __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a, + vector int __b) { + return (vector int)__a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a, + vector bool int __b) { + return __a ^ (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xor(vector unsigned int __a, vector unsigned int __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xor(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xor(vector unsigned int __a, vector bool int __b) { + return __a ^ (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a, + vector bool int __b) { + return __a ^ __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_xor(vector signed long long __a, vector signed long long __b) { + return __a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_xor(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_xor(vector signed long long __a, vector bool long long __b) { + return __a ^ (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xor(vector unsigned long long __a, vector unsigned long long __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xor(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xor(vector unsigned long long __a, vector bool long long __b) { + return __a ^ (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_xor(vector 
bool long long __a, vector bool long long __b) { + return __a ^ __b; +} + +static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a, + vector double __b) { + return (vector double)((vector unsigned long long)__a ^ + (vector unsigned long long)__b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_xor(vector double __a, vector bool long long __b) { + return (vector double)((vector unsigned long long)__a ^ + (vector unsigned long long)__b); +} + +static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a, + vector double __b) { + return (vector double)((vector unsigned long long)__a ^ + (vector unsigned long long)__b); +} +#endif + +/* vec_vxor */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vxor(vector signed char __a, vector signed char __b) { + return __a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vxor(vector bool char __a, vector signed char __b) { + return (vector signed char)__a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vxor(vector signed char __a, vector bool char __b) { + return __a ^ (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vxor(vector unsigned char __a, vector unsigned char __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vxor(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vxor(vector unsigned char __a, vector bool char __b) { + return __a ^ (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a, + vector bool char __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a, + vector short __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a, + vector short __b) { + return (vector short)__a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a, + vector bool short __b) { + return __a ^ (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vxor(vector unsigned short __a, vector unsigned short __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vxor(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vxor(vector unsigned short __a, vector bool short __b) { + return __a ^ (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vxor(vector bool short __a, vector bool short __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a, + vector int __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a, + vector int __b) { + return (vector int)__a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a, + vector bool int __b) { + return __a ^ (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vxor(vector unsigned int __a, vector unsigned int __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vxor(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vxor(vector unsigned int __a, vector bool int __b) { + return __a ^ (vector unsigned int)__b; 
+} + +static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a, + vector bool int __b) { + return __a ^ __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vxor(vector signed long long __a, vector signed long long __b) { + return __a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vxor(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vxor(vector signed long long __a, vector bool long long __b) { + return __a ^ (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vxor(vector unsigned long long __a, vector unsigned long long __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vxor(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vxor(vector unsigned long long __a, vector bool long long __b) { + return __a ^ (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vxor(vector bool long long __a, vector bool long long __b) { + return __a ^ __b; +} +#endif + +/* ------------------------ extensions for CBEA ----------------------------- */ + +/* vec_extract */ + +static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a, + unsigned int __b) { + return __a[__b & 0xf]; +} + +static __inline__ unsigned char __ATTRS_o_ai +vec_extract(vector unsigned char __a, unsigned int __b) { + return __a[__b & 0xf]; +} + +static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a, + unsigned int __b) { + return __a[__b & 0xf]; +} + +static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a, + unsigned int __b) { + return __a[__b & 0x7]; +} + +static __inline__ unsigned short __ATTRS_o_ai +vec_extract(vector unsigned short __a, unsigned int __b) { + return __a[__b & 0x7]; +} + +static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a, + unsigned int __b) { + return __a[__b & 0x7]; +} + +static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a, + unsigned int __b) { + return __a[__b & 0x3]; +} + +static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, + unsigned int __b) { + return __a[__b & 0x3]; +} + +static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, + unsigned int __b) { + return __a[__b & 0x3]; +} + +#ifdef __VSX__ +static __inline__ signed long long __ATTRS_o_ai +vec_extract(vector signed long long __a, unsigned int __b) { + return __a[__b & 0x1]; +} + +static __inline__ unsigned long long __ATTRS_o_ai +vec_extract(vector unsigned long long __a, unsigned int __b) { + return 
__a[__b & 0x1]; +} + +static __inline__ unsigned long long __ATTRS_o_ai +vec_extract(vector bool long long __a, unsigned int __b) { + return __a[__b & 0x1]; +} + +static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, + unsigned int __b) { + return __a[__b & 0x1]; +} +#endif + +static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, + unsigned int __b) { + return __a[__b & 0x3]; +} + +#ifdef __POWER9_VECTOR__ + +#define vec_insert4b __builtin_vsx_insertword +#define vec_extract4b __builtin_vsx_extractuword + +/* vec_extract_exp */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_extract_exp(vector float __a) { + return __builtin_vsx_xvxexpsp(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_extract_exp(vector double __a) { + return __builtin_vsx_xvxexpdp(__a); +} + +/* vec_extract_sig */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_extract_sig(vector float __a) { + return __builtin_vsx_xvxsigsp(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_extract_sig(vector double __a) { + return __builtin_vsx_xvxsigdp(__a); +} + +static __inline__ vector float __ATTRS_o_ai +vec_extract_fp32_from_shorth(vector unsigned short __a) { + vector unsigned short __b = +#ifdef __LITTLE_ENDIAN__ + __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1); +#else + __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3); +#endif + return __builtin_vsx_xvcvhpsp(__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_extract_fp32_from_shortl(vector unsigned short __a) { + vector unsigned short __b = +#ifdef __LITTLE_ENDIAN__ + __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1); +#else + __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7); +#endif + return __builtin_vsx_xvcvhpsp(__b); +} +#endif /* __POWER9_VECTOR__ */ + +/* vec_insert */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_insert(signed char __a, vector signed char __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_insert(unsigned char __a, vector unsigned char __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a, + vector bool char __b, + int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_insert(signed short __a, vector signed short __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_insert(unsigned short __a, vector unsigned short __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_insert(unsigned short __a, vector bool short __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_insert(signed int __a, vector signed int __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_insert(unsigned int __a, vector unsigned int __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a, + vector bool int __b, + int __c) { + __b[__c] = __a; + return __b; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_insert(signed long long __a, vector signed long long __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_insert(unsigned long long __a, vector 
unsigned long long __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_insert(unsigned long long __a, vector bool long long __b, int __c) { + __b[__c] = __a; + return __b; +} +static __inline__ vector double __ATTRS_o_ai vec_insert(double __a, + vector double __b, + int __c) { + __b[__c] = __a; + return __b; +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_insert(float __a, + vector float __b, + int __c) { + __b[__c] = __a; + return __b; +} + +/* vec_lvlx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlx(int __a, const signed char *__b) { + return vec_perm(vec_ld(__a, __b), (vector signed char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlx(int __a, const vector signed char *__b) { + return vec_perm(vec_ld(__a, __b), (vector signed char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlx(int __a, const unsigned char *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlx(int __a, const vector unsigned char *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvlx(int __a, const vector bool char *__b) { + return vec_perm(vec_ld(__a, __b), (vector bool char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a, + const short *__b) { + return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a, + const vector short *__b) { + return vec_perm(vec_ld(__a, __b), (vector short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlx(int __a, const unsigned short *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlx(int __a, const vector unsigned short *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvlx(int __a, const vector bool short *__b) { + return vec_perm(vec_ld(__a, __b), (vector bool short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a, + const vector pixel *__b) { + return vec_perm(vec_ld(__a, __b), (vector pixel)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) { + return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, + const vector int *__b) { + return vec_perm(vec_ld(__a, __b), (vector int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlx(int __a, const unsigned int *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlx(int __a, const vector unsigned int *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvlx(int __a, const vector bool int *__b) { + 
return vec_perm(vec_ld(__a, __b), (vector bool int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a, + const float *__b) { + return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a, + const vector float *__b) { + return vec_perm(vec_ld(__a, __b), (vector float)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_lvlxl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlxl(int __a, const signed char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector signed char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlxl(int __a, const vector signed char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector signed char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlxl(int __a, const unsigned char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlxl(int __a, const vector unsigned char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvlxl(int __a, const vector bool char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector bool char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a, + const short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a, + const vector short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlxl(int __a, const unsigned short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlxl(int __a, const vector unsigned short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvlxl(int __a, const vector bool short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector bool short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a, + const vector pixel *__b) { + return vec_perm(vec_ldl(__a, __b), (vector pixel)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, + const vector int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlxl(int __a, const unsigned int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlxl(int __a, const vector unsigned int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvlxl(int __a, const vector bool int *__b) { + return vec_perm(vec_ldl(__a, 
__b), (vector bool int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a, + const float *__b) { + return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a, + vector float *__b) { + return vec_perm(vec_ldl(__a, __b), (vector float)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_lvrx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrx(int __a, const signed char *__b) { + return vec_perm((vector signed char)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrx(int __a, const vector signed char *__b) { + return vec_perm((vector signed char)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrx(int __a, const unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrx(int __a, const vector unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvrx(int __a, const vector bool char *__b) { + return vec_perm((vector bool char)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a, + const short *__b) { + return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a, + const vector short *__b) { + return vec_perm((vector short)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrx(int __a, const unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrx(int __a, const vector unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvrx(int __a, const vector bool short *__b) { + return vec_perm((vector bool short)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a, + const vector pixel *__b) { + return vec_perm((vector pixel)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) { + return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, + const vector int *__b) { + return vec_perm((vector int)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrx(int __a, const unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrx(int __a, const vector unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvrx(int __a, const vector bool int *__b) { + return vec_perm((vector bool int)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char 
*)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a, + const float *__b) { + return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a, + const vector float *__b) { + return vec_perm((vector float)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_lvrxl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrxl(int __a, const signed char *__b) { + return vec_perm((vector signed char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrxl(int __a, const vector signed char *__b) { + return vec_perm((vector signed char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrxl(int __a, const unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrxl(int __a, const vector unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvrxl(int __a, const vector bool char *__b) { + return vec_perm((vector bool char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a, + const short *__b) { + return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a, + const vector short *__b) { + return vec_perm((vector short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrxl(int __a, const unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrxl(int __a, const vector unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvrxl(int __a, const vector bool short *__b) { + return vec_perm((vector bool short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a, + const vector pixel *__b) { + return vec_perm((vector pixel)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) { + return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, + const vector int *__b) { + return vec_perm((vector int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrxl(int __a, const unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrxl(int __a, const vector unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvrxl(int __a, const vector bool int *__b) { + return vec_perm((vector bool int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static 
__inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a, + const float *__b) { + return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a, + const vector float *__b) { + return vec_perm((vector float)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_stvlx */ + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b, + signed char *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b, + vector signed char *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b, + unsigned char *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b, + vector unsigned char *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b, + vector bool char *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, + short *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, + vector short *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b, + vector bool short *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b, + vector pixel *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, + int *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, + vector int *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b, + vector bool int *__c) { + return vec_st( + 
vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b, + vector float *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_stvlxl */ + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b, + signed char *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b, + vector signed char *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, + int __b, unsigned char *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, + int __b, + vector unsigned char *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b, + vector bool char *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, + short *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, + vector short *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b, + vector bool short *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b, + vector pixel *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, + int *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, + vector int *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b, + vector bool int *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, 
__c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b, + vector float *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_stvrx */ + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b, + signed char *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b, + vector signed char *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b, + unsigned char *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b, + vector unsigned char *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b, + vector bool char *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, + short *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, + vector short *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b, + vector bool short *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b, + vector pixel *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, + int *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, + vector int *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b, + vector bool int *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, 
__c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b, + vector float *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_stvrxl */ + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b, + signed char *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b, + vector signed char *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, + int __b, unsigned char *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, + int __b, + vector unsigned char *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b, + vector bool char *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, + short *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, + vector short *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b, + vector bool short *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b, + vector pixel *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, + int *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, + vector int *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b, + vector bool int *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static 
__inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
+                                        vector float *__c) {
+  return vec_stl(
+      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
+      __b, __c);
+}
+
+/* vec_promote */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
+                                                              int __b) {
+  vector signed char __res = (vector signed char)(0);
+  __res[__b & 0xF] = __a;
+  return __res;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_promote(unsigned char __a, int __b) {
+  vector unsigned char __res = (vector unsigned char)(0);
+  __res[__b & 0xF] = __a;
+  return __res;
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
+  vector short __res = (vector short)(0);
+  __res[__b & 0x7] = __a;
+  return __res;
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_promote(unsigned short __a, int __b) {
+  vector unsigned short __res = (vector unsigned short)(0);
+  __res[__b & 0x7] = __a;
+  return __res;
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
+  vector int __res = (vector int)(0);
+  __res[__b & 0x3] = __a;
+  return __res;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_promote(unsigned int __a, int __b) {
+  vector unsigned int __res = (vector unsigned int)(0);
+  __res[__b & 0x3] = __a;
+  return __res;
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
+  vector float __res = (vector float)(0);
+  __res[__b & 0x3] = __a;
+  return __res;
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_promote(double __a, int __b) {
+  vector double __res = (vector double)(0);
+  __res[__b & 0x1] = __a;
+  return __res;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_promote(signed long long __a, int __b) {
+  vector signed long long __res = (vector signed long long)(0);
+  __res[__b & 0x1] = __a;
+  return __res;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_promote(unsigned long long __a, int __b) {
+  vector unsigned long long __res = (vector unsigned long long)(0);
+  __res[__b & 0x1] = __a;
+  return __res;
+}
+#endif
+
+/* vec_splats */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
+  return (vector signed char)(__a);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_splats(unsigned char __a) {
+  return (vector unsigned char)(__a);
+}
+
+static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
+  return (vector short)(__a);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_splats(unsigned short __a) {
+  return (vector unsigned short)(__a);
+}
+
+static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
+  return (vector int)(__a);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_splats(unsigned int __a) {
+  return (vector unsigned int)(__a);
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_splats(signed long long __a) {
+  return (vector signed long long)(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_splats(unsigned long long __a) {
+  return (vector unsigned long long)(__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+    defined(__SIZEOF_INT128__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_splats(signed __int128 __a) {
+  return (vector signed __int128)(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_splats(unsigned __int128 __a) {
+  return (vector unsigned __int128)(__a);
+}
+
+#endif
+
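/* [Editorial aside, not part of the patch: a minimal usage sketch of the
 * vec_promote and vec_splats families in this section. It assumes a PowerPC
 * target compiled with -maltivec; the function and variable names below are
 * illustrative only.
 *
 *   #include <altivec.h>
 *
 *   vector int splat_then_patch(int x, int y) {
 *     vector int v = vec_splats(x); // broadcast: {x, x, x, x}
 *     v[1 & 0x3] = y;               // lane write, masked like vec_promote
 *     return v;
 *   }
 *
 * vec_splats expands to a vector literal, so the broadcast normally folds to
 * a single splat; vec_promote zeroes the remaining lanes and masks the index
 * with lane_count - 1 (0xF for the 16 char lanes, 0x7 for 8 shorts, 0x3 for
 * 4 ints/floats, 0x1 for 2 doubles/long longs).]
 */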
+static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) { + return (vector double)(__a); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) { + return (vector float)(__a); +} + +/* ----------------------------- predicates --------------------------------- */ + +/* vec_all_eq */ + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a, + vector unsigned int __b) { + return 
__builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a, + vector long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b); +} +#endif + +/* vec_all_ge */ + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a, + vector unsigned char __b) { + return 
__builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, (vector signed char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, (vector signed short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, (vector signed int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a); +} +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a, + vector bool 
long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, + (vector signed long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __b, __a); +} +#endif + +/* vec_all_gt */ + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b); +} 
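/* [Editorial aside, not part of the patch: the vec_all_* predicates in this
 * section return a plain int by testing the CR6 field set by the dot-form
 * vector compare instructions. A hedged usage sketch; names are
 * illustrative:
 *
 *   #include <altivec.h>
 *
 *   int every_lane_greater(vector signed int a, vector signed int b) {
 *     // 1 iff a[i] > b[i] for every lane i; per the definitions below this
 *     // wraps __builtin_altivec_vcmpgtsw_p(__CR6_LT, a, b).
 *     return vec_all_gt(a, b);
 *   }
 *
 * Note how vec_all_ge(a, b) is phrased as "no lane of b is greater than a",
 * i.e. __CR6_EQ on the reversed greater-than compare, since AltiVec has no
 * native integer >= compare.]
 */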
+ +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector signed short)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector signed int)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b); +} +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + +static 
__inline__ int __ATTRS_o_ai vec_all_gt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __a, __b); +} +#endif + +/* vec_all_in */ + +static __inline__ int __attribute__((__always_inline__)) +vec_all_in(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b); +} + +/* vec_all_le */ + +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector signed short)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a, + (vector unsigned short)__b); +} + 
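/* [Editorial aside, not part of the patch: vec_all_in above is the one
 * predicate with bounds semantics; it wraps the vcmpbfp compare and reports
 * whether every lane of its first operand lies in [-bound, bound]. A sketch
 * with illustrative names:
 *
 *   #include <altivec.h>
 *
 *   int all_within(vector float a, vector float bound) {
 *     // bound's lanes must be non-negative; returns 1 when
 *     // -bound[i] <= a[i] <= bound[i] holds for every i.
 *     return vec_all_in(a, bound);
 *   }
 * ]
 */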
+static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector signed int)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_lt */ + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a, + vector signed char __b) 
{ + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, (vector signed char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, (vector signed short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, (vector signed int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, + vector bool int __b) { + return 
__builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, + (vector signed long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __b, __a); +} +#endif + +/* vec_all_nan */ + +static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a); +} +#endif + +/* vec_all_ne */ + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai 
vec_all_ne(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a, + vector unsigned long long __b) { + return 
__builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_nge */ + +static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_ngt */ + +static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_nle */ + +static __inline__ int __ATTRS_o_ai +vec_all_nle(vector float __a, vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __b, __a); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_nle(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __b, __a); +} +#endif + +/* vec_all_nlt */ + +static __inline__ int __ATTRS_o_ai +vec_all_nlt(vector float __a, vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __b, __a); +#else + return 
__builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_nlt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __b, __a); +} +#endif + +/* vec_all_numeric */ + +static __inline__ int __ATTRS_o_ai +vec_all_numeric(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __a); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_numeric(vector double __a) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __a); +} +#endif + +/* vec_any_eq */ + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int 
__ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b); +} +#endif + +#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__) +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + 
+/* vec_any_ge */
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, (vector signed char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b, (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, (vector signed short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b, (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, (vector signed int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b, (vector unsigned int)__a);
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, (vector signed long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, (vector unsigned long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, (vector signed long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, (vector unsigned long long)__b, (vector unsigned long long)__a);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
+#else
+  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
+/* vec_any_gt */
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, (vector signed char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector signed short)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a, (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector signed int)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, (vector signed long long)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, (vector unsigned long long)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, (vector unsigned long long)__a, (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
+#else
+  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
+/* vec_any_le */
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, (vector signed char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector signed short)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a, (vector unsigned short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, (vector unsigned int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector signed int)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, (vector unsigned long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, (vector signed long long)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, (vector unsigned long long)__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, (vector unsigned long long)__a, (vector unsigned long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
+#else
+  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_lt */
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, (vector signed char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b, (vector unsigned char)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector short __b) {
+  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, (vector signed short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b, (vector unsigned short)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector int __b) {
+  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, (vector signed int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, (vector unsigned int)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b, (vector unsigned int)__a);
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, (vector signed long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, (vector unsigned long long)__b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, (vector signed long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, (vector unsigned long long)__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, (vector unsigned long long)__b, (vector unsigned long long)__a);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
+#else
+  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
+/* vec_any_nan */
+
+static __inline__ int __ATTRS_o_ai vec_any_nan(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __a);
+#else
+  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
+#endif
+}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nan(vector double __a) {
+  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __a);
+}
+#endif
+
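vec_any_nan works by comparing a vector against itself: equality fails only
in NaN lanes, so asking whether any lane failed (__CR6_LT_REV on the
self-compare) detects NaNs without a dedicated instruction. A scalar sketch
of the same identity, illustrative only:

  static int is_nan_scalar(float x) {
    return x != x; /* IEEE 754: a NaN never compares equal, even to itself */
  }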
+/* vec_any_ne */
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a, vector signed char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a, vector unsigned char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a, vector bool char __b) {
+  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, (vector char)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a, vector short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a, vector short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a, vector unsigned short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a, vector bool short __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a, vector pixel __b) {
+  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, (vector short)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector unsigned int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, (vector int)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a, vector bool int __b) {
+  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, (vector int)__b);
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a, (vector long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a, vector signed long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a, vector bool long long __b) {
+  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+}
+#endif
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
+#else
+  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_nge */
+
+static __inline__ int __ATTRS_o_ai vec_any_nge(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgesp_p(__CR6_LT_REV, __a, __b);
+#else
+  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nge(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgedp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_ngt */
+
+static __inline__ int __ATTRS_o_ai vec_any_ngt(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT_REV, __a, __b);
+#else
+  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ngt(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
+/* vec_any_nle */
+
+static __inline__ int __ATTRS_o_ai vec_any_nle(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgesp_p(__CR6_LT_REV, __b, __a);
+#else
+  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nle(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgedp_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
+/* vec_any_nlt */
+
+static __inline__ int __ATTRS_o_ai vec_any_nlt(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT_REV, __b, __a);
+#else
+  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nlt(vector double __a, vector double __b) {
+  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
+/* vec_any_numeric */
+
+static __inline__ int __ATTRS_o_ai vec_any_numeric(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __a);
+#else
+  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_numeric(vector double __a) {
+  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __a);
+}
+#endif
+
+/* vec_any_out */
+
+static __inline__ int __attribute__((__always_inline__))
+vec_any_out(vector float __a, vector float __b) {
+  return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
+}
+
+/* Power 8 Crypto functions
+Note: We diverge from the current GCC implementation with regard
+to cryptography and related functions as follows:
+- Only the SHA and AES instructions and builtins are disabled by -mno-crypto
+- The remaining ones are only available on Power8 and up so
+  require -mpower8-vector
+The justification for this is that export requirements require that
+Category:Vector.Crypto is optional (i.e. compliant hardware may not provide
+support). As a result, we need to be able to turn off support for those.
+The remaining ones (currently controlled by -mcrypto for GCC) still
+need to be provided on compliant hardware even if Vector.Crypto is not
+provided.
+*/
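A sketch of the gating the note above describes, as hypothetical user code
rather than part of the patch: only the SHA/AES entry points sit behind
__CRYPTO__, while the remaining Power8 helpers need only __POWER8_VECTOR__.

  #ifdef __CRYPTO__
  /* AES/SHA builtins such as vec_cipher_be and vec_shasigma_be are usable. */
  #endif
  #ifdef __POWER8_VECTOR__
  /* vec_pmsum_be, vec_gb and vec_permxor stay available with -mno-crypto. */
  #endif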
+#ifdef __CRYPTO__
+#define vec_sbox_be __builtin_altivec_crypto_vsbox
+#define vec_cipher_be __builtin_altivec_crypto_vcipher
+#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
+#define vec_ncipher_be __builtin_altivec_crypto_vncipher
+#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vsbox(vector unsigned long long __a) {
+  return __builtin_altivec_crypto_vsbox(__a);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipher(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_crypto_vcipher(__a, __b);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vcipherlast(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_crypto_vcipherlast(__a, __b);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipher(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_crypto_vncipher(__a, __b);
+}
+
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+__builtin_crypto_vncipherlast(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_crypto_vncipherlast(__a, __b);
+}
+
+#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
+#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
+
+#define vec_shasigma_be(X, Y, Z) \
+  _Generic((X), vector unsigned int \
+           : __builtin_crypto_vshasigmaw, vector unsigned long long \
+           : __builtin_crypto_vshasigmad)((X), (Y), (Z))
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool char __ATTRS_o_ai
+vec_permxor(vector bool char __a, vector bool char __b, vector bool char __c) {
+  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_permxor(vector signed char __a, vector signed char __b, vector signed char __c) {
+  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_permxor(vector unsigned char __a, vector unsigned char __b, vector unsigned char __c) {
+  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b, vector unsigned char __c) {
+  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b, vector unsigned short __c) {
+  return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b, (vector unsigned char)__c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+  return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b, (vector unsigned char)__c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpermxor(vector unsigned long long __a, vector unsigned long long __b, vector unsigned long long __c) {
+  return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
+      (vector unsigned char)__a, (vector unsigned char)__b, (vector unsigned char)__c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_crypto_vpmsumb(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
+  return __builtin_altivec_crypto_vpmsumh(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_crypto_vpmsumw(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_crypto_vpmsumd(__a, __b);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_vgbbd(vector signed char __a) {
+  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+#define vec_pmsum_be __builtin_crypto_vpmsumb
+#define vec_gb __builtin_altivec_vgbbd
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vgbbd(vector unsigned char __a) {
+  return __builtin_altivec_vgbbd(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_gbb(vector signed long long __a) {
+  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_gbb(vector unsigned long long __a) {
+  return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_vbpermq(vector signed char __a, vector signed char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a, (vector unsigned char)__b);
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
+  return __builtin_altivec_vbpermq(__a, __b);
+}
+
+#if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
+static __inline__ vector unsigned long long __attribute__((__always_inline__))
+vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
+  return __builtin_altivec_vbpermq((vector unsigned char)__a, (vector unsigned char)__b);
+}
+#endif
+#endif
+
+/* vec_reve */
+
+static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
+  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
+  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char vec_reve(vector unsigned char __a) {
+  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
+  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
+  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int vec_reve(vector unsigned int __a) {
+  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
+  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short vec_reve(vector signed short __a) {
+  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short vec_reve(vector unsigned short __a) {
+  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
+  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai vector bool long long vec_reve(vector bool long long __a) {
+  return __builtin_shufflevector(__a, __a, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed long long vec_reve(vector signed long long __a) {
+  return __builtin_shufflevector(__a, __a, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long vec_reve(vector unsigned long long __a) {
+  return __builtin_shufflevector(__a, __a, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
+  return __builtin_shufflevector(__a, __a, 1, 0);
+}
+#endif
+
+/* vec_revb */
+static __inline__ vector bool char __ATTRS_o_ai
+vec_revb(vector bool char __a) {
+  return __a;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_revb(vector signed char __a) {
+  return __a;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_revb(vector unsigned char __a) {
+  return __a;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_revb(vector bool short __a) {
+  vector unsigned char __indices =
+      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_revb(vector signed short __a) {
+  vector unsigned char __indices =
+      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_revb(vector unsigned short __a) {
+  vector unsigned char __indices =
+      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_revb(vector bool int __a) {
+  vector unsigned char __indices =
+      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_revb(vector signed int __a) {
+  vector unsigned char __indices =
+      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_revb(vector unsigned int __a) {
+  vector unsigned char __indices =
+      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_revb(vector float __a) {
+  vector unsigned char __indices =
+      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+  return vec_perm(__a, __a, __indices);
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_revb(vector bool long long __a) {
+  vector unsigned char __indices =
+      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_revb(vector signed long long __a) {
+  vector unsigned char __indices =
+      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_revb(vector unsigned long long __a) {
+  vector unsigned char __indices =
+      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+  return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_revb(vector double __a) {
+  vector unsigned char __indices =
+      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+  return vec_perm(__a, __a, __indices);
+}
+#endif /* End __VSX__ */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && defined(__SIZEOF_INT128__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_revb(vector signed __int128 __a) {
+  vector unsigned char __indices =
+      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
+  return (vector signed __int128)vec_perm((vector signed int)__a, (vector signed int)__a, __indices);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_revb(vector unsigned __int128 __a) {
+  vector unsigned char __indices =
+      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
+  return (vector unsigned __int128)vec_perm((vector signed int)__a, (vector signed int)__a, __indices);
+}
+#endif /* END __POWER8_VECTOR__ && __powerpc64__ */
+
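A usage sketch for the byte-reversal above, not part of the patch, assuming
VSX and a little-endian target (the helper name is hypothetical): converting
big-endian 32-bit data after a native-endian load.

  #include <altivec.h>

  vector unsigned int load_be_words(const unsigned int *p) {
    vector unsigned int v = vec_xl(0, p); /* native-endian unaligned load */
    return vec_revb(v);                   /* swap bytes within each element */
  }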
+/* vec_xl */
+
+#define vec_xld2 vec_xl
+#define vec_xlw4 vec_xl
+typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
+typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
+typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
+typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
+typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
+typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
+typedef vector float unaligned_vec_float __attribute__((aligned(1)));
+
+static inline __ATTRS_o_ai vector signed char vec_xl(ptrdiff_t __offset, const signed char *__ptr) {
+  return *(unaligned_vec_schar *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char vec_xl(ptrdiff_t __offset, const unsigned char *__ptr) {
+  return *(unaligned_vec_uchar*)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short vec_xl(ptrdiff_t __offset, const signed short *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sshort *)__addr;
+}
+
+static inline __ATTRS_o_ai vector unsigned short vec_xl(ptrdiff_t __offset, const unsigned short *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ushort *)__addr;
+}
+
+static inline __ATTRS_o_ai vector signed int vec_xl(ptrdiff_t __offset, const signed int *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sint *)__addr;
+}
+
+static inline __ATTRS_o_ai vector unsigned int vec_xl(ptrdiff_t __offset, const unsigned int *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_uint *)__addr;
+}
+
+static inline __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset, const float *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_float *)__addr;
+}
+
+#ifdef __VSX__
+typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
+typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
+typedef vector double unaligned_vec_double __attribute__((aligned(1)));
+
+static inline __ATTRS_o_ai vector signed long long vec_xl(ptrdiff_t __offset, const signed long long *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sll *)__addr;
+}
+
+static inline __ATTRS_o_ai vector unsigned long long vec_xl(ptrdiff_t __offset, const unsigned long long *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ull *)__addr;
+}
+
+static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset, const double *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_double *)__addr;
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && defined(__SIZEOF_INT128__)
+typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
+typedef vector unsigned __int128 unaligned_vec_ui128 __attribute__((aligned(1)));
+static inline __ATTRS_o_ai vector signed __int128 vec_xl(ptrdiff_t __offset, const signed __int128 *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_si128 *)__addr;
+}
+
+static inline __ATTRS_o_ai vector unsigned __int128 vec_xl(ptrdiff_t __offset, const unsigned __int128 *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ui128 *)__addr;
+}
+#endif
+
+/* vec_xl_be */
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xl_be(ptrdiff_t __offset, const signed char *__ptr) {
+  vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_be(ptrdiff_t __offset, const unsigned char *__ptr) {
+  vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_xl_be(ptrdiff_t __offset, const signed short *__ptr) {
+  vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xl_be(ptrdiff_t __offset, const unsigned short *__ptr) {
+  vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const signed int *__ptr) {
+  return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const unsigned int *__ptr) {
+  return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const float *__ptr) {
+  return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const signed long long *__ptr) {
+  return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const unsigned long long *__ptr) {
+  return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const double *__ptr) {
+  return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && defined(__SIZEOF_INT128__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const signed __int128 *__ptr) {
+  return vec_xl(__offset, __ptr);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_be(signed long long __offset, const unsigned __int128 *__ptr) {
+  return vec_xl(__offset, __ptr);
+}
+#endif
+#else
+  #define vec_xl_be vec_xl
+#endif
+
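The loads above avoid alignment assumptions with vector typedefs marked
__attribute__((aligned(1))): dereferencing through such a type tells the
compiler it may not assume the usual 16-byte vector alignment, so it emits an
unaligned access. A standalone sketch of the same pattern (unaligned_vsi is a
hypothetical mirror of unaligned_vec_sint, not a name from the header):

  typedef vector signed int unaligned_vsi __attribute__((aligned(1)));

  vector signed int load_any_alignment(const signed int *p, long byte_off) {
    /* Pointer math in bytes, then a dereference the compiler treats as
       potentially unaligned because of the aligned(1) typedef. */
    return *(const unaligned_vsi *)((const signed char *)p + byte_off);
  }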
+#if defined(__POWER10_VECTOR__) && defined(__VSX__) && defined(__SIZEOF_INT128__)
+
+/* vec_xl_sext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+/* vec_xl_zext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(ptrdiff_t __offset, const unsigned char *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(ptrdiff_t __offset, const unsigned short *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(ptrdiff_t __offset, const unsigned int *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(ptrdiff_t __offset, const unsigned long long *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+#endif
+
+/* vec_xlds */
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xlds(ptrdiff_t __offset, const signed long long *__ptr) {
+  signed long long *__addr = (signed long long*)((signed char *)__ptr + __offset);
+  return (vector signed long long) *__addr;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xlds(ptrdiff_t __offset, const unsigned long long *__ptr) {
+  unsigned long long *__addr = (unsigned long long *)((signed char *)__ptr + __offset);
+  return (unaligned_vec_ull) *__addr;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_xlds(ptrdiff_t __offset, const double *__ptr) {
+  double *__addr = (double*)((signed char *)__ptr + __offset);
+  return (unaligned_vec_double) *__addr;
+}
+
+/* vec_load_splats */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_load_splats(signed long long __offset, const signed int *__ptr) {
+  signed int *__addr = (signed int*)((signed char *)__ptr + __offset);
+  return (vector signed int)*__addr;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_load_splats(unsigned long long __offset, const signed int *__ptr) {
+  signed int *__addr = (signed int*)((signed char *)__ptr + __offset);
+  return (vector signed int)*__addr;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_load_splats(signed long long __offset, const unsigned int *__ptr) {
+  unsigned int *__addr = (unsigned int*)((signed char *)__ptr + __offset);
+  return (vector unsigned int)*__addr;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_load_splats(unsigned long long __offset, const unsigned int *__ptr) {
+  unsigned int *__addr = (unsigned int*)((signed char *)__ptr + __offset);
+  return (vector unsigned int)*__addr;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_load_splats(signed long long __offset, const float *__ptr) {
+  float *__addr = (float*)((signed char *)__ptr + __offset);
+  return (vector float)*__addr;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_load_splats(unsigned long long __offset, const float *__ptr) {
+  float *__addr = (float*)((signed char *)__ptr + __offset);
+  return (vector float)*__addr;
+}
+#endif
+
+/* vec_xst */
+
+#define vec_xstd2 vec_xst
+#define vec_xstw4 vec_xst
+static inline __ATTRS_o_ai void vec_xst(vector signed char __vec, ptrdiff_t __offset, signed char *__ptr) {
+  *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec, ptrdiff_t __offset, unsigned char *__ptr) {
+  *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector signed short __vec, ptrdiff_t __offset, signed short *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sshort *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec, ptrdiff_t __offset, unsigned short *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ushort *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector signed int __vec, ptrdiff_t __offset, signed int *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sint *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec, ptrdiff_t __offset, unsigned int *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_uint *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector float __vec, ptrdiff_t __offset, float *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_float *)__addr = __vec;
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec, ptrdiff_t __offset, signed long long *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sll *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec, ptrdiff_t __offset, unsigned long long *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ull *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset, double *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_double *)__addr = __vec;
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && defined(__SIZEOF_INT128__)
+static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec, ptrdiff_t __offset, signed __int128 *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_si128 *)__addr = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec, ptrdiff_t __offset, unsigned __int128 *__ptr) {
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ui128 *)__addr = __vec;
+}
+#endif
+
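Pairing the two directions gives an unaligned 16-byte copy; a minimal sketch
assuming -maltivec, with a hypothetical function name:

  #include <altivec.h>

  void copy_16_bytes(const signed char *src, signed char *dst) {
    vector signed char v = vec_xl(0, src); /* unaligned vector load */
    vec_xst(v, 0, dst);                    /* unaligned vector store */
  }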
ptrdiff_t __offset, + signed char *__ptr) { + *(__ptr + __offset) = (signed char)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec, + ptrdiff_t __offset, + unsigned char *__ptr) { + *(__ptr + __offset) = (unsigned char)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec, + ptrdiff_t __offset, + signed short *__ptr) { + *(__ptr + __offset) = (signed short)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec, + ptrdiff_t __offset, + unsigned short *__ptr) { + *(__ptr + __offset) = (unsigned short)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec, + ptrdiff_t __offset, + signed int *__ptr) { + *(__ptr + __offset) = (signed int)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec, + ptrdiff_t __offset, + unsigned int *__ptr) { + *(__ptr + __offset) = (unsigned int)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec, + ptrdiff_t __offset, + signed long long *__ptr) { + *(__ptr + __offset) = (signed long long)__vec[0]; +} + +static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec, + ptrdiff_t __offset, + unsigned long long *__ptr) { + *(__ptr + __offset) = (unsigned long long)__vec[0]; +} +#endif + +/* vec_xst_be */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec, + signed long long __offset, + signed char *__ptr) { + vector signed char __tmp = + __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, + 13, 12, 11, 10, 9, 8); + typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double; + __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec, + signed long long __offset, + unsigned char *__ptr) { + vector unsigned char __tmp = + __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, + 13, 12, 11, 10, 9, 8); + typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double; + __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec, + signed long long __offset, + signed short *__ptr) { + vector signed short __tmp = + __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4); + typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double; + __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec, + signed long long __offset, + unsigned short *__ptr) { + vector unsigned short __tmp = + __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4); + typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double; + __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec, + signed long long __offset, + signed int *__ptr) { + __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec, + signed long long __offset, + unsigned int *__ptr) { + __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec, + signed long long __offset, + float *__ptr) { + __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr); +} 
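+
+/* Illustrative sketch (editorial, not part of the upstream header): on a
+ * little-endian target, vec_xst_be stores the vector elements in big-endian
+ * element order, so a matching vec_xl_be load round-trips the values. The
+ * buffer and variable names below are hypothetical.
+ *
+ *   int __buf[4] __attribute__((aligned(16)));
+ *   vector signed int __v = {1, 2, 3, 4};
+ *   vec_xst_be(__v, 0, __buf);                   // store in BE element order
+ *   vector signed int __r = vec_xl_be(0, __buf); // reloads {1, 2, 3, 4}
+ */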
+ +#ifdef __VSX__ +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec, + signed long long __offset, + signed long long *__ptr) { + __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec, + signed long long __offset, + unsigned long long *__ptr) { + __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec, + signed long long __offset, + double *__ptr) { + __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr); +} +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \ + defined(__SIZEOF_INT128__) +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec, + signed long long __offset, + signed __int128 *__ptr) { + vec_xst(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec, + signed long long __offset, + unsigned __int128 *__ptr) { + vec_xst(__vec, __offset, __ptr); +} +#endif +#else + #define vec_xst_be vec_xst +#endif + +#ifdef __POWER9_VECTOR__ +#define vec_test_data_class(__a, __b) \ + _Generic( \ + (__a), vector float \ + : (vector bool int)__builtin_vsx_xvtstdcsp((vector float)(__a), (__b)), \ + vector double \ + : (vector bool long long)__builtin_vsx_xvtstdcdp((vector double)(__a), \ + (__b))) + +#endif /* #ifdef __POWER9_VECTOR__ */ + +static vector float __ATTRS_o_ai vec_neg(vector float __a) { + return -__a; +} + +#ifdef __VSX__ +static vector double __ATTRS_o_ai vec_neg(vector double __a) { + return -__a; +} + +#endif + +#ifdef __VSX__ +static vector long long __ATTRS_o_ai vec_neg(vector long long __a) { + return -__a; +} +#endif + +static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) { + return -__a; +} + +static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) { + return -__a; +} + +static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) { + return -__a; +} + +static vector float __ATTRS_o_ai vec_nabs(vector float __a) { + return - vec_abs(__a); +} + +#ifdef __VSX__ +static vector double __ATTRS_o_ai vec_nabs(vector double __a) { + return - vec_abs(__a); +} + +#endif + +#ifdef __POWER8_VECTOR__ +static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) { + return __builtin_altivec_vminsd(__a, -__a); +} +#endif + +static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) { + return __builtin_altivec_vminsw(__a, -__a); +} + +static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) { + return __builtin_altivec_vminsh(__a, -__a); +} + +static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) { + return __builtin_altivec_vminsb(__a, -__a); +} + +static vector float __ATTRS_o_ai vec_recipdiv(vector float __a, + vector float __b) { + return __builtin_ppc_recipdivf(__a, __b); +} + +#ifdef __VSX__ +static vector double __ATTRS_o_ai vec_recipdiv(vector double __a, + vector double __b) { + return __builtin_ppc_recipdivd(__a, __b); +} +#endif + +#ifdef __POWER10_VECTOR__ + +/* vec_extractm */ + +static __inline__ unsigned int __ATTRS_o_ai +vec_extractm(vector unsigned char __a) { + return __builtin_altivec_vextractbm(__a); +} + +static __inline__ unsigned int __ATTRS_o_ai +vec_extractm(vector unsigned short __a) { + return __builtin_altivec_vextracthm(__a); +} + +static __inline__ unsigned int __ATTRS_o_ai +vec_extractm(vector unsigned int __a) { + return __builtin_altivec_vextractwm(__a); +} 
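+
+/* Illustrative sketch (editorial, not part of the upstream header):
+ * vec_extractm packs the most-significant bit of each element into the
+ * low-order bits of a scalar, e.g. to turn a comparison mask into a
+ * bitmask. The values are hypothetical.
+ *
+ *   vector unsigned int __mask = {0x80000000, 0, 0xFFFFFFFF, 0};
+ *   unsigned int __bits = vec_extractm(__mask); // one bit per element MSB
+ *
+ * The doubleword and quadword overloads follow.
+ */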
+ +static __inline__ unsigned int __ATTRS_o_ai +vec_extractm(vector unsigned long long __a) { + return __builtin_altivec_vextractdm(__a); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ unsigned int __ATTRS_o_ai +vec_extractm(vector unsigned __int128 __a) { + return __builtin_altivec_vextractqm(__a); +} +#endif + +/* vec_expandm */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_expandm(vector unsigned char __a) { + return __builtin_altivec_vexpandbm(__a); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_expandm(vector unsigned short __a) { + return __builtin_altivec_vexpandhm(__a); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_expandm(vector unsigned int __a) { + return __builtin_altivec_vexpandwm(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_expandm(vector unsigned long long __a) { + return __builtin_altivec_vexpanddm(__a); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_expandm(vector unsigned __int128 __a) { + return __builtin_altivec_vexpandqm(__a); +} +#endif + +/* vec_cntm */ + +#define vec_cntm(__a, __mp) \ + _Generic((__a), vector unsigned char \ + : __builtin_altivec_vcntmbb((__a), (unsigned int)(__mp)), \ + vector unsigned short \ + : __builtin_altivec_vcntmbh((__a), (unsigned int)(__mp)), \ + vector unsigned int \ + : __builtin_altivec_vcntmbw((__a), (unsigned int)(__mp)), \ + vector unsigned long long \ + : __builtin_altivec_vcntmbd((__a), (unsigned int)(__mp))) + +/* vec_gen[b|h|w|d|q]m */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_genbm(unsigned long long __bm) { + return __builtin_altivec_mtvsrbm(__bm); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_genhm(unsigned long long __bm) { + return __builtin_altivec_mtvsrhm(__bm); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_genwm(unsigned long long __bm) { + return __builtin_altivec_mtvsrwm(__bm); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_gendm(unsigned long long __bm) { + return __builtin_altivec_mtvsrdm(__bm); +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_genqm(unsigned long long __bm) { + return __builtin_altivec_mtvsrqm(__bm); +} +#endif + +/* vec_pdep */ + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_pdep(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vpdepd(__a, __b); +} + +/* vec_pext */ + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_pext(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vpextd(__a, __b); +} + +/* vec_cfuge */ + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vcfuged(__a, __b); +} + +/* vec_gnb */ + +#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b) + +/* vec_ternarylogic */ +#ifdef __VSX__ +#ifdef __SIZEOF_INT128__ +#define vec_ternarylogic(__a, __b, __c, __imm) \ + _Generic((__a), vector unsigned char \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned short \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned int \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long 
long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned long long \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned __int128 \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm))) +#else +#define vec_ternarylogic(__a, __b, __c, __imm) \ + _Generic((__a), vector unsigned char \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned short \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned int \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm)), \ + vector unsigned long long \ + : __builtin_vsx_xxeval((vector unsigned long long)(__a), \ + (vector unsigned long long)(__b), \ + (vector unsigned long long)(__c), (__imm))) +#endif /* __SIZEOF_INT128__ */ +#endif /* __VSX__ */ + +/* vec_genpcvm */ + +#ifdef __VSX__ +#define vec_genpcvm(__a, __imm) \ + _Generic((__a), vector unsigned char \ + : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)), \ + vector unsigned short \ + : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)), \ + vector unsigned int \ + : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)), \ + vector unsigned long long \ + : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm))) +#endif /* __VSX__ */ + +/* vec_clrl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_clrl(vector signed char __a, unsigned int __n) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclrrb(__a, __n); +#else + return __builtin_altivec_vclrlb( __a, __n); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_clrl(vector unsigned char __a, unsigned int __n) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclrrb((vector signed char)__a, __n); +#else + return __builtin_altivec_vclrlb((vector signed char)__a, __n); +#endif +} + +/* vec_clrr */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_clrr(vector signed char __a, unsigned int __n) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclrlb(__a, __n); +#else + return __builtin_altivec_vclrrb( __a, __n); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_clrr(vector unsigned char __a, unsigned int __n) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclrlb((vector signed char)__a, __n); +#else + return __builtin_altivec_vclrrb((vector signed char)__a, __n); +#endif +} + +/* vec_cntlzm */ + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vclzdm(__a, __b); +} + +/* vec_cnttzm */ + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vctzdm(__a, __b); +} + +/* vec_mod */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_mod(vector signed int __a, vector signed int __b) { + return __a % __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mod(vector unsigned int __a, vector unsigned int __b) { + return __a % __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mod(vector signed long long __a, vector signed 
long long __b) { + return __a % __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mod(vector unsigned long long __a, vector unsigned long long __b) { + return __a % __b; +} + +#ifdef __SIZEOF_INT128__ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_mod(vector signed __int128 __a, vector signed __int128 __b) { + return __a % __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a % __b; +} +#endif + +/* vec_sldbi */ + +#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7)) + +/* vec_srdbi */ + +#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7)) + +/* vec_insertl */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinsbrx(__b, __c, __a); +#else + return __builtin_altivec_vinsblx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinshrx(__b, __c, __a); +#else + return __builtin_altivec_vinshlx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinswrx(__b, __c, __a); +#else + return __builtin_altivec_vinswlx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_insertl(unsigned long long __a, vector unsigned long long __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinsdrx(__b, __c, __a); +#else + return __builtin_altivec_vinsdlx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_insertl(vector unsigned char __a, vector unsigned char __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinsbvrx(__b, __c, __a); +#else + return __builtin_altivec_vinsbvlx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_insertl(vector unsigned short __a, vector unsigned short __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinshvrx(__b, __c, __a); +#else + return __builtin_altivec_vinshvlx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_insertl(vector unsigned int __a, vector unsigned int __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinswvrx(__b, __c, __a); +#else + return __builtin_altivec_vinswvlx(__b, __c, __a); +#endif +} + +/* vec_inserth */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinsblx(__b, __c, __a); +#else + return __builtin_altivec_vinsbrx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinshlx(__b, __c, __a); +#else + return __builtin_altivec_vinshrx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinswlx(__b, __c, __a); +#else 
+ return __builtin_altivec_vinswrx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_inserth(unsigned long long __a, vector unsigned long long __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinsdlx(__b, __c, __a); +#else + return __builtin_altivec_vinsdrx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_inserth(vector unsigned char __a, vector unsigned char __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinsbvlx(__b, __c, __a); +#else + return __builtin_altivec_vinsbvrx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_inserth(vector unsigned short __a, vector unsigned short __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinshvlx(__b, __c, __a); +#else + return __builtin_altivec_vinshvrx(__b, __c, __a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_inserth(vector unsigned int __a, vector unsigned int __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vinswvlx(__b, __c, __a); +#else + return __builtin_altivec_vinswvrx(__b, __c, __a); +#endif +} + +/* vec_extractl */ + +static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl( + vector unsigned char __a, vector unsigned char __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vextdubvrx(__a, __b, __c); +#else + vector unsigned long long __ret = __builtin_altivec_vextdubvlx(__a, __b, __c); + return vec_sld(__ret, __ret, 8); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl( + vector unsigned short __a, vector unsigned short __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vextduhvrx(__a, __b, __c); +#else + vector unsigned long long __ret = __builtin_altivec_vextduhvlx(__a, __b, __c); + return vec_sld(__ret, __ret, 8); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl( + vector unsigned int __a, vector unsigned int __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vextduwvrx(__a, __b, __c); +#else + vector unsigned long long __ret = __builtin_altivec_vextduwvlx(__a, __b, __c); + return vec_sld(__ret, __ret, 8); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_extractl(vector unsigned long long __a, vector unsigned long long __b, + unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vextddvrx(__a, __b, __c); +#else + vector unsigned long long __ret = __builtin_altivec_vextddvlx(__a, __b, __c); + return vec_sld(__ret, __ret, 8); +#endif +} + +/* vec_extracth */ + +static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth( + vector unsigned char __a, vector unsigned char __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vextdubvlx(__a, __b, __c); +#else + vector unsigned long long __ret = __builtin_altivec_vextdubvrx(__a, __b, __c); + return vec_sld(__ret, __ret, 8); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth( + vector unsigned short __a, vector unsigned short __b, unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vextduhvlx(__a, __b, __c); +#else + vector unsigned long long __ret = __builtin_altivec_vextduhvrx(__a, __b, __c); + return vec_sld(__ret, __ret, 8); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth( + vector unsigned int __a, 
vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+#ifdef __VSX__
+
+/* vec_permx */
+
+#define vec_permx(__a, __b, __c, __d) \
+  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+
+/* vec_blendv */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_blendv(vector signed char __a, vector signed char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_blendv(vector unsigned char __a, vector unsigned char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_blendv(vector signed short __a, vector signed short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_blendv(vector unsigned short __a, vector unsigned short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_blendv(vector signed int __a, vector signed int __b,
+           vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_blendv(vector unsigned int __a, vector unsigned int __b,
+           vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_blendv(vector signed long long __a, vector signed long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_blendv(vector double __a, vector double __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+/* vec_replace_elt */
+
+#define vec_replace_elt __builtin_altivec_vec_replace_elt
+
+/* vec_replace_unaligned */
+
+#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+
+/* vec_splati */
+
+#define vec_splati(__a) \
+  _Generic((__a), signed int \
+           : ((vector signed int)__a), unsigned int \
+           : ((vector unsigned int)__a), float \
+           : ((vector float)__a))
+
+/* vec_splatid */
+
+static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
+  return ((vector double)((double)__a));
+}
+
+/* vec_splati_ins */
+
+static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
+    vector signed int __a, const unsigned int __b, const signed int __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2
+ __b] = __c; +#endif + return __a; +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins( + vector unsigned int __a, const unsigned int __b, const unsigned int __c) { +#ifdef __LITTLE_ENDIAN__ + __a[1 - __b] = __c; + __a[3 - __b] = __c; +#else + __a[__b] = __c; + __a[2 + __b] = __c; +#endif + return __a; +} + +static __inline__ vector float __ATTRS_o_ai +vec_splati_ins(vector float __a, const unsigned int __b, const float __c) { +#ifdef __LITTLE_ENDIAN__ + __a[1 - __b] = __c; + __a[3 - __b] = __c; +#else + __a[__b] = __c; + __a[2 + __b] = __c; +#endif + return __a; +} + +/* vec_test_lsbb_all_ones */ + +static __inline__ int __ATTRS_o_ai +vec_test_lsbb_all_ones(vector unsigned char __a) { + return __builtin_vsx_xvtlsbb(__a, 1); +} + +/* vec_test_lsbb_all_zeros */ + +static __inline__ int __ATTRS_o_ai +vec_test_lsbb_all_zeros(vector unsigned char __a) { + return __builtin_vsx_xvtlsbb(__a, 0); +} +#endif /* __VSX__ */ + +/* vec_stril */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_stril(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribr((vector signed char)__a); +#else + return __builtin_altivec_vstribl((vector signed char)__a); +#endif +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_stril(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribr(__a); +#else + return __builtin_altivec_vstribl(__a); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_stril(vector unsigned short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihr((vector signed short)__a); +#else + return __builtin_altivec_vstrihl((vector signed short)__a); +#endif +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_stril(vector signed short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihr(__a); +#else + return __builtin_altivec_vstrihl(__a); +#endif +} + +/* vec_stril_p */ + +static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a); +#else + return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a); +#endif +} + +static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribr_p(__CR6_EQ, __a); +#else + return __builtin_altivec_vstribl_p(__CR6_EQ, __a); +#endif +} + +static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a); +#else + return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a); +#endif +} + +static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihr_p(__CR6_EQ, __a); +#else + return __builtin_altivec_vstrihl_p(__CR6_EQ, __a); +#endif +} + +/* vec_strir */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_strir(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribl((vector signed char)__a); +#else + return __builtin_altivec_vstribr((vector signed char)__a); +#endif +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_strir(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribl(__a); +#else + return __builtin_altivec_vstribr(__a); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_strir(vector unsigned short __a) { +#ifdef __LITTLE_ENDIAN__ 
+ return __builtin_altivec_vstrihl((vector signed short)__a); +#else + return __builtin_altivec_vstrihr((vector signed short)__a); +#endif +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_strir(vector signed short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihl(__a); +#else + return __builtin_altivec_vstrihr(__a); +#endif +} + +/* vec_strir_p */ + +static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a); +#else + return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a); +#endif +} + +static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstribl_p(__CR6_EQ, __a); +#else + return __builtin_altivec_vstribr_p(__CR6_EQ, __a); +#endif +} + +static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a); +#else + return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a); +#endif +} + +static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vstrihl_p(__CR6_EQ, __a); +#else + return __builtin_altivec_vstrihr_p(__CR6_EQ, __a); +#endif +} + +/* vs[l | r | ra] */ + +#ifdef __SIZEOF_INT128__ +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) * + __CHAR_BIT__)); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) { + return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) * + __CHAR_BIT__)); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) * + __CHAR_BIT__)); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) { + return ( + vector signed __int128)(((vector unsigned __int128)__a) >> + (__b % + (vector unsigned __int128)(sizeof( + unsigned __int128) * + __CHAR_BIT__))); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return ( + vector unsigned __int128)(((vector signed __int128)__a) >> + (__b % + (vector unsigned __int128)(sizeof( + unsigned __int128) * + __CHAR_BIT__))); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) { + return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) * + __CHAR_BIT__)); +} + +#endif /* __SIZEOF_INT128__ */ +#endif /* __POWER10_VECTOR__ */ + +#undef __ATTRS_o_ai + +#endif /* __ALTIVEC_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ammintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ammintrin.h new file mode 100644 index 0000000..3806be6 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ammintrin.h @@ -0,0 +1,179 @@ +/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AMMINTRIN_H
+#define __AMMINTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a"), __min_vector_width__(128)))
+
+/// Extracts the specified bits from the lower 64 bits of the 128-bit
+/// integer vector operand at the index \a idx and of the length \a len.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
+/// \endcode
+///
+/// This intrinsic corresponds to the EXTRQ instruction.
+///
+/// \param x
+///    The value from which bits are extracted.
+/// \param len
+///    Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+///    are zero, the length is interpreted as 64.
+/// \param idx
+///    Bits [5:0] specify the index of the least significant bit; the other
+///    bits are ignored. If the sum of the index and length is greater than 64,
+///    the result is undefined. If the length and index are both zero, bits
+///    [63:0] of parameter \a x are extracted. If the length is zero but the
+///    index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector whose lower 64 bits contain the bits
+///    extracted from the source operand.
+#define _mm_extracti_si64(x, len, idx) \
+  ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
+                                  (char)(len), (char)(idx)))
+
+/// Extracts the specified bits from the lower 64 bits of the 128-bit
+/// integer vector operand at the index and of the length specified by
+/// \a __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the EXTRQ instruction.
+///
+/// \param __x
+///    The value from which bits are extracted.
+/// \param __y
+///    Specifies the index of the least significant bit at [13:8] and the
+///    length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the
+///    length is interpreted as 64. If the sum of the index and length is
+///    greater than 64, the result is undefined. If the length and index are
+///    both zero, bits [63:0] of parameter \a __x are extracted. If the length
+///    is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
+///    from the source operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_extract_si64(__m128i __x, __m128i __y)
+{
+  return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
+}
+
+/// Inserts bits of a specified length from the source integer vector
+/// \a y into the lower 64 bits of the destination integer vector \a x at
+/// the index \a idx and of the length \a len.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len,
+/// const int idx);
+/// \endcode
+///
+/// This intrinsic corresponds to the INSERTQ instruction.
+///
+/// \param x
+///    The destination operand where bits will be inserted. The inserted bits
+///    are defined by the length \a len and by the index \a idx specifying the
+///    least significant bit.
+/// \param y
+///    The source operand containing the bits to be extracted. The extracted
+///    bits are the least significant bits of operand \a y of length \a len.
+/// \param len
+///    Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
+///    are zero, the length is interpreted as 64.
+/// \param idx
+///    Bits [5:0] specify the index of the least significant bit; the other
+///    bits are ignored. If the sum of the index and length is greater than 64,
+///    the result is undefined. If the length and index are both zero, bits
+///    [63:0] of parameter \a y are inserted into parameter \a x. If the length
+///    is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits of
+///    destination operand \a x with the specified bitfields replaced by the
+///    lower bits of source operand \a y. The upper 64 bits of the return value
+///    are undefined.
+#define _mm_inserti_si64(x, y, len, idx) \
+  ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
+                                    (__v2di)(__m128i)(y), \
+                                    (char)(len), (char)(idx)))
+
+/// Inserts bits of a specified length from the source integer vector
+/// \a __y into the lower 64 bits of the destination integer vector \a __x
+/// at the index and of the length specified by \a __y.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the INSERTQ instruction.
+///
+/// \param __x
+///    The destination operand where bits will be inserted. The inserted bits
+///    are defined by the length and by the index of the least significant bit
+///    specified by operand \a __y.
+/// \param __y
+///    The source operand containing the bits to be extracted. The extracted
+///    bits are the least significant bits of operand \a __y with length
+///    specified by bits [69:64]. These are inserted into the destination at the
+///    index specified by bits [77:72]; all other bits are ignored. If bits
+///    [69:64] are zero, the length is interpreted as 64. If the sum of the
+///    index and length is greater than 64, the result is undefined. If the
+///    length and index are both zero, bits [63:0] of parameter \a __y are
+///    inserted into parameter \a __x. If the length is zero but the index is
+///    non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits of
+///    destination operand \a __x with the specified bitfields replaced by the
+///    lower bits of source operand \a __y. The upper 64 bits of the return
+///    value are undefined.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_insert_si64(__m128i __x, __m128i __y)
+{
+  return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
+}
+
+/// Stores a 64-bit double-precision value in a 64-bit memory location.
+/// To minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVNTSD instruction.
+///
+/// \param __p
+///    The 64-bit memory location used to store the register value.
+/// \param __a
+///    The 64-bit double-precision floating-point register value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_sd(double *__p, __m128d __a)
+{
+  __builtin_ia32_movntsd(__p, (__v2df)__a);
+}
+
+/// Stores a 32-bit single-precision floating-point value in a 32-bit
+/// memory location. To minimize caching, the data is flagged as
+/// non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVNTSS instruction.
+///
+/// \param __p
+///    The 32-bit memory location used to store the register value.
+/// \param __a
+///    The 32-bit single-precision floating-point register value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_stream_ss(float *__p, __m128 __a)
+{
+  __builtin_ia32_movntss(__p, (__v4sf)__a);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __AMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/amxintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/amxintrin.h
new file mode 100644
index 0000000..ec601a5
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/amxintrin.h
@@ -0,0 +1,493 @@
+/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
+#endif /* __IMMINTRIN_H */
+
+#ifndef __AMXINTRIN_H
+#define __AMXINTRIN_H
+#ifdef __x86_64__
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS_TILE \
+  __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+#define __DEFAULT_FN_ATTRS_INT8 \
+  __attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
+#define __DEFAULT_FN_ATTRS_BF16 \
+  __attribute__((__always_inline__, __nodebug__, __target__("amx-bf16")))
+
+/// Load tile configuration from a 64-byte memory location specified by
+/// "mem_addr". The tile configuration includes the tile type palette, the
+/// number of bytes per row, and the number of rows. If the specified
+/// palette_id is zero, that signifies the init state for both the tile
+/// config and the tile data, and the tiles are zeroed. Any invalid
+/// configurations will result in #GP fault.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the LDTILECFG instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit configuration
+static __inline__ void __DEFAULT_FN_ATTRS_TILE
+_tile_loadconfig(const void *__config) {
+  __builtin_ia32_tile_loadconfig(__config);
+}
+
+/// Stores the current tile configuration to a 64-byte memory location
+/// specified by "mem_addr". The tile configuration includes the tile type
+/// palette, the number of bytes per row, and the number of rows. If tiles
+/// are not configured, all zeroes will be stored to memory.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the STTILECFG instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit configuration
+static __inline__ void __DEFAULT_FN_ATTRS_TILE
+_tile_storeconfig(void *__config) {
+  __builtin_ia32_tile_storeconfig(__config);
+}
+
+/// Release the tile configuration to return to the init state, which
+/// releases all storage it currently holds.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILERELEASE instruction.
+static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
+  __builtin_ia32_tilerelease();
+}
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILELOADD instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+#define _tile_loadd(dst, base, stride) \
+  __builtin_ia32_tileloadd64((dst), ((const void *)(base)), \
+                             (__SIZE_TYPE__)(stride))
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILELOADDT1 instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+#define _tile_stream_loadd(dst, base, stride) \
+  __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), \
+                               (__SIZE_TYPE__)(stride))
+
+/// Store the tile specified by "src" to memory specified by "base" address and
+/// "stride" using the tile configuration previously configured via
+/// "_tile_loadconfig".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILESTORED instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be stored in memory.
+#define _tile_stored(dst, base, stride) \
+  __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Zero the tile specified by "tdest".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILEZERO instruction.
+///
+/// \param tile
+///    The destination tile to be zeroed. Max size is 1024 Bytes.
+#define _tile_zero(tile) __builtin_ia32_tilezero((tile))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBSSD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbssd(dst, src0, src1) \
+  __builtin_ia32_tdpbssd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBSUD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbsud(dst, src0, src1) \
+  __builtin_ia32_tdpbsud((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBUSD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbusd(dst, src0, src1) \
+  __builtin_ia32_tdpbusd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBUUD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbuud(dst, src0, src1) \
+  __builtin_ia32_tdpbuud((dst), (src0), (src1))
+
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBF16PS instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbf16ps(dst, src0, src1) \
+  __builtin_ia32_tdpbf16ps((dst), (src0), (src1))
+
+/// The AMX tile register size can be configured; the maximum size is
+/// 16x64=1024 bytes. Since there is no 2D type in llvm IR, we use a vector
+/// type to represent the 2D tile, and the fixed size is the maximum AMX tile
+/// register size.
+typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64)));
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
+                     __SIZE_TYPE__ stride) {
+  return __builtin_ia32_tileloadd64_internal(m, n, base,
+                                             (__SIZE_TYPE__)(stride));
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_loaddt1_internal(unsigned short m, unsigned short n, const void *base,
+                       __SIZE_TYPE__ stride) {
+  return __builtin_ia32_tileloaddt164_internal(m, n, base,
+                                               (__SIZE_TYPE__)(stride));
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbsud_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbsud_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbusd_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbusd_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbuud_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbuud_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ void __DEFAULT_FN_ATTRS_INT8
+_tile_stored_internal(unsigned short m, unsigned short n, void *base,
+                      __SIZE_TYPE__ stride, _tile1024i tile) {
+  return __builtin_ia32_tilestored64_internal(m, n, base,
+                                              (__SIZE_TYPE__)(stride), tile);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_BF16
+_tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+                        _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// This struct packs the shape and tile data together for the user. We suggest
+/// initializing the struct as early as possible, because the compiler depends
+/// on the shape information to do the configuration. The constant value is
+/// preferred for optimization by the compiler.
+typedef struct __tile1024i_str {
+  const unsigned short row;
+  const unsigned short col;
+  _tile1024i tile;
+} __tile1024i;
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILELOADD instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_loadd(__tile1024i *dst, const void *base,
+                         __SIZE_TYPE__ stride) {
+  dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);
+}
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILELOADDT1 instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_stream_loadd(__tile1024i *dst, const void *base,
+                                __SIZE_TYPE__ stride) {
+  dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBSSD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
+                          __tile1024i src1) {
+  dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile,
+                                    src0.tile, src1.tile);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBSUD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
+                          __tile1024i src1) {
+  dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile,
+                                    src0.tile, src1.tile);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBUSD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
+                          __tile1024i src1) {
+  dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile,
+                                    src0.tile, src1.tile);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBUUD instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
+                          __tile1024i src1) {
+  dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile,
+                                    src0.tile, src1.tile);
+}
+
+/// Store the tile specified by "src" to memory specified by "base" address and
+/// "stride".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILESTORED instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be stored in memory.
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_stored(void *base, __SIZE_TYPE__ stride, __tile1024i src) {
+  _tile_stored_internal(src.row, src.col, base, stride, src.tile);
+}
+
+/// Zero the tile specified by "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TILEZERO instruction.
+///
+/// \param dst
+///    The destination tile to be zeroed. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_zero(__tile1024i *dst) {
+  dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);
+}
+
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the TDPBF16PS instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_BF16
+static void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,
+                            __tile1024i src1) {
+  dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile,
+                                      src0.tile, src1.tile);
+}
+
+#undef __DEFAULT_FN_ATTRS_TILE
+#undef __DEFAULT_FN_ATTRS_INT8
+#undef __DEFAULT_FN_ATTRS_BF16
+
+#endif /* __x86_64__ */
+#endif /* __AMXINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm64intr.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm64intr.h
new file mode 100644
index 0000000..4943b2d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm64intr.h
@@ -0,0 +1,35 @@
+/*===---- arm64intr.h - ARM64 Windows intrinsics -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm64intr.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm64intr.h
new file mode 100644
index 0000000..4943b2d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm64intr.h
@@ -0,0 +1,35 @@
+/*===---- arm64intr.h - ARM64 Windows intrinsics -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the Windows platform. */
+#ifndef _MSC_VER
+#include_next <arm64intr.h>
+#else
+
+#ifndef __ARM64INTR_H
+#define __ARM64INTR_H
+
+typedef enum
+{
+  _ARM64_BARRIER_SY    = 0xF,
+  _ARM64_BARRIER_ST    = 0xE,
+  _ARM64_BARRIER_LD    = 0xD,
+  _ARM64_BARRIER_ISH   = 0xB,
+  _ARM64_BARRIER_ISHST = 0xA,
+  _ARM64_BARRIER_ISHLD = 0x9,
+  _ARM64_BARRIER_NSH   = 0x7,
+  _ARM64_BARRIER_NSHST = 0x6,
+  _ARM64_BARRIER_NSHLD = 0x5,
+  _ARM64_BARRIER_OSH   = 0x3,
+  _ARM64_BARRIER_OSHST = 0x2,
+  _ARM64_BARRIER_OSHLD = 0x1
+} _ARM64INTR_BARRIER_TYPE;
+
+#endif /* __ARM64INTR_H */
+#endif /* _MSC_VER */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_acle.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_acle.h
new file mode 100644
index 0000000..45fac24
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_acle.h
@@ -0,0 +1,771 @@
+/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_ACLE_H
+#define __ARM_ACLE_H
+
+#ifndef __ARM_ACLE
+#error "ACLE intrinsics support not enabled."
+#endif
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
+/* 8.3 Memory barriers */
+#if !__has_builtin(__dmb)
+#define __dmb(i) __builtin_arm_dmb(i)
+#endif
+#if !__has_builtin(__dsb)
+#define __dsb(i) __builtin_arm_dsb(i)
+#endif
+#if !__has_builtin(__isb)
+#define __isb(i) __builtin_arm_isb(i)
+#endif
+
+/* 8.4 Hints */
+
+#if !__has_builtin(__wfi)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
+  __builtin_arm_wfi();
+}
+#endif
+
+#if !__has_builtin(__wfe)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
+  __builtin_arm_wfe();
+}
+#endif
+
+#if !__has_builtin(__sev)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
+  __builtin_arm_sev();
+}
+#endif
+
+#if !__has_builtin(__sevl)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
+  __builtin_arm_sevl();
+}
+#endif
+
+#if !__has_builtin(__yield)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
+  __builtin_arm_yield();
+}
+#endif
+
+#if __ARM_32BIT_STATE
+#define __dbg(t) __builtin_arm_dbg(t)
+#endif
+
+/* 8.5 Swap */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__swp(uint32_t __x, volatile uint32_t *__p) {
+  uint32_t v;
+  do
+    v = __builtin_arm_ldrex(__p);
+  while (__builtin_arm_strex(__x, __p));
+  return v;
+}
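
__swp above is implemented as an LDREX/STREX retry loop rather than the
deprecated SWP instruction. A toy spinlock sketch on top of it, assuming an ARM
target where this header supplies __swp, __dmb, __wfe and __sev; 0xF selects
the full-system (SY) barrier domain:

#include <arm_acle.h>
#include <stdint.h>

static volatile uint32_t lock_word = 0;

void spin_lock(void) {
  /* Atomically exchange 1 into the lock; an old value of 0 means we own it. */
  while (__swp(1, &lock_word) != 0)
    __wfe();  /* hint: wait for an event rather than burning cycles */
  __dmb(0xF); /* order the critical section after lock acquisition */
}

void spin_unlock(void) {
  __dmb(0xF); /* drain prior accesses before publishing the release */
  lock_word = 0;
  __sev();    /* wake any waiters parked in __wfe() */
}
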
+/* 8.6 Memory prefetch intrinsics */
+/* 8.6.1 Data prefetch */
+#define __pld(addr) __pldx(0, 0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+  __builtin_arm_prefetch(addr, access_kind, 1)
+#else
+#define __pldx(access_kind, cache_level, retention_policy, addr) \
+  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
+#endif
+
+/* 8.6.2 Instruction prefetch */
+#define __pli(addr) __plix(0, 0, addr)
+
+#if __ARM_32BIT_STATE
+#define __plix(cache_level, retention_policy, addr) \
+  __builtin_arm_prefetch(addr, 0, 0)
+#else
+#define __plix(cache_level, retention_policy, addr) \
+  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
+#endif
+
+/* 8.7 NOP */
+#if !defined(_MSC_VER) || !defined(__aarch64__)
+static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
+  __builtin_arm_nop();
+}
+#endif
+
+/* 9 DATA-PROCESSING INTRINSICS */
+/* 9.2 Miscellaneous data-processing intrinsics */
+/* ROR */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__ror(uint32_t __x, uint32_t __y) {
+  __y %= 32;
+  if (__y == 0)
+    return __x;
+  return (__x >> __y) | (__x << (32 - __y));
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__rorll(uint64_t __x, uint32_t __y) {
+  __y %= 64;
+  if (__y == 0)
+    return __x;
+  return (__x >> __y) | (__x << (64 - __y));
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__rorl(unsigned long __x, uint32_t __y) {
+#if __SIZEOF_LONG__ == 4
+  return __ror(__x, __y);
+#else
+  return __rorll(__x, __y);
+#endif
+}
+
+
+/* CLZ */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clz(uint32_t __t) {
+  return __builtin_clz(__t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__clzl(unsigned long __t) {
+  return __builtin_clzl(__t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__clzll(uint64_t __t) {
+  return __builtin_clzll(__t);
+}
+
+/* CLS */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__cls(uint32_t __t) {
+  return __builtin_arm_cls(__t);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clsl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+  return __builtin_arm_cls(__t);
+#else
+  return __builtin_arm_cls64(__t);
+#endif
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clsll(uint64_t __t) {
+  return __builtin_arm_cls64(__t);
+}
+
+/* REV */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__rev(uint32_t __t) {
+  return __builtin_bswap32(__t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__revl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+  return __builtin_bswap32(__t);
+#else
+  return __builtin_bswap64(__t);
+#endif
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__revll(uint64_t __t) {
+  return __builtin_bswap64(__t);
+}
+
+/* REV16 */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__rev16(uint32_t __t) {
+  return __ror(__rev(__t), 16);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__rev16ll(uint64_t __t) {
+  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__rev16l(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+  return __rev16(__t);
+#else
+  return __rev16ll(__t);
+#endif
+}
+
+/* REVSH */
+static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
+__revsh(int16_t __t) {
+  return __builtin_bswap16(__t);
+}
+
+/* RBIT */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__rbit(uint32_t __t) {
+  return __builtin_arm_rbit(__t);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__rbitll(uint64_t __t) {
+#if __ARM_32BIT_STATE
+  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
+         __builtin_arm_rbit(__t >> 32);
+#else
+  return __builtin_arm_rbit64(__t);
+#endif
+}
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+__rbitl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+  return __rbit(__t);
+#else
+  return __rbitll(__t);
+#endif
+}
+
+/*
+ * 9.3 16-bit multiplications
+ */
+#if __ARM_FEATURE_DSP
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulbb(int32_t __a, int32_t __b) {
+  return __builtin_arm_smulbb(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulbt(int32_t __a, int32_t __b) {
+  return __builtin_arm_smulbt(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smultb(int32_t __a, int32_t __b) {
+  return __builtin_arm_smultb(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smultt(int32_t __a, int32_t __b) {
+  return __builtin_arm_smultt(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulwb(int32_t __a, int32_t __b) {
+  return __builtin_arm_smulwb(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulwt(int32_t __a, int32_t __b) {
+  return __builtin_arm_smulwt(__a, __b);
+}
+#endif
+
+/*
+ * 9.4 Saturating intrinsics
+ *
+ * FIXME: Change guards to their corresponding __ARM_FEATURE flags when Q flag
+ * intrinsics are implemented and the flag is enabled.
+ */
+/* 9.4.1 Width-specified saturation intrinsics */
+#if __ARM_FEATURE_SAT
+#define __ssat(x, y) __builtin_arm_ssat(x, y)
+#define __usat(x, y) __builtin_arm_usat(x, y)
+#endif
+
+/* 9.4.2 Saturating addition and subtraction intrinsics */
+#if __ARM_FEATURE_DSP
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qadd(int32_t __t, int32_t __v) {
+  return __builtin_arm_qadd(__t, __v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qsub(int32_t __t, int32_t __v) {
+  return __builtin_arm_qsub(__t, __v);
+}
+
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__qdbl(int32_t __t) {
+  return __builtin_arm_qadd(__t, __t);
+}
+#endif
+
+/* 9.4.3 Accumulating multiplications */
+#if __ARM_FEATURE_DSP
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlabb(int32_t __a, int32_t __b, int32_t __c) {
+  return __builtin_arm_smlabb(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlabt(int32_t __a, int32_t __b, int32_t __c) {
+  return __builtin_arm_smlabt(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlatb(int32_t __a, int32_t __b, int32_t __c) {
+  return __builtin_arm_smlatb(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlatt(int32_t __a, int32_t __b, int32_t __c) {
+  return __builtin_arm_smlatt(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlawb(int32_t __a, int32_t __b, int32_t __c) {
+  return __builtin_arm_smlawb(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlawt(int32_t __a, int32_t __b, int32_t __c) {
+  return __builtin_arm_smlawt(__a, __b, __c);
+}
+#endif
+
+
+/* 9.5.4 Parallel 16-bit saturation */
+#if __ARM_FEATURE_SIMD32
+#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
+#define __usat16(x, y) __builtin_arm_usat16(x, y)
+#endif
+
+/* 9.5.5 Packing and unpacking */
+#if __ARM_FEATURE_SIMD32
+typedef int32_t int8x4_t;
+typedef int32_t int16x2_t;
+typedef uint32_t uint8x4_t;
+typedef uint32_t uint16x2_t;
+
+static __inline__ int16x2_t
__attribute__((__always_inline__, __nodebug__)) +__sxtab16(int16x2_t __a, int8x4_t __b) { + return __builtin_arm_sxtab16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__sxtb16(int8x4_t __a) { + return __builtin_arm_sxtb16(__a); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__uxtab16(int16x2_t __a, int8x4_t __b) { + return __builtin_arm_uxtab16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__uxtb16(int8x4_t __a) { + return __builtin_arm_uxtb16(__a); +} +#endif + +/* 9.5.6 Parallel selection */ +#if __ARM_FEATURE_SIMD32 +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__sel(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_sel(__a, __b); +} +#endif + +/* 9.5.7 Parallel 8-bit addition and subtraction */ +#if __ARM_FEATURE_SIMD32 +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__qadd8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_qadd8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__qsub8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_qsub8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__sadd8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_sadd8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__shadd8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_shadd8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__shsub8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_shsub8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__ssub8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_ssub8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uadd8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uadd8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uhadd8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uhadd8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uhsub8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uhsub8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uqadd8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uqadd8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uqsub8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uqsub8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__usub8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_usub8(__a, __b); +} +#endif + +/* 9.5.8 Sum of 8-bit absolute differences */ +#if __ARM_FEATURE_SIMD32 +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__usad8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_usad8(__a, __b); +} +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) { + return __builtin_arm_usada8(__a, __b, __c); +} +#endif + +/* 9.5.9 Parallel 16-bit addition and subtraction */ +#if __ARM_FEATURE_SIMD32 +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qadd16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qadd16(__a, __b); +} +static __inline__ int16x2_t 
__attribute__((__always_inline__, __nodebug__)) +__qasx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qasx(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qsax(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qsax(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qsub16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qsub16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__sadd16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_sadd16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__sasx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_sasx(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shadd16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shadd16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shasx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shasx(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shsax(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shsax(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shsub16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shsub16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__ssax(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_ssax(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__ssub16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_ssub16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uadd16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uadd16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uasx(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uasx(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhadd16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhadd16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhasx(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhasx(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhsax(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhsax(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhsub16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhsub16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqadd16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqadd16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqasx(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqasx(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqsax(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqsax(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqsub16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqsub16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__usax(uint16x2_t __a, uint16x2_t __b) { + return 
__builtin_arm_usax(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__usub16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_usub16(__a, __b); +} +#endif + +/* 9.5.10 Parallel 16-bit multiplications */ +#if __ARM_FEATURE_SIMD32 +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smlad(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smladx(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlald(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlaldx(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smlsd(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smlsdx(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlsld(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlsldx(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smuad(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smuad(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smuadx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smuadx(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smusd(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smusd(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smusdx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smusdx(__a, __b); +} +#endif + +/* 9.7 CRC32 intrinsics */ +#if __ARM_FEATURE_CRC32 +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32b(uint32_t __a, uint8_t __b) { + return __builtin_arm_crc32b(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32h(uint32_t __a, uint16_t __b) { + return __builtin_arm_crc32h(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32w(uint32_t __a, uint32_t __b) { + return __builtin_arm_crc32w(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32d(uint32_t __a, uint64_t __b) { + return __builtin_arm_crc32d(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32cb(uint32_t __a, uint8_t __b) { + return __builtin_arm_crc32cb(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32ch(uint32_t __a, uint16_t __b) { + return __builtin_arm_crc32ch(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__crc32cw(uint32_t __a, uint32_t __b) { + return __builtin_arm_crc32cw(__a, __b); +} + 
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__crc32cd(uint32_t __a, uint64_t __b) {
+  return __builtin_arm_crc32cd(__a, __b);
+}
+#endif
+
+/* Armv8.3-A Javascript conversion intrinsic */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__jcvt(double __a) {
+  return __builtin_arm_jcvt(__a);
+}
+#endif
+
+/* Armv8.5-A FP rounding intrinsics */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint32zf(float __a) {
+  return __builtin_arm_frint32zf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint32z(double __a) {
+  return __builtin_arm_frint32z(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint64zf(float __a) {
+  return __builtin_arm_frint64zf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint64z(double __a) {
+  return __builtin_arm_frint64z(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint32xf(float __a) {
+  return __builtin_arm_frint32xf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint32x(double __a) {
+  return __builtin_arm_frint32x(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint64xf(float __a) {
+  return __builtin_arm_frint64xf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint64x(double __a) {
+  return __builtin_arm_frint64x(__a);
+}
+#endif
+
+/* Armv8.7-A load/store 64-byte intrinsics */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+typedef struct {
+  uint64_t val[8];
+} data512_t;
+
+static __inline__ data512_t __attribute__((__always_inline__, __nodebug__))
+__arm_ld64b(const void *__addr) {
+  data512_t __value;
+  __builtin_arm_ld64b(__addr, __value.val);
+  return __value;
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__arm_st64b(void *__addr, data512_t __value) {
+  __builtin_arm_st64b(__addr, __value.val);
+}
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__arm_st64bv(void *__addr, data512_t __value) {
+  return __builtin_arm_st64bv(__addr, __value.val);
+}
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__arm_st64bv0(void *__addr, data512_t __value) {
+  return __builtin_arm_st64bv0(__addr, __value.val);
+}
+#endif
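
The data512_t helpers above perform single-copy-atomic 64-byte transfers. A
sketch of posting a command descriptor to a device, assuming __ARM_FEATURE_LS64
support and a caller-supplied device mapping; the function and parameter names
are placeholders:

#include <arm_acle.h>
#include <stdint.h>

/* Returns the status reported by ST64BV: zero means the device accepted it. */
uint64_t post_descriptor(void *doorbell, const uint64_t payload[8]) {
  data512_t blk;
  for (int i = 0; i < 8; ++i)
    blk.val[i] = payload[i];            /* stage the 64-byte block */
  return __arm_st64bv(doorbell, blk);   /* one 64-byte store with response */
}
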
+/* 10.1 Special register intrinsics */
+#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
+#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
+#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
+#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
+#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
+#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
+
+/* Memory Tagging Extensions (MTE) Intrinsics */
+#if __ARM_FEATURE_MEMORY_TAGGING
+#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
+#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
+#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
+#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
+#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
+#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
+#endif
+
+/* Transactional Memory Extension (TME) Intrinsics */
+#if __ARM_FEATURE_TME
+
+#define _TMFAILURE_REASON 0x00007fffu
+#define _TMFAILURE_RTRY 0x00008000u
+#define _TMFAILURE_CNCL 0x00010000u
+#define _TMFAILURE_MEM 0x00020000u
+#define _TMFAILURE_IMP 0x00040000u
+#define _TMFAILURE_ERR 0x00080000u
+#define _TMFAILURE_SIZE 0x00100000u
+#define _TMFAILURE_NEST 0x00200000u
+#define _TMFAILURE_DBG 0x00400000u
+#define _TMFAILURE_INT 0x00800000u
+#define _TMFAILURE_TRIVIAL 0x01000000u
+
+#define __tstart() __builtin_arm_tstart()
+#define __tcommit() __builtin_arm_tcommit()
+#define __tcancel(__arg) __builtin_arm_tcancel(__arg)
+#define __ttest() __builtin_arm_ttest()
+
+#endif /* __ARM_FEATURE_TME */
+
+/* Armv8.5-A Random number generation intrinsics */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__rndr(uint64_t *__p) {
+  return __builtin_arm_rndr(__p);
+}
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__rndrrs(uint64_t *__p) {
+  return __builtin_arm_rndrrs(__p);
+}
+#endif
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+#endif /* __ARM_ACLE_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_bf16.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_bf16.h
new file mode 100644
index 0000000..329ae39
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_bf16.h
@@ -0,0 +1,20 @@
+/*===---- arm_bf16.h - ARM BF16 intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_BF16_H
+#define __ARM_BF16_H
+
+typedef __bf16 bfloat16_t;
+#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
+
+
+#undef __ai
+
+#endif
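
The TME macros above map directly onto the TSTART, TCOMMIT, TCANCEL and TTEST
instructions. A retry-loop sketch, assuming __ARM_FEATURE_TME and a lock-based
fallback supplied elsewhere: __tstart() yields 0 once the transaction has
started, otherwise a failure status to test against the _TMFAILURE_* masks:

#include <arm_acle.h>
#include <stdint.h>

int transactional_increment(int *counter) {
  for (int attempt = 0; attempt < 8; ++attempt) {
    uint64_t status = __tstart();
    if (status == 0) {        /* we are now executing transactionally */
      ++*counter;
      __tcommit();            /* commit publishes the update atomically */
      return 0;
    }
    if (!(status & _TMFAILURE_RTRY))
      break;                  /* permanent failure: retrying cannot help */
  }
  return -1;                  /* caller should fall back to a real lock */
}
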
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_cde.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_cde.h
new file mode 100644
index 0000000..4ad5d82
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_cde.h
@@ -0,0 +1,410 @@
+/*===---- arm_cde.h - ARM CDE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_CDE_H
+#define __ARM_CDE_H
+
+#if !__ARM_FEATURE_CDE
+#error "CDE support not enabled"
+#endif
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1)))
+uint32_t __arm_cx1(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1a)))
+uint32_t __arm_cx1a(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1d)))
+uint64_t __arm_cx1d(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1da)))
+uint64_t __arm_cx1da(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2)))
+uint32_t __arm_cx2(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2a)))
+uint32_t __arm_cx2a(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2d)))
+uint64_t __arm_cx2d(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2da)))
+uint64_t __arm_cx2da(int, uint64_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3)))
+uint32_t __arm_cx3(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3a)))
+uint32_t __arm_cx3a(int, uint32_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3d)))
+uint64_t __arm_cx3d(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3da)))
+uint64_t __arm_cx3da(int, uint64_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1_u32)))
+uint32_t __arm_vcx1_u32(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1a_u32)))
+uint32_t __arm_vcx1a_u32(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1d_u64)))
+uint64_t __arm_vcx1d_u64(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1da_u64)))
+uint64_t __arm_vcx1da_u64(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2_u32)))
+uint32_t __arm_vcx2_u32(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2a_u32)))
+uint32_t __arm_vcx2a_u32(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2d_u64)))
+uint64_t __arm_vcx2d_u64(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2da_u64)))
+uint64_t __arm_vcx2da_u64(int, uint64_t, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3_u32)))
+uint32_t __arm_vcx3_u32(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3a_u32)))
+uint32_t __arm_vcx3a_u32(int, uint32_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3d_u64)))
+uint64_t
__arm_vcx3d_u64(int, uint64_t, uint64_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3da_u64))) +uint64_t __arm_vcx3da_u64(int, uint64_t, uint64_t, uint64_t, uint32_t); + +#if __ARM_FEATURE_MVE + +typedef uint16_t mve_pred16_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t; + +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s16))) +int16x8_t __arm_vcx1q_m(int, int16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s32))) +int32x4_t __arm_vcx1q_m(int, int32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s64))) +int64x2_t __arm_vcx1q_m(int, int64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s8))) +int8x16_t __arm_vcx1q_m(int, int8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u16))) +uint16x8_t __arm_vcx1q_m(int, uint16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u32))) +uint32x4_t __arm_vcx1q_m(int, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u64))) +uint64x2_t __arm_vcx1q_m(int, uint64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u8))) +uint8x16_t __arm_vcx1q_m(int, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_u8))) +uint8x16_t __arm_vcx1q_u8(int, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s16))) +int16x8_t __arm_vcx1qa_m(int, int16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s32))) +int32x4_t __arm_vcx1qa_m(int, int32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s64))) +int64x2_t __arm_vcx1qa_m(int, int64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s8))) +int8x16_t __arm_vcx1qa_m(int, int8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u16))) +uint16x8_t __arm_vcx1qa_m(int, 
uint16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u32))) +uint32x4_t __arm_vcx1qa_m(int, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u64))) +uint64x2_t __arm_vcx1qa_m(int, uint64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u8))) +uint8x16_t __arm_vcx1qa_m(int, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s16))) +int16x8_t __arm_vcx1qa(int, int16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s32))) +int32x4_t __arm_vcx1qa(int, int32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s64))) +int64x2_t __arm_vcx1qa(int, int64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s8))) +int8x16_t __arm_vcx1qa(int, int8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u16))) +uint16x8_t __arm_vcx1qa(int, uint16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u32))) +uint32x4_t __arm_vcx1qa(int, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u64))) +uint64x2_t __arm_vcx1qa(int, uint64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u8))) +uint8x16_t __arm_vcx1qa(int, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s16))) +int16x8_t __arm_vcx2q_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s32))) +int32x4_t __arm_vcx2q_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s64))) +int64x2_t __arm_vcx2q_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s8))) +int8x16_t __arm_vcx2q_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u16))) +uint16x8_t __arm_vcx2q_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u32))) +uint32x4_t __arm_vcx2q_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u64))) +uint64x2_t __arm_vcx2q_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u8))) +uint8x16_t __arm_vcx2q_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s16))) +int16x8_t __arm_vcx2q(int, int16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s32))) +int32x4_t __arm_vcx2q(int, int32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s64))) +int64x2_t __arm_vcx2q(int, int64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s8))) +int8x16_t __arm_vcx2q(int, int8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u16))) +uint16x8_t __arm_vcx2q(int, uint16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u32))) +uint32x4_t __arm_vcx2q(int, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u64))) +uint64x2_t __arm_vcx2q(int, uint64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8))) +uint8x16_t __arm_vcx2q(int, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s16))) +uint8x16_t __arm_vcx2q_u8(int, int16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s32))) +uint8x16_t __arm_vcx2q_u8(int, int32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s64))) +uint8x16_t __arm_vcx2q_u8(int, int64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s8))) +uint8x16_t __arm_vcx2q_u8(int, int8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u16))) +uint8x16_t __arm_vcx2q_u8(int, uint16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u32))) +uint8x16_t __arm_vcx2q_u8(int, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u64))) +uint8x16_t __arm_vcx2q_u8(int, uint64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u8))) +uint8x16_t __arm_vcx2q_u8(int, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s16))) +int16x8_t __arm_vcx2qa_impl(int, int16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s32))) +int32x4_t __arm_vcx2qa_impl(int, int32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s64))) +int64x2_t __arm_vcx2qa_impl(int, int64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s8))) +int8x16_t __arm_vcx2qa_impl(int, int8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u16))) +uint16x8_t __arm_vcx2qa_impl(int, uint16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u32))) +uint32x4_t __arm_vcx2qa_impl(int, uint32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u64))) +uint64x2_t __arm_vcx2qa_impl(int, uint64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u8))) +uint8x16_t __arm_vcx2qa_impl(int, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s16))) +int16x8_t __arm_vcx2qa_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s32))) +int32x4_t __arm_vcx2qa_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s64))) +int64x2_t __arm_vcx2qa_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s8))) +int8x16_t __arm_vcx2qa_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u16))) +uint16x8_t __arm_vcx2qa_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u32))) +uint32x4_t __arm_vcx2qa_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u64))) +uint64x2_t __arm_vcx2qa_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u8))) +uint8x16_t __arm_vcx2qa_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s16))) +int16x8_t __arm_vcx3q_impl(int, int16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s32))) +int32x4_t __arm_vcx3q_impl(int, int32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s64))) +int64x2_t __arm_vcx3q_impl(int, int64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s8))) +int8x16_t __arm_vcx3q_impl(int, int8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u16))) +uint16x8_t __arm_vcx3q_impl(int, uint16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u32))) +uint32x4_t __arm_vcx3q_impl(int, uint32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u64))) +uint64x2_t __arm_vcx3q_impl(int, uint64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u8))) +uint8x16_t __arm_vcx3q_impl(int, uint8x16_t, 
uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s16))) +int16x8_t __arm_vcx3q_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s32))) +int32x4_t __arm_vcx3q_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s64))) +int64x2_t __arm_vcx3q_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s8))) +int8x16_t __arm_vcx3q_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u16))) +uint16x8_t __arm_vcx3q_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u32))) +uint32x4_t __arm_vcx3q_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u64))) +uint64x2_t __arm_vcx3q_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u8))) +uint8x16_t __arm_vcx3q_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s16))) +uint8x16_t __arm_vcx3q_u8_impl(int, int16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s32))) +uint8x16_t __arm_vcx3q_u8_impl(int, int32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s64))) +uint8x16_t __arm_vcx3q_u8_impl(int, int64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s8))) +uint8x16_t __arm_vcx3q_u8_impl(int, int8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u16))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u32))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u64))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u8))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s16))) +int16x8_t __arm_vcx3qa_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s32))) +int32x4_t 
__arm_vcx3qa_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s64))) +int64x2_t __arm_vcx3qa_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s8))) +int8x16_t __arm_vcx3qa_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u16))) +uint16x8_t __arm_vcx3qa_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u32))) +uint32x4_t __arm_vcx3qa_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u64))) +uint64x2_t __arm_vcx3qa_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u8))) +uint8x16_t __arm_vcx3qa_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s16))) +int16x8_t __arm_vcx3qa_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s32))) +int32x4_t __arm_vcx3qa_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s64))) +int64x2_t __arm_vcx3qa_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s8))) +int8x16_t __arm_vcx3qa_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u16))) +uint16x8_t __arm_vcx3qa_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u32))) +uint32x4_t __arm_vcx3qa_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u64))) +uint64x2_t __arm_vcx3qa_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u8))) +uint8x16_t __arm_vcx3qa_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t 
__arm_vreinterpretq_s8_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t __arm_vreinterpretq_u8(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t __arm_vreinterpretq_u8(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t __arm_vreinterpretq_u8(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t __arm_vreinterpretq_u8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t __arm_vreinterpretq_u8(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t __arm_vreinterpretq_u8(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t __arm_vreinterpretq_u8(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vreinterpretq_u8_u8))) +uint8x16_t __arm_vreinterpretq_u8(uint8x16_t); +#define __arm_vcx2q_m(cp, inactive, n, imm, pred) __arm_vcx2q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), (imm), (pred)) +#define __arm_vcx2qa(cp, acc, n, imm) __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm)) +#define __arm_vcx2qa_m(cp, acc, n, imm, pred) __arm_vcx2qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm), (pred)) +#define __arm_vcx3q(cp, n, m, imm) __arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm)) +#define __arm_vcx3q_m(cp, inactive, n, m, imm, pred) __arm_vcx3q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred)) +#define __arm_vcx3q_u8(cp, n, m, imm) __arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm)) +#define __arm_vcx3qa(cp, acc, n, m, imm) __arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm)) +#define __arm_vcx3qa_m(cp, acc, n, m, imm, pred) __arm_vcx3qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred)) + +#endif /* __ARM_FEATURE_MVE */ + +#if __ARM_FEATURE_MVE & 2 + +typedef __fp16 float16_t; +typedef float float32_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t; + +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f16))) +float16x8_t __arm_vcx1q_m(int, float16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f32))) +float32x4_t __arm_vcx1q_m(int, float32x4_t, uint32_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f16))) +float16x8_t __arm_vcx1qa(int, float16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f32))) +float32x4_t __arm_vcx1qa(int, float32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f16))) +float16x8_t __arm_vcx1qa_m(int, float16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f32))) +float32x4_t __arm_vcx1qa_m(int, float32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f16))) +float16x8_t __arm_vcx2q(int, float16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f32))) +float32x4_t __arm_vcx2q(int, float32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f16))) +float16x8_t __arm_vcx2q_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f32))) +float32x4_t __arm_vcx2q_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f16))) +uint8x16_t __arm_vcx2q_u8(int, float16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f32))) +uint8x16_t __arm_vcx2q_u8(int, float32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f16))) +float16x8_t __arm_vcx2qa_impl(int, float16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f32))) +float32x4_t __arm_vcx2qa_impl(int, float32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f16))) +float16x8_t __arm_vcx2qa_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f32))) +float32x4_t __arm_vcx2qa_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f16))) +float16x8_t __arm_vcx3q_impl(int, float16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f32))) +float32x4_t __arm_vcx3q_impl(int, float32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f16))) +float16x8_t __arm_vcx3q_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f32))) +float32x4_t __arm_vcx3q_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f16))) +uint8x16_t __arm_vcx3q_u8_impl(int, float16x8_t, uint8x16_t, uint32_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f32))) +uint8x16_t __arm_vcx3q_u8_impl(int, float32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f16))) +float16x8_t __arm_vcx3qa_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f32))) +float32x4_t __arm_vcx3qa_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f16))) +float16x8_t __arm_vcx3qa_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f32))) +float32x4_t __arm_vcx3qa_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t __arm_vreinterpretq_u8(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t __arm_vreinterpretq_u8(float32x4_t); + +#endif /* __ARM_FEATURE_MVE & 2 */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __ARM_CDE_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_cmse.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_cmse.h new file mode 100644 index 0000000..ecf50ec --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_cmse.h @@ -0,0 +1,217 @@ +//===---- arm_cmse.h - Arm CMSE support -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __ARM_CMSE_H +#define __ARM_CMSE_H + +#if (__ARM_FEATURE_CMSE & 0x1) +#include <stddef.h> +#include <stdint.h> + +#define __ARM_CMSE_SECURE_MODE (__ARM_FEATURE_CMSE & 0x2) +#define CMSE_MPU_READWRITE 1 /* checks if readwrite_ok field is set */ +#define CMSE_AU_NONSECURE 2 /* checks if permissions have secure field unset */ +#define CMSE_MPU_UNPRIV 4 /* sets T flag on TT instruction */ +#define CMSE_MPU_READ 8 /* checks if read_ok field is set */ +#define CMSE_MPU_NONSECURE 16 /* sets A flag, checks if secure field unset */ +#define CMSE_NONSECURE (CMSE_AU_NONSECURE | CMSE_MPU_NONSECURE) + +#define cmse_check_pointed_object(p, f) \ + cmse_check_address_range((p), sizeof(*(p)), (f)) + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef union { + struct cmse_address_info { +#ifdef __ARM_BIG_ENDIAN + /* __ARM_BIG_ENDIAN */ +#if (__ARM_CMSE_SECURE_MODE) + unsigned idau_region : 8; + unsigned idau_region_valid : 1; + unsigned secure : 1; + unsigned nonsecure_readwrite_ok : 1; + unsigned nonsecure_read_ok : 1; +#else + unsigned : 12; +#endif + unsigned readwrite_ok : 1; + unsigned read_ok : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region_valid : 1; +#else + unsigned : 1; +#endif + unsigned mpu_region_valid : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region : 8; +#else + unsigned : 8; +#endif + unsigned mpu_region : 8; + +#else /* __ARM_LITTLE_ENDIAN */ + unsigned mpu_region : 8; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region : 8; +#else + unsigned : 8; +#endif + unsigned mpu_region_valid : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region_valid : 1; +#else + unsigned : 1; +#endif + unsigned read_ok : 1; + unsigned readwrite_ok : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned nonsecure_read_ok : 1; + unsigned nonsecure_readwrite_ok : 1; + unsigned secure : 1; + unsigned idau_region_valid : 1; + unsigned idau_region : 8; +#else + unsigned : 12; +#endif +#endif /*__ARM_LITTLE_ENDIAN */ + } flags; + unsigned value; +} cmse_address_info_t; + +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TT(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TT(__p); + return __u; +} +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TTT(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TTT(__p); + return __u; +} + +#if __ARM_CMSE_SECURE_MODE +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TTA(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TTA(__p); + return __u; +} +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TTAT(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TTAT(__p); + return __u; +} +#endif + +#define cmse_TT_fptr(p) cmse_TT(__builtin_bit_cast(void *, (p))) +#define cmse_TTT_fptr(p) cmse_TTT(__builtin_bit_cast(void *, (p))) + +#if __ARM_CMSE_SECURE_MODE +#define cmse_TTA_fptr(p) cmse_TTA(__builtin_bit_cast(void *, (p))) +#define cmse_TTAT_fptr(p) cmse_TTAT(__builtin_bit_cast(void *, (p))) +#endif + +static void *__attribute__((__always_inline__)) +cmse_check_address_range(void *__pb, size_t __s, int __flags) { + uintptr_t __begin = (uintptr_t)__pb; + uintptr_t __end = __begin + __s - 1; + + if (__end < __begin) + return NULL; /* wrap around check */ + + /* Check whether the range crosses a 32-byte aligned address */
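+ /* Worked example (editorial note): for __pb == (void *)0x1000 and
+  * __s == 8, __end == 0x1007 and __begin ^ __end == 0x7 < 0x20, so the
+  * whole range lies inside one 32-byte granule and a single TT query is
+  * enough; for __pb == (void *)0x101c and __s == 8, __begin ^ __end ==
+  * 0x3f >= 0x20, so both ends of the range must be queried. */
+ const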
int __single_check = (__begin ^ __end) < 0x20u; + + /* execute the right variant of the TT instructions */ + void *__pe = (void *)__end; + cmse_address_info_t __permb, __perme; + switch (__flags & (CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE)) { + case 0: + __permb = cmse_TT(__pb); + __perme = __single_check ? __permb : cmse_TT(__pe); + break; + case CMSE_MPU_UNPRIV: + __permb = cmse_TTT(__pb); + __perme = __single_check ? __permb : cmse_TTT(__pe); + break; +#if __ARM_CMSE_SECURE_MODE + case CMSE_MPU_NONSECURE: + __permb = cmse_TTA(__pb); + __perme = __single_check ? __permb : cmse_TTA(__pe); + break; + case CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE: + __permb = cmse_TTAT(__pb); + __perme = __single_check ? __permb : cmse_TTAT(__pe); + break; +#endif + /* if CMSE_NONSECURE is specified w/o __ARM_CMSE_SECURE_MODE */ + default: + return NULL; + } + + /* check that the range does not cross MPU, SAU, or IDAU region boundaries */ + if (__permb.value != __perme.value) + return NULL; +#if !(__ARM_CMSE_SECURE_MODE) + /* CMSE_AU_NONSECURE is only supported when __ARM_FEATURE_CMSE & 0x2 */ + if (__flags & CMSE_AU_NONSECURE) + return NULL; +#endif + + /* check the permission on the range */ + switch (__flags & ~(CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE)) { +#if (__ARM_CMSE_SECURE_MODE) + case CMSE_MPU_READ | CMSE_MPU_READWRITE | CMSE_AU_NONSECURE: + case CMSE_MPU_READWRITE | CMSE_AU_NONSECURE: + return __permb.flags.nonsecure_readwrite_ok ? __pb : NULL; + + case CMSE_MPU_READ | CMSE_AU_NONSECURE: + return __permb.flags.nonsecure_read_ok ? __pb : NULL; + + case CMSE_AU_NONSECURE: + return __permb.flags.secure ? NULL : __pb; +#endif + case CMSE_MPU_READ | CMSE_MPU_READWRITE: + case CMSE_MPU_READWRITE: + return __permb.flags.readwrite_ok ? __pb : NULL; + + case CMSE_MPU_READ: + return __permb.flags.read_ok ? __pb : NULL; + + default: + return NULL; + } +} + +#if __ARM_CMSE_SECURE_MODE +static int __attribute__((__always_inline__, __nodebug__)) +cmse_nonsecure_caller(void) { + return !((uintptr_t)__builtin_return_address(0) & 1); +} + +#define cmse_nsfptr_create(p) \ + __builtin_bit_cast(__typeof__(p), \ + (__builtin_bit_cast(uintptr_t, p) & ~(uintptr_t)1)) + +#define cmse_is_nsfptr(p) ((__builtin_bit_cast(uintptr_t, p) & 1) == 0) + +#endif /* __ARM_CMSE_SECURE_MODE */ + +void __attribute__((__noreturn__)) cmse_abort(void); +#if defined(__cplusplus) +} +#endif + +#endif /* (__ARM_FEATURE_CMSE & 0x1) */ + +#endif /* __ARM_CMSE_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_fp16.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_fp16.h new file mode 100644 index 0000000..ce993ce --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_fp16.h @@ -0,0 +1,596 @@ +/*===---- arm_fp16.h - ARM FP16 intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_FP16_H +#define __ARM_FP16_H + +#include <stdint.h> + +typedef __fp16 float16_t; +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__) +#define vabdh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \ + __ret; \ +}) +#define vabsh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \ + __ret; \ +}) +#define vaddh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcageh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcagth_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \ + __ret; \ +}) +#define vcaleh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcalth_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \ + __ret; \ +}) +#define vceqh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \ + __ret; \ +}) +#define vceqzh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \ + __ret; \ +}) +#define vcgeh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcgezh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \ + __ret; \ +}) +#define vcgth_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \ + __ret; \ +}) +#define vcgtzh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \ + __ret; \ +}) +#define vcleh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \ + __ret; \ +}) +#define vclezh_f16(__p0) __extension__ ({ \ + float16_t __s0 =
__p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \ + __ret; \ +}) +#define vclth_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \ + __ret; \ +}) +#define vcltzh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \ + __ret; \ +}) +#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_s16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \ + __ret; \ +}) +#define vcvth_s32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \ + __ret; \ +}) +#define vcvth_s64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \ + __ret; \ +}) +#define vcvth_u16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \ + __ret; \ +}) +#define vcvth_u32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \ + __ret; \ +}) +#define vcvth_u64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \ + __ret; \ +}) +#define vcvtah_s16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtah_s32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtah_s64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtah_u16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtah_u32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtah_u64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) 
__builtin_neon_vcvtah_u64_f16(__s0); \ + __ret; \ +}) +#define vcvth_f16_u16(__p0) __extension__ ({ \ + uint16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \ + __ret; \ +}) +#define vcvth_f16_s16(__p0) __extension__ ({ \ + int16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \ + __ret; \ +}) +#define vcvth_f16_u32(__p0) __extension__ ({ \ + uint32_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \ + __ret; \ +}) +#define vcvth_f16_s32(__p0) __extension__ ({ \ + int32_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \ + __ret; \ +}) +#define vcvth_f16_u64(__p0) __extension__ ({ \ + uint64_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \ + __ret; \ +}) +#define vcvth_f16_s64(__p0) __extension__ ({ \ + int64_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \ + __ret; \ +}) +#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \ + __ret; \ +}) +#define vcvtmh_s16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_s32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_s64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_u16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_u32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_u64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_s16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_s32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \ + __ret; \ +}) +#define 
vcvtnh_s64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_u16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_u32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_u64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \ + __ret; \ +}) +#define vcvtph_s16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtph_s32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtph_s64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtph_u16_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtph_u32_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtph_u64_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \ + __ret; \ +}) +#define vdivh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \ + __ret; \ +}) +#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \ + __ret; \ +}) +#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \ + __ret; \ +}) +#define vmaxh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \ + __ret; \ +}) +#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \ + __ret; \ +}) +#define vminh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \ + __ret; \ +}) +#define vminnmh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \ + __ret; \ +}) +#define vmulh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \ + __ret; \ +}) +#define vmulxh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) 
__builtin_neon_vmulxh_f16(__s0, __s1); \ + __ret; \ +}) +#define vnegh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \ + __ret; \ +}) +#define vrecpeh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \ + __ret; \ +}) +#define vrecpsh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \ + __ret; \ +}) +#define vrecpxh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \ + __ret; \ +}) +#define vrndh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \ + __ret; \ +}) +#define vrndah_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \ + __ret; \ +}) +#define vrndih_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \ + __ret; \ +}) +#define vrndmh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \ + __ret; \ +}) +#define vrndnh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \ + __ret; \ +}) +#define vrndph_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \ + __ret; \ +}) +#define vrndxh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \ + __ret; \ +}) +#define vrsqrteh_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \ + __ret; \ +}) +#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \ + __ret; \ +}) +#define vsqrth_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \ + __ret; \ +}) +#define vsubh_f16(__p0, __p1) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \ + __ret; \ +}) +#endif + +#undef __ai + +#endif /* __ARM_FP16_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_mve.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_mve.h new file mode 100644 index 0000000..4da41dc --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_mve.h @@ -0,0 +1,19187 @@ +/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_MVE_H +#define __ARM_MVE_H + +#if !__ARM_FEATURE_MVE +#error "MVE support not enabled" +#endif + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +typedef uint16_t mve_pred16_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t; +typedef struct { int16x8_t val[2]; } int16x8x2_t; +typedef struct { int16x8_t val[4]; } int16x8x4_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t; +typedef struct { int32x4_t val[2]; } int32x4x2_t; +typedef struct { int32x4_t val[4]; } int32x4x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t; +typedef struct { int64x2_t val[2]; } int64x2x2_t; +typedef struct { int64x2_t val[4]; } int64x2x4_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t; +typedef struct { int8x16_t val[2]; } int8x16x2_t; +typedef struct { int8x16_t val[4]; } int8x16x4_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t; +typedef struct { uint16x8_t val[2]; } uint16x8x2_t; +typedef struct { uint16x8_t val[4]; } uint16x8x4_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t; +typedef struct { uint32x4_t val[2]; } uint32x4x2_t; +typedef struct { uint32x4_t val[4]; } uint32x4x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t; +typedef struct { uint64x2_t val[2]; } uint64x2x2_t; +typedef struct { uint64x2_t val[4]; } uint64x2x4_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t; +typedef struct { uint8x16_t val[2]; } uint8x16x2_t; +typedef struct { uint8x16_t val[4]; } uint8x16x4_t; + +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl))) +int64_t __arm_asrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll))) +uint64_t __arm_lsll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr))) +int32_t __arm_sqrshr(int32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl))) +int64_t __arm_sqrshrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48))) +int64_t __arm_sqrshrl_sat48(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl))) +int32_t __arm_sqshl(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll))) +int64_t __arm_sqshll(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr))) +int32_t __arm_srshr(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl))) +int64_t __arm_srshrl(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl))) +uint32_t __arm_uqrshl(uint32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll))) +uint64_t __arm_uqrshll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))
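+/* Editorial note, not upstream text: mve_pred16_t carries one predicate
+ * bit per byte of the 128-bit vector, so each uint32x4_t lane is governed
+ * by four identical bits; e.g. a mask selecting lanes 0 and 1 of a
+ * uint32x4_t is 0x00ff. The __clang_arm_mve_strict_polymorphism attribute
+ * on the typedefs above makes the __overloadable__ generic intrinsics
+ * resolve only against exact vector types, never via implicit conversion.
+ */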
+uint64_t __arm_uqrshll_sat48(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl))) +uint32_t __arm_uqshl(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll))) +uint64_t __arm_uqshll(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr))) +uint32_t __arm_urshr(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl))) +uint64_t __arm_urshrl(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t __arm_vabdq(int16x8_t, int16x8_t); +static __inline__ 
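+/* Illustrative sketch (editorial): __arm_vabavq accumulates the lane-wise
+ * absolute differences into a scalar, the core of a
+ * sum-of-absolute-differences kernel; with hypothetical vectors a and b:
+ *
+ *   uint8x16_t a = ..., b = ...;
+ *   uint32_t sad = __arm_vabavq(0u, a, b);  // 0 + sum of |a[i] - b[i]|
+ */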
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t __arm_vabdq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t __arm_vabdq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t __arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t __arm_vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t __arm_vabsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t __arm_vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t __arm_vabsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t __arm_vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t __arm_vabsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t __arm_vabsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t __arm_vabsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t __arm_vabsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t __arm_vabsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t __arm_vabsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t __arm_vabsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t __arm_vabsq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t __arm_vabsq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t __arm_vabsq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t __arm_vabsq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t __arm_vabsq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t __arm_vabsq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ 
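+/* Editorial sketch, assuming VADCI/VADC chain the carry across the four
+ * 32-bit lanes with vadciq starting from carry-in 0: a 256-bit addition
+ * of {a1:a0} and {b1:b0} (hypothetical variables) could be written as
+ *
+ *   unsigned carry;
+ *   uint32x4_t r0 = __arm_vadciq(a0, b0, &carry);  // low 128 bits
+ *   uint32x4_t r1 = __arm_vadcq(a1, b1, &carry);   // consumes carry-in
+ */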
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t __arm_vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t __arm_vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t __arm_vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t __arm_vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t __arm_vaddlvaq_s32(int64_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t __arm_vaddlvaq(int64_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t __arm_vaddlvaq_u32(uint64_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t __arm_vaddlvaq(uint64_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t __arm_vaddlvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t __arm_vaddlvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t __arm_vaddlvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t __arm_vaddlvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t __arm_vaddlvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t __arm_vaddlvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t __arm_vaddlvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t __arm_vaddlvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t __arm_vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t __arm_vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t __arm_vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t __arm_vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t __arm_vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t __arm_vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
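+/* Editorial note on the suffix convention in this header: _m ("merged")
+ * variants take the inactive-lane source as their first argument, _x
+ * variants leave inactive lanes undefined, and _p variants predicate
+ * reductions. A sketch with hypothetical vectors and predicate p:
+ *
+ *   int16x8_t r = __arm_vaddq_m(inactive, a, b, p);
+ *   // r[i] = lane i active in p ? a[i] + b[i] : inactive[i]
+ */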
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t __arm_vaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t __arm_vaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t __arm_vaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t __arm_vaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t __arm_vaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t __arm_vaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t __arm_vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t __arm_vaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t __arm_vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t __arm_vaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t __arm_vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t __arm_vaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t __arm_vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t __arm_vaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t __arm_vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t __arm_vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t __arm_vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t __arm_vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t __arm_vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t __arm_vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t __arm_vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t __arm_vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t __arm_vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t __arm_vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t __arm_vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t __arm_vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t __arm_vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t __arm_vaddvaq_s16(int32_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t __arm_vaddvaq(int32_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t __arm_vaddvaq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t __arm_vaddvaq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t __arm_vaddvaq_s8(int32_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t __arm_vaddvaq(int32_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t __arm_vaddvaq_u16(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t __arm_vaddvaq(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t __arm_vaddvaq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t __arm_vaddvaq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t __arm_vaddvaq_u8(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t __arm_vaddvaq(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t __arm_vaddvq_p_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t __arm_vaddvq_p(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t __arm_vaddvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t __arm_vaddvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t __arm_vaddvq_p_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t __arm_vaddvq_p(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t __arm_vaddvq_p_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t __arm_vaddvq_p(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t __arm_vaddvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t __arm_vaddvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t __arm_vaddvq_p_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t __arm_vaddvq_p(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t __arm_vaddvq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t __arm_vaddvq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t __arm_vaddvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t __arm_vaddvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t __arm_vaddvq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t __arm_vaddvq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t __arm_vaddvq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t __arm_vaddvq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t __arm_vaddvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t __arm_vaddvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t __arm_vaddvq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t __arm_vaddvq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t __arm_vandq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t __arm_vandq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t __arm_vandq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t __arm_vandq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t __arm_vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t __arm_vbicq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t __arm_vbicq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t __arm_vbicq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t __arm_vbicq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t __arm_vbicq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t __arm_vbicq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t __arm_vbicq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t __arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t __arm_vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t __arm_vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t __arm_vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t __arm_vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t __arm_vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t __arm_vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t __arm_vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t __arm_vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t __arm_vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t __arm_vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t __arm_vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t __arm_vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t __arm_vbrsrq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t __arm_vbrsrq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t __arm_vbrsrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t __arm_vbrsrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t __arm_vbrsrq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t __arm_vbrsrq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t __arm_vbrsrq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t __arm_vbrsrq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t __arm_vbrsrq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t __arm_vbrsrq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t __arm_vbrsrq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t __arm_vbrsrq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t __arm_vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t __arm_vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t __arm_vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t __arm_vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t __arm_vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t __arm_vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t __arm_vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t __arm_vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t __arm_vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t __arm_vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t __arm_vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t __arm_vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t __arm_vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t __arm_vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t __arm_vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t __arm_vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t __arm_vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t __arm_vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t __arm_vclsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t __arm_vclsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t __arm_vclsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t __arm_vclsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t __arm_vclsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t __arm_vclsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t __arm_vclsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t __arm_vclsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t __arm_vclsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t __arm_vclsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t __arm_vclsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t __arm_vclsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t __arm_vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t __arm_vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t __arm_vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t __arm_vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t __arm_vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t __arm_vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t __arm_vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t __arm_vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t __arm_vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t __arm_vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t __arm_vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t __arm_vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t __arm_vclzq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t __arm_vclzq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t __arm_vclzq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t __arm_vclzq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t __arm_vclzq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t __arm_vclzq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t __arm_vclzq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t __arm_vclzq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t __arm_vclzq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t __arm_vclzq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t __arm_vclzq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t __arm_vclzq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t __arm_vclzq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t __arm_vclzq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t __arm_vclzq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t __arm_vclzq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t __arm_vclzq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t __arm_vclzq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t __arm_vclzq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t __arm_vclzq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t __arm_vclzq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t __arm_vclzq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t __arm_vclzq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t __arm_vclzq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t,
int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16))) +mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32))) +mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32))) +mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8))) +mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8))) +mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16))) +mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16))) +mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32))) +mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32))) +mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8))) +mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8))) +mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16))) +mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16))) +mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32))) +mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32))) +mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8))) +mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8))) +mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16))) +mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16))) +mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32))) +mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32))) +mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8))) +mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8))) +mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16))) +mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16))) +mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32))) +mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32))) +mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8))) +mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8))) +mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16))) +mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16))) +mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32))) +mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32))) +mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8))) +mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8))) +mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16))) +mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16))) +mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32))) +mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32))) +mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8))) +mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8))) +mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16))) +mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16))) +mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32))) +mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32))) +mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8))) +mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8))) +mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16))) +mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16))) +mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32))) +mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32))) +mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8))) +mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8))) +mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16))) +mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16))) +mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32))) +mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32))) +mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8))) +mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8))) +mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16))) +mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16))) +mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32))) +mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32))) +mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8))) +mve_pred16_t 
__arm_vcmpleq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8))) +mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16))) +mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16))) +mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32))) +mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32))) +mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8))) +mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8))) +mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16))) +mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16))) +mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32))) +mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32))) +mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8))) +mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8))) +mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16))) +mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16))) +mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32))) +mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32))) +mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8))) +mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8))) +mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16))) +mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16))) +mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32))) +mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32))) +mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8))) +mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8))) +mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16))) +mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16))) +mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32))) +mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32))) +mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8))) +mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8))) +mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16))) +mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16))) +mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32))) +mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32))) +mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8))) +mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8))) +mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16))) +mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16))) +mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32))) +mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32))) +mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8))) +mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8))) 
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16))) +mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16))) +mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32))) +mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32))) +mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8))) +mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8))) +mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16))) +mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16))) +mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32))) +mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32))) +mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8))) +mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8))) +mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16))) +mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16))) +mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32))) +mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32))) +mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8))) +mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8))) +mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16))) +mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16))) +mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32))) +mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32))) +mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8))) +mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8))) +mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16))) +mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16))) +mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32))) +mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32))) +mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8))) +mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8))) +mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16))) +mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16))) +mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32))) +mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32))) +mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8))) +mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8))) +mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16))) +int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32))) +int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64))) +int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8))) +int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16))) +uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32))) +uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64))) +uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8))) +uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q))) +mve_pred16_t __arm_vctp16q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m))) +mve_pred16_t __arm_vctp16q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q))) +mve_pred16_t __arm_vctp32q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m))) +mve_pred16_t __arm_vctp32q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q))) +mve_pred16_t __arm_vctp64q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m))) +mve_pred16_t __arm_vctp64q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q))) +mve_pred16_t __arm_vctp8q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m))) +mve_pred16_t __arm_vctp8q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16))) +uint16x8_t __arm_vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16))) +uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32))) +uint32x4_t __arm_vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32))) +uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8))) +uint8x16_t __arm_vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8))) +uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16))) +uint16x8_t __arm_vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16))) +uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32))) +uint32x4_t __arm_vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32))) +uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8))) +uint8x16_t __arm_vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8))) +uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16))) +uint16x8_t __arm_vddupq_n_u16(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16))) +uint16x8_t __arm_vddupq_u16(uint32_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32))) +uint32x4_t __arm_vddupq_n_u32(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32))) +uint32x4_t __arm_vddupq_u32(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8))) +uint8x16_t __arm_vddupq_n_u8(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8))) +uint8x16_t __arm_vddupq_u8(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16))) +uint16x8_t __arm_vddupq_wb_u16(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16))) +uint16x8_t __arm_vddupq_u16(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32))) +uint32x4_t __arm_vddupq_wb_u32(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32))) +uint32x4_t __arm_vddupq_u32(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8))) +uint8x16_t __arm_vddupq_wb_u8(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8))) +uint8x16_t __arm_vddupq_u8(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16))) +uint16x8_t __arm_vddupq_x_n_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16))) +uint16x8_t __arm_vddupq_x_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32))) +uint32x4_t __arm_vddupq_x_n_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32))) +uint32x4_t __arm_vddupq_x_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8))) +uint8x16_t __arm_vddupq_x_n_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8))) +uint8x16_t __arm_vddupq_x_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16))) +uint16x8_t __arm_vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16))) +uint16x8_t __arm_vddupq_x_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32))) +uint32x4_t __arm_vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32))) +uint32x4_t __arm_vddupq_x_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8))) +uint8x16_t __arm_vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8))) +uint8x16_t __arm_vddupq_x_u8(uint32_t *, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16))) +int16x8_t __arm_vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16))) +int16x8_t __arm_vdupq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32))) +int32x4_t __arm_vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32))) +int32x4_t __arm_vdupq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8))) +int8x16_t __arm_vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8))) +int8x16_t __arm_vdupq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16))) +uint16x8_t __arm_vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16))) +uint16x8_t __arm_vdupq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32))) +uint32x4_t __arm_vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32))) +uint32x4_t __arm_vdupq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8))) +uint8x16_t __arm_vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8))) +uint8x16_t __arm_vdupq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16))) +int16x8_t __arm_vdupq_n_s16(int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32))) +int32x4_t __arm_vdupq_n_s32(int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8))) +int8x16_t __arm_vdupq_n_s8(int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16))) +uint16x8_t __arm_vdupq_n_u16(uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32))) +uint32x4_t __arm_vdupq_n_u32(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8))) +uint8x16_t __arm_vdupq_n_u8(uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16))) +int16x8_t __arm_vdupq_x_n_s16(int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32))) +int32x4_t __arm_vdupq_x_n_s32(int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8))) +int8x16_t __arm_vdupq_x_n_s8(int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16))) +uint16x8_t __arm_vdupq_x_n_u16(uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32))) +uint32x4_t __arm_vdupq_x_n_u32(uint32_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8))) +uint8x16_t __arm_vdupq_x_n_u8(uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16))) +uint16x8_t __arm_vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16))) +uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32))) +uint32x4_t __arm_vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32))) +uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8))) +uint8x16_t __arm_vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8))) +uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16))) +uint16x8_t __arm_vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16))) +uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32))) +uint32x4_t __arm_vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32))) +uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8))) +uint8x16_t __arm_vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8))) +uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16))) +uint16x8_t __arm_vdwdupq_n_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16))) +uint16x8_t __arm_vdwdupq_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32))) +uint32x4_t __arm_vdwdupq_n_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32))) +uint32x4_t __arm_vdwdupq_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8))) +uint8x16_t __arm_vdwdupq_n_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8))) +uint8x16_t __arm_vdwdupq_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16))) +uint16x8_t __arm_vdwdupq_wb_u16(uint32_t *, uint32_t, int); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16))) +uint16x8_t __arm_vdwdupq_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32))) +uint32x4_t __arm_vdwdupq_wb_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32))) +uint32x4_t __arm_vdwdupq_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8))) +uint8x16_t __arm_vdwdupq_wb_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8))) +uint8x16_t __arm_vdwdupq_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16))) +uint16x8_t __arm_vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16))) +uint16x8_t __arm_vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32))) +uint32x4_t __arm_vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32))) +uint32x4_t __arm_vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8))) +uint8x16_t __arm_vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8))) +uint8x16_t __arm_vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16))) +uint16x8_t __arm_vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16))) +uint16x8_t __arm_vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32))) +uint32x4_t __arm_vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32))) +uint32x4_t __arm_vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8))) +uint8x16_t __arm_vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8))) +uint8x16_t __arm_vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16))) +int16x8_t __arm_veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16))) +int16x8_t __arm_veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32))) +int32x4_t __arm_veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32))) +int32x4_t __arm_veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8))) +int8x16_t __arm_veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8))) +int8x16_t __arm_veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16))) +uint16x8_t __arm_veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16))) +uint16x8_t __arm_veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32))) +uint32x4_t __arm_veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32))) +uint32x4_t __arm_veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8))) +uint8x16_t __arm_veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8))) +uint8x16_t __arm_veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16))) +int16x8_t __arm_veorq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16))) +int16x8_t __arm_veorq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32))) +int32x4_t __arm_veorq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32))) +int32x4_t __arm_veorq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8))) +int8x16_t __arm_veorq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8))) +int8x16_t __arm_veorq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16))) +uint16x8_t __arm_veorq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16))) +uint16x8_t __arm_veorq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32))) +uint32x4_t __arm_veorq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32))) +uint32x4_t __arm_veorq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8))) +uint8x16_t __arm_veorq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8))) +uint8x16_t __arm_veorq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16))) +int16x8_t __arm_veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16))) +int16x8_t __arm_veorq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32))) +int32x4_t __arm_veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32))) +int32x4_t __arm_veorq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8))) +int8x16_t __arm_veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8))) +int8x16_t __arm_veorq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16))) +uint16x8_t __arm_veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16))) +uint16x8_t __arm_veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32))) +uint32x4_t __arm_veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32))) +uint32x4_t __arm_veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8))) +uint8x16_t __arm_veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8))) +uint8x16_t __arm_veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16))) +int16_t __arm_vgetq_lane_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16))) +int16_t __arm_vgetq_lane(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32))) +int32_t __arm_vgetq_lane_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32))) +int32_t __arm_vgetq_lane(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64))) +int64_t __arm_vgetq_lane_s64(int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64))) +int64_t __arm_vgetq_lane(int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8))) +int8_t __arm_vgetq_lane_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8))) +int8_t __arm_vgetq_lane(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16))) +uint16_t __arm_vgetq_lane_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16))) +uint16_t __arm_vgetq_lane(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32))) +uint32_t __arm_vgetq_lane_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32))) +uint32_t __arm_vgetq_lane(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64))) +uint64_t __arm_vgetq_lane_u64(uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64))) +uint64_t __arm_vgetq_lane(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8))) +uint8_t __arm_vgetq_lane_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8))) +uint8_t __arm_vgetq_lane(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16))) +int16x8_t __arm_vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16))) +int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32))) +int32x4_t __arm_vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32))) +int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8))) +int8x16_t __arm_vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8))) +int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16))) +uint16x8_t __arm_vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16))) +uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32))) +uint32x4_t __arm_vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32))) +uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8))) +uint8x16_t __arm_vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8))) +uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16))) +int16x8_t __arm_vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16))) +int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32))) +int32x4_t __arm_vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32))) +int32x4_t 
__arm_vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8))) +int8x16_t __arm_vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8))) +int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16))) +uint16x8_t __arm_vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16))) +uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32))) +uint32x4_t __arm_vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32))) +uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8))) +uint8x16_t __arm_vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8))) +uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16))) +int16x8_t __arm_vhaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16))) +int16x8_t __arm_vhaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32))) +int32x4_t __arm_vhaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32))) +int32x4_t __arm_vhaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8))) +int8x16_t __arm_vhaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8))) +int8x16_t __arm_vhaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16))) +uint16x8_t __arm_vhaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16))) +uint16x8_t __arm_vhaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32))) +uint32x4_t __arm_vhaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32))) +uint32x4_t __arm_vhaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8))) +uint8x16_t __arm_vhaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8))) +uint8x16_t __arm_vhaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16))) +int16x8_t __arm_vhaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16))) +int16x8_t __arm_vhaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32))) +int32x4_t __arm_vhaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32))) +int32x4_t __arm_vhaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8))) +int8x16_t __arm_vhaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8))) +int8x16_t __arm_vhaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16))) +uint16x8_t __arm_vhaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16))) +uint16x8_t __arm_vhaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32))) +uint32x4_t __arm_vhaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32))) +uint32x4_t __arm_vhaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8))) +uint8x16_t __arm_vhaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8))) +uint8x16_t __arm_vhaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16))) +int16x8_t __arm_vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16))) +int16x8_t __arm_vhaddq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32))) +int32x4_t __arm_vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32))) +int32x4_t __arm_vhaddq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8))) +int8x16_t __arm_vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8))) +int8x16_t __arm_vhaddq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16))) +uint16x8_t __arm_vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16))) +uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32))) +uint32x4_t __arm_vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32))) +uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8))) +uint8x16_t __arm_vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8))) +uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16))) +int16x8_t __arm_vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16))) +int16x8_t __arm_vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32))) +int32x4_t __arm_vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32))) +int32x4_t __arm_vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8))) +int8x16_t __arm_vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8))) +int8x16_t __arm_vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16))) +uint16x8_t __arm_vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16))) +uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32))) +uint32x4_t __arm_vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32))) +uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8))) +uint8x16_t __arm_vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8))) +uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16))) +int16x8_t __arm_vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16))) +int16x8_t __arm_vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32))) +int32x4_t __arm_vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32))) +int32x4_t __arm_vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8))) +int8x16_t __arm_vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8))) +int8x16_t __arm_vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16))) +int16x8_t __arm_vhcaddq_rot270_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, 
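The vhaddq block shows the four shapes this header uses for most arithmetic intrinsics: plain, _n (vector-scalar), _m (merging predication) and _x (don't-care predication). A sketch of the distinction under the usual ACLE reading; the function name is invented:

#include <arm_mve.h>

int16x8_t vhadd_shapes(int16x8_t a, int16x8_t b, mve_pred16_t p)
{
    int16x8_t t0 = __arm_vhaddq(a, b);          /* (a + b) >> 1 per lane, no overflow */
    int16x8_t t1 = __arm_vhaddq(a, (int16_t)1); /* _n form: scalar second operand */
    int16x8_t t2 = __arm_vhaddq_m(a, a, b, p);  /* _m: inactive lanes taken from arg 1 */
    int16x8_t t3 = __arm_vhaddq_x(a, b, p);     /* _x: inactive lanes undefined */
    return __arm_vaddq(__arm_vaddq(t0, t1), __arm_vaddq(t2, t3));
}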
__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16))) +int16x8_t __arm_vhcaddq_rot270(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32))) +int32x4_t __arm_vhcaddq_rot270_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32))) +int32x4_t __arm_vhcaddq_rot270(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8))) +int8x16_t __arm_vhcaddq_rot270_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8))) +int8x16_t __arm_vhcaddq_rot270(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16))) +int16x8_t __arm_vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16))) +int16x8_t __arm_vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32))) +int32x4_t __arm_vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32))) +int32x4_t __arm_vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8))) +int8x16_t __arm_vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8))) +int8x16_t __arm_vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16))) +int16x8_t __arm_vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16))) +int16x8_t __arm_vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32))) +int32x4_t __arm_vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32))) +int32x4_t __arm_vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8))) +int8x16_t __arm_vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8))) +int8x16_t __arm_vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16))) +int16x8_t __arm_vhcaddq_rot90_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16))) +int16x8_t __arm_vhcaddq_rot90(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32))) +int32x4_t __arm_vhcaddq_rot90_s32(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t __arm_vhcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t __arm_vhcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t __arm_vhcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t __arm_vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t __arm_vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t __arm_vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t __arm_vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t __arm_vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t __arm_vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t __arm_vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t __arm_vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t __arm_vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t __arm_vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t __arm_vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t __arm_vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
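vhcaddq is the halving complex add: lanes are consumed as (real, imaginary) pairs and the second operand is rotated in the complex plane before the halving addition. A sketch under my reading of the VHCADD definition (hedged; the function name is invented):

#include <arm_mve.h>

/* Per pair, rot90 should give re = (a.re - b.im) >> 1 and
   im = (a.im + b.re) >> 1; rot270 swaps which half is negated. */
int32x4_t complex_halving_add(int32x4_t a, int32x4_t b)
{
    return __arm_vhcaddq_rot90(a, b);
}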
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8))) +uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16))) +int16x8_t __arm_vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16))) +int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32))) +int32x4_t __arm_vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32))) +int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8))) +int8x16_t __arm_vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8))) +int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16))) +uint16x8_t __arm_vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16))) +uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32))) +uint32x4_t __arm_vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32))) +uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8))) +uint8x16_t __arm_vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8))) +uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16))) +int16x8_t __arm_vhsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16))) +int16x8_t __arm_vhsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32))) +int32x4_t __arm_vhsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32))) +int32x4_t __arm_vhsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8))) +int8x16_t __arm_vhsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8))) +int8x16_t __arm_vhsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16))) +uint16x8_t __arm_vhsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16))) +uint16x8_t __arm_vhsubq(uint16x8_t, 
uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32))) +uint32x4_t __arm_vhsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32))) +uint32x4_t __arm_vhsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8))) +uint8x16_t __arm_vhsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8))) +uint8x16_t __arm_vhsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16))) +int16x8_t __arm_vhsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16))) +int16x8_t __arm_vhsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32))) +int32x4_t __arm_vhsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32))) +int32x4_t __arm_vhsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8))) +int8x16_t __arm_vhsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8))) +int8x16_t __arm_vhsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16))) +uint16x8_t __arm_vhsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16))) +uint16x8_t __arm_vhsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32))) +uint32x4_t __arm_vhsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32))) +uint32x4_t __arm_vhsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8))) +uint8x16_t __arm_vhsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8))) +uint8x16_t __arm_vhsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16))) +int16x8_t __arm_vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16))) +int16x8_t __arm_vhsubq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32))) +int32x4_t __arm_vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32))) +int32x4_t __arm_vhsubq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8))) +int8x16_t __arm_vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8))) +int8x16_t __arm_vhsubq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16))) +uint16x8_t 
__arm_vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16))) +uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32))) +uint32x4_t __arm_vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32))) +uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8))) +uint8x16_t __arm_vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8))) +uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16))) +int16x8_t __arm_vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16))) +int16x8_t __arm_vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32))) +int32x4_t __arm_vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32))) +int32x4_t __arm_vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8))) +int8x16_t __arm_vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8))) +int8x16_t __arm_vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16))) +uint16x8_t __arm_vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16))) +uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32))) +uint32x4_t __arm_vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32))) +uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8))) +uint8x16_t __arm_vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8))) +uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16))) +uint16x8_t __arm_vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16))) +uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32))) +uint32x4_t __arm_vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
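vhsubq mirrors vhaddq with a halving subtract, and the same _n/_m/_x conventions apply. A short sketch (invented name):

#include <arm_mve.h>

/* (a - b) >> 1 per lane without intermediate overflow; with _m, lanes
   masked off by p keep the value of the first argument. */
uint8x16_t half_diff(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
    return __arm_vhsubq_m(a, a, b, p);
}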
__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32))) +uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8))) +uint8x16_t __arm_vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8))) +uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16))) +uint16x8_t __arm_vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16))) +uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32))) +uint32x4_t __arm_vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32))) +uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8))) +uint8x16_t __arm_vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8))) +uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16))) +uint16x8_t __arm_vidupq_n_u16(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16))) +uint16x8_t __arm_vidupq_u16(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32))) +uint32x4_t __arm_vidupq_n_u32(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32))) +uint32x4_t __arm_vidupq_u32(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8))) +uint8x16_t __arm_vidupq_n_u8(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8))) +uint8x16_t __arm_vidupq_u8(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16))) +uint16x8_t __arm_vidupq_wb_u16(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16))) +uint16x8_t __arm_vidupq_u16(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32))) +uint32x4_t __arm_vidupq_wb_u32(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32))) +uint32x4_t __arm_vidupq_u32(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8))) +uint8x16_t __arm_vidupq_wb_u8(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8))) +uint8x16_t __arm_vidupq_u8(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t __arm_vidupq_x_n_u16(uint32_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t __arm_vidupq_x_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static 
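The vidupq group generates incrementing lane sequences; the trailing int is the step, limited to the immediate values 1, 2, 4 or 8, and the _wb overloads (pointer first argument) also advance the start value through that pointer. Sketch only; the wrapper name is invented:

#include <arm_mve.h>

/* Produce {s, s+4, s+8, ..., s+28} in 8 uint16_t lanes; because this is
   the writeback overload, *s is advanced past the sequence (here by 32). */
uint16x8_t next_indices(uint32_t *s)
{
    return __arm_vidupq_u16(s, 4); /* __arm_vidupq_u16(*s, 4) would leave *s alone */
}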
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32))) +uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32))) +uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8))) +uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8))) +uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16))) +uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16))) +uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32))) +uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32))) +uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8))) +uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8))) +uint8x16_t 
__arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16))) +uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16))) +uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32))) +uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32))) +uint32x4_t __arm_viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8))) +uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8))) +uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16))) +int16x8_t __arm_vld1q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16))) +int16x8_t __arm_vld1q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32))) +int32x4_t __arm_vld1q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32))) +int32x4_t __arm_vld1q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8))) +int8x16_t __arm_vld1q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8))) +int8x16_t __arm_vld1q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16))) +uint16x8_t __arm_vld1q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16))) +uint16x8_t __arm_vld1q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32))) +uint32x4_t __arm_vld1q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32))) +uint32x4_t __arm_vld1q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8))) +uint8x16_t __arm_vld1q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8))) +uint8x16_t __arm_vld1q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16))) +int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16))) +int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32))) +int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32))) +int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t); +static __inline__ 
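viwdupq is the wrapping variant: the extra uint32_t operand is the wrap bound at which the sequence folds back to zero, which makes it the natural generator of circular-buffer indices for a following gather. A sketch of that reading (invented name):

#include <arm_mve.h>

/* Indices s, s+1, s+2, ... taken modulo len (the wrap bound). */
uint8x16_t ring_indices(uint32_t s, uint32_t len)
{
    return __arm_viwdupq_u8(s, len, 1);
}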
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8))) +int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8))) +int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16))) +uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16))) +uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32))) +uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32))) +uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8))) +uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8))) +uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16))) +int16x8x2_t __arm_vld2q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16))) +int16x8x2_t __arm_vld2q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32))) +int32x4x2_t __arm_vld2q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32))) +int32x4x2_t __arm_vld2q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8))) +int8x16x2_t __arm_vld2q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8))) +int8x16x2_t __arm_vld2q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t __arm_vld2q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t __arm_vld2q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t __arm_vld2q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t __arm_vld2q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t __arm_vld2q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t __arm_vld2q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t __arm_vld4q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t __arm_vld4q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) +int32x4x4_t __arm_vld4q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) 
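vld1q and vld1q_z are the contiguous loads; the _z form zeroes whatever lanes the predicate masks off, which pairs with vctp for loop tails. A tail-handling sketch (invented name):

#include <arm_mve.h>

/* Load up to 4 int32_t; lanes at index >= n come back as zero. */
int32x4_t load_tail(const int32_t *src, unsigned n)
{
    mve_pred16_t p = __arm_vctp32q(n); /* lanes 0..n-1 active */
    return __arm_vld1q_z(src, p);
}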
+int32x4x4_t __arm_vld4q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t __arm_vld4q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t __arm_vld4q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t __arm_vld4q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t __arm_vld4q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t __arm_vld4q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t __arm_vld4q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t __arm_vld4q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t __arm_vld4q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t 
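vld2q and vld4q return the x2/x4 tuple types and de-interleave as they load, so stride-2 or stride-4 data lands with each component in its own vector. Sketch (invented name):

#include <arm_mve.h>

/* src holds interleaved {re0, im0, re1, im1, ...}; after the load,
   v.val[0] carries the re lanes and v.val[1] the im lanes. */
int16x8_t real_parts(const int16_t *src)
{
    int16x8x2_t v = __arm_vld2q(src);
    return v.val[0];
}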
__arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16))) +int16x8_t __arm_vldrbq_s16(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32))) +int32x4_t __arm_vldrbq_s32(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8))) +int8x16_t __arm_vldrbq_s8(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16))) +uint16x8_t __arm_vldrbq_u16(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32))) +uint32x4_t __arm_vldrbq_u32(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8))) +uint8x16_t __arm_vldrbq_u8(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16))) +int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32))) +int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8))) +int8x16_t 
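vldrbq widens while loading (bytes into 16- or 32-bit lanes), and the gather_offset forms read each lane from base[offset[i]] rather than contiguously, with _z zeroing the masked lanes. A table-lookup sketch (invented names):

#include <arm_mve.h>

/* Each of the 8 result lanes is table[idx lane], sign-extended from
   int8_t to int16_t. */
int16x8_t gather_bytes(const int8_t *table, uint16x8_t idx)
{
    return __arm_vldrbq_gather_offset(table, idx);
}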
__arm_vldrbq_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16))) +uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32))) +uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8))) +uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64))) +int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64))) +uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64))) +int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64))) +uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64))) +int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64))) +uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64))) +int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64))) +uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, 
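The vldrdq gather_base forms take a vector of absolute addresses plus a small immediate byte offset (the _wb variants update the address vector through the pointer), while the offset/shifted_offset forms index off a scalar base. A base-form sketch, with 8 as an illustrative in-range offset:

#include <arm_mve.h>

/* Two independent 64-bit loads: lane i reads *(const int64_t *)(addrs[i] + 8). */
int64x2_t deref_pair(uint64x2_t addrs)
{
    return __arm_vldrdq_gather_base_s64(addrs, 8);
}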
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16))) +int16x8_t __arm_vldrhq_s16(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32))) +int32x4_t __arm_vldrhq_s32(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16))) +uint16x8_t __arm_vldrhq_u16(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32))) +uint32x4_t __arm_vldrhq_u32(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16))) +int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32))) +int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16))) +uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32))) +uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32))) +int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32))) +uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32))) +int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32))) +uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32))) +int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32))) +uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32))) +int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32))) +uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32))) +int32x4_t __arm_vldrwq_s32(const int32_t *); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32))) +uint32x4_t __arm_vldrwq_u32(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32))) +int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32))) +uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t __arm_vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t __arm_vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t __arm_vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t __arm_vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t __arm_vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t __arm_vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t __arm_vmaxavq_s16(uint16_t, int16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t __arm_vmaxavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t __arm_vmaxavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t __arm_vmaxavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t __arm_vmaxavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t __arm_vmaxavq(uint8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t __arm_vmaxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t __arm_vmaxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) 
+int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) +int8x16_t __arm_vmaxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t __arm_vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t __arm_vmaxvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t 
__arm_vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t __arm_vmaxvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t __arm_vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t __arm_vmaxvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t __arm_vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t __arm_vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t __arm_vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t __arm_vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t __arm_vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t __arm_vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t __arm_vmaxvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t __arm_vmaxvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t __arm_vmaxvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t __arm_vmaxvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t __arm_vmaxvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t __arm_vmaxvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t __arm_vmaxvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t __arm_vmaxvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t __arm_vmaxvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t __arm_vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t __arm_vminavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t __arm_vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t __arm_vminavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t __arm_vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t __arm_vminavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t __arm_vminavq_s16(uint16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t __arm_vminavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t __arm_vminavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t __arm_vminavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t __arm_vminavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t __arm_vminavq(uint8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t __arm_vminq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t __arm_vminq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t __arm_vminq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t 
__arm_vminq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t __arm_vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t __arm_vminvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t __arm_vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t __arm_vminvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t __arm_vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t __arm_vminvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t __arm_vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t __arm_vminvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t __arm_vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t __arm_vminvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t __arm_vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t __arm_vminvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t __arm_vminvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t __arm_vminvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t __arm_vminvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t __arm_vminvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t __arm_vminvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t __arm_vminvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t __arm_vminvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t __arm_vminvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t __arm_vminvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) 
+int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) +int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t 
__arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t __arm_vmladavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t __arm_vmladavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t __arm_vmladavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t 
__arm_vmladavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t __arm_vmladavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t __arm_vmladavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t __arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t __arm_vmlaldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t __arm_vmlaldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t __arm_vmlaldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t __arm_vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t __arm_vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t __arm_vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t __arm_vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t __arm_vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t __arm_vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t __arm_vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t __arm_vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t __arm_vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t __arm_vmlaq_m(uint32x4_t, 
uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t __arm_vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t __arm_vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t __arm_vmlaq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t __arm_vmlaq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t __arm_vmlaq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t __arm_vmlaq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) +int8x16_t __arm_vmlaq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) +int8x16_t __arm_vmlaq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t __arm_vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t __arm_vmlaq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t __arm_vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t __arm_vmlaq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t __arm_vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t __arm_vmlaq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t __arm_vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t __arm_vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t __arm_vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t __arm_vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t __arm_vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t __arm_vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t __arm_vmlasq_m_n_u16(uint16x8_t, 
uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t __arm_vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t __arm_vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t __arm_vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t __arm_vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t __arm_vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t __arm_vmlasq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t __arm_vmlasq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t __arm_vmlasq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t __arm_vmlasq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t __arm_vmlasq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t __arm_vmlasq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t __arm_vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t __arm_vmlasq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t __arm_vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t __arm_vmlasq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t __arm_vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t __arm_vmlasq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t __arm_vmlsdavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t __arm_vmlsdavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t __arm_vmlsdavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t 
__arm_vmlsdavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t __arm_vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t __arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t __arm_vmlsldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t __arm_vmlsldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t __arm_vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t __arm_vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t __arm_vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t __arm_vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t __arm_vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t __arm_vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t __arm_vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t __arm_vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t __arm_vmovlbq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t __arm_vmovlbq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t __arm_vmovlbq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t __arm_vmovlbq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t __arm_vmovlbq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t __arm_vmovlbq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t __arm_vmovlbq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t __arm_vmovlbq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t __arm_vmovlbq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t __arm_vmovlbq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t __arm_vmovlbq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t __arm_vmovlbq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t __arm_vmovlbq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t __arm_vmovlbq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t __arm_vmovlbq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t __arm_vmovlbq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t __arm_vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t __arm_vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t __arm_vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t __arm_vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t __arm_vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t __arm_vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t __arm_vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t __arm_vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t __arm_vmovltq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t __arm_vmovltq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t __arm_vmovltq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t __arm_vmovltq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t __arm_vmovltq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t __arm_vmovltq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t __arm_vmovltq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t __arm_vmovltq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t __arm_vmovltq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t __arm_vmovltq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t __arm_vmovltq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t __arm_vmovltq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t __arm_vmovltq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t __arm_vmovltq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t __arm_vmovltq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t __arm_vmovltq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t __arm_vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t __arm_vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t __arm_vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t __arm_vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t 
__arm_vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t __arm_vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t __arm_vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t __arm_vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t __arm_vmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t __arm_vmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t __arm_vmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t __arm_vmovnbq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t __arm_vmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t __arm_vmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t __arm_vmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t __arm_vmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t __arm_vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t __arm_vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t __arm_vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t __arm_vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t __arm_vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t __arm_vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t __arm_vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t __arm_vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t __arm_vmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t __arm_vmovntq(int8x16_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t __arm_vmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t __arm_vmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t __arm_vmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t __arm_vmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t __arm_vmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t __arm_vmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) +uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) +uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t __arm_vmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t __arm_vmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t __arm_vmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t __arm_vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, 
uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8))) +uint16x8_t __arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8))) +uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16))) +uint32x4_t 
__arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16))) +uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8))) +uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8))) +uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16))) +uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16))) +uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8))) +uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8))) +uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16))) +int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16))) +int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32))) +int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32))) +int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8))) +int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8))) +int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16))) +uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16))) +uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32))) +uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32))) +uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8))) +uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8))) +uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16))) +int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16))) +int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32))) +int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32))) +int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8))) +int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8))) +int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16))) +uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16))) +uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32))) +uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32))) +uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8))) +uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8))) +uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16))) +int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16))) +int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32))) +int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32))) +int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8))) +int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8))) +int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16))) +uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16))) +uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32))) +uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t __arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t __arm_vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t __arm_vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t __arm_vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t __arm_vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t __arm_vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t __arm_vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t __arm_vmulq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t __arm_vmulq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t __arm_vmulq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t __arm_vmulq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t __arm_vmulq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t __arm_vmulq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t __arm_vmulq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t __arm_vmulq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t __arm_vmulq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t __arm_vmulq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t __arm_vmulq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t __arm_vmulq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t __arm_vmulq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t __arm_vmulq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t __arm_vmulq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t __arm_vmulq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t __arm_vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t __arm_vmulq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t __arm_vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t __arm_vmulq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t __arm_vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t __arm_vmulq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t __arm_vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t __arm_vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t __arm_vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t __arm_vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t __arm_vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t __arm_vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t __arm_vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t __arm_vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t __arm_vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t __arm_vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t __arm_vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t __arm_vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t __arm_vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t __arm_vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t __arm_vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t __arm_vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))
+int16x8_t __arm_vmvnq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))
+int32x4_t __arm_vmvnq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))
+uint16x8_t __arm_vmvnq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))
+uint32x4_t __arm_vmvnq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t __arm_vmvnq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t __arm_vmvnq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t __arm_vmvnq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t __arm_vmvnq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t __arm_vmvnq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t __arm_vmvnq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t __arm_vmvnq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t __arm_vmvnq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t __arm_vmvnq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t __arm_vmvnq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t __arm_vmvnq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t __arm_vmvnq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
+int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
+int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
+uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
+uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t __arm_vmvnq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t __arm_vmvnq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t __arm_vmvnq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t __arm_vmvnq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t __arm_vmvnq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t __arm_vmvnq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t __arm_vmvnq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t __arm_vmvnq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t __arm_vmvnq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t __arm_vmvnq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t __arm_vmvnq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t __arm_vmvnq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t __arm_vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t __arm_vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t __arm_vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t __arm_vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t __arm_vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t __arm_vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t __arm_vnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t __arm_vnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t __arm_vnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t __arm_vnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t __arm_vnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t __arm_vnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t __arm_vnegq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t __arm_vnegq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t __arm_vnegq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t __arm_vnegq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t __arm_vnegq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t __arm_vnegq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t __arm_vornq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t __arm_vornq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t __arm_vornq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t __arm_vorrq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t __arm_vorrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t __arm_vorrq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t __arm_vorrq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t __arm_vorrq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t __arm_vorrq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t __arm_vorrq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t __arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t __arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))
+mve_pred16_t __arm_vpnot(mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t __arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t __arm_vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t __arm_vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t __arm_vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t __arm_vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t __arm_vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t __arm_vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t __arm_vqabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t __arm_vqabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t __arm_vqabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t __arm_vqabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t __arm_vqabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t __arm_vqabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t __arm_vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t __arm_vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t __arm_vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t __arm_vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t __arm_vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t __arm_vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t __arm_vqaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t __arm_vqaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t __arm_vqaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t __arm_vqaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t __arm_vqaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t __arm_vqaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t __arm_vqaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t __arm_vqaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t __arm_vqaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t __arm_vqaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t __arm_vqaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t __arm_vqaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t __arm_vqaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t __arm_vqaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t __arm_vqaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t __arm_vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t __arm_vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t __arm_vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t __arm_vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t __arm_vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t __arm_vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t __arm_vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t __arm_vqdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t __arm_vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t __arm_vqdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t __arm_vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t __arm_vqdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t __arm_vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t __arm_vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t __arm_vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t __arm_vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t __arm_vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t __arm_vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t __arm_vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t __arm_vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t __arm_vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t __arm_vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t __arm_vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t __arm_vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t __arm_vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t __arm_vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t __arm_vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t __arm_vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t __arm_vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t __arm_vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t __arm_vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t __arm_vqdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t __arm_vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t __arm_vqdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t __arm_vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t __arm_vqdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t __arm_vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t __arm_vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t __arm_vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t __arm_vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t __arm_vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t __arm_vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t __arm_vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t __arm_vqdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t __arm_vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t __arm_vqdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t __arm_vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t __arm_vqdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t __arm_vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t __arm_vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t __arm_vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t __arm_vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t __arm_vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t __arm_vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t __arm_vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t __arm_vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t __arm_vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t __arm_vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t __arm_vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t __arm_vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t __arm_vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t __arm_vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t __arm_vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t __arm_vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t __arm_vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t __arm_vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t __arm_vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t __arm_vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t __arm_vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t __arm_vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t __arm_vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t __arm_vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t __arm_vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t __arm_vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t __arm_vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t __arm_vqdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t __arm_vqdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t __arm_vqdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t __arm_vqdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t __arm_vqdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t __arm_vqdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t __arm_vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t __arm_vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t __arm_vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t __arm_vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t __arm_vqdmullbq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t __arm_vqdmullbq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t __arm_vqdmullbq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t __arm_vqdmullbq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t __arm_vqdmullbq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t __arm_vqdmullbq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t __arm_vqdmullbq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t __arm_vqdmullbq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t __arm_vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t __arm_vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t __arm_vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t __arm_vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t __arm_vqdmulltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t __arm_vqdmulltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t __arm_vqdmulltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t __arm_vqdmulltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t __arm_vqdmulltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t __arm_vqdmulltq(int16x8_t, int16x8_t);
+static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t __arm_vqdmulltq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t __arm_vqdmulltq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t __arm_vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t __arm_vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t __arm_vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t __arm_vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t __arm_vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t __arm_vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t __arm_vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t __arm_vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t __arm_vqmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t __arm_vqmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t __arm_vqmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t __arm_vqmovnbq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t __arm_vqmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t __arm_vqmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t __arm_vqmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t __arm_vqmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t __arm_vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t __arm_vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) +int16x8_t __arm_vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) 
+int16x8_t __arm_vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t __arm_vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t __arm_vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t __arm_vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t __arm_vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t __arm_vqmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t __arm_vqmovntq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t __arm_vqmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t __arm_vqmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t __arm_vqmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t __arm_vqmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t __arm_vqmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t __arm_vqmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t __arm_vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t __arm_vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t __arm_vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t __arm_vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t __arm_vqmovunbq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t __arm_vqmovunbq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t __arm_vqmovunbq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t __arm_vqmovunbq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t __arm_vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t __arm_vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t __arm_vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t __arm_vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t __arm_vqmovuntq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t __arm_vqmovuntq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t __arm_vqmovuntq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t __arm_vqmovuntq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t __arm_vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t __arm_vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t __arm_vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t __arm_vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t __arm_vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t __arm_vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t __arm_vqnegq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t __arm_vqnegq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t __arm_vqnegq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t __arm_vqnegq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t __arm_vqnegq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t __arm_vqnegq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t __arm_vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t __arm_vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t __arm_vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t __arm_vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t __arm_vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t __arm_vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t __arm_vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t __arm_vqrdmladhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t __arm_vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t __arm_vqrdmladhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t __arm_vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t __arm_vqrdmladhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t __arm_vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t __arm_vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t __arm_vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t __arm_vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t __arm_vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t __arm_vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t __arm_vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t __arm_vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t __arm_vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t __arm_vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t __arm_vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t __arm_vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t __arm_vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t __arm_vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t __arm_vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t __arm_vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t __arm_vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t __arm_vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t __arm_vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t __arm_vqrdmlahq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t __arm_vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t __arm_vqrdmlahq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t __arm_vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t __arm_vqrdmlahq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t __arm_vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t __arm_vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t __arm_vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t __arm_vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t __arm_vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t __arm_vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t __arm_vqrdmlashq_n_s16(int16x8_t, 
int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t __arm_vqrdmlashq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t __arm_vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t __arm_vqrdmlashq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t __arm_vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t __arm_vqrdmlashq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t __arm_vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t __arm_vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t __arm_vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t __arm_vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t __arm_vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t __arm_vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t __arm_vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t __arm_vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t __arm_vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t __arm_vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t __arm_vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t __arm_vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t __arm_vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t __arm_vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t __arm_vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t __arm_vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t __arm_vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t __arm_vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t __arm_vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t __arm_vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t __arm_vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t __arm_vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t __arm_vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t __arm_vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t __arm_vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t __arm_vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t __arm_vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t __arm_vqrdmulhq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t __arm_vqrdmulhq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t __arm_vqrdmulhq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t __arm_vqrdmulhq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t __arm_vqrdmulhq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t __arm_vqrdmulhq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t 
__arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t __arm_vqrshlq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t __arm_vqrshlq(int32x4_t, int32_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t __arm_vqrshlq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t __arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t 
__arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t 
__arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t __arm_vqshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t __arm_vqshlq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t __arm_vqshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t __arm_vqshlq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t __arm_vqshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t __arm_vqshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t __arm_vqshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t __arm_vqshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t __arm_vqshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t __arm_vqshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t __arm_vqshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t __arm_vqshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t __arm_vqshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t __arm_vqshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t __arm_vqshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t __arm_vqshluq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t __arm_vqshluq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t __arm_vqshluq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t __arm_vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t __arm_vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t __arm_vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t __arm_vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t __arm_vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t __arm_vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t __arm_vqsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t __arm_vqsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t __arm_vqsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t __arm_vqsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t __arm_vqsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t __arm_vqsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t __arm_vqsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t __arm_vqsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t __arm_vqsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t __arm_vqsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t __arm_vqsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t __arm_vqsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t __arm_vqsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t __arm_vqsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t __arm_vqsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t __arm_vqsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t __arm_vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t __arm_vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t __arm_vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t __arm_vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t __arm_vrev16q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t __arm_vrev16q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t __arm_vrev16q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t __arm_vrev16q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t __arm_vrev16q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t __arm_vrev16q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t __arm_vrev16q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t __arm_vrev16q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t __arm_vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t __arm_vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t __arm_vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t __arm_vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t __arm_vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t __arm_vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t __arm_vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t __arm_vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t __arm_vrev32q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t __arm_vrev32q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t __arm_vrev32q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t __arm_vrev32q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t __arm_vrev32q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t __arm_vrev32q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t __arm_vrev32q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t __arm_vrev32q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t __arm_vrev32q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t __arm_vrev32q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t __arm_vrev32q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t __arm_vrev32q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t __arm_vrev32q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t __arm_vrev32q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t __arm_vrev32q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t __arm_vrev32q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t __arm_vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t __arm_vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t __arm_vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t __arm_vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t __arm_vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t __arm_vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t __arm_vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t __arm_vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t __arm_vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t __arm_vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t __arm_vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t __arm_vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t __arm_vrev64q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t __arm_vrev64q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t __arm_vrev64q_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t __arm_vrev64q(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t __arm_vrev64q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t __arm_vrev64q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t __arm_vrev64q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t __arm_vrev64q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t __arm_vrev64q_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t __arm_vrev64q(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t __arm_vrev64q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t __arm_vrev64q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t __arm_vrev64q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t __arm_vrev64q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t __arm_vrev64q_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t __arm_vrev64q_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t __arm_vrev64q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t __arm_vrev64q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t __arm_vrev64q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t __arm_vrev64q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t __arm_vrev64q_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t __arm_vrev64q_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t __arm_vrev64q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t __arm_vrev64q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t __arm_vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) +int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) 
+int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t __arm_vrshlq_m_n(uint8x16_t, 
int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t __arm_vrshlq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t __arm_vrshlq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t __arm_vrshlq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t __arm_vrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t __arm_vrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t __arm_vrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t __arm_vrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t __arm_vrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t __arm_vrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t 
__arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) +int8x16_t 
__arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) +int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t __arm_vrshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t __arm_vrshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t __arm_vrshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t __arm_vrshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t __arm_vrshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t __arm_vrshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t __arm_vrshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t __arm_vrshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t __arm_vrshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, 
int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t __arm_vrshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t __arm_vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t __arm_vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t __arm_vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t __arm_vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t __arm_vsbciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t __arm_vsbciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t __arm_vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t __arm_vsbciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t __arm_vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t __arm_vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t __arm_vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t __arm_vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t __arm_vsbcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t __arm_vsbcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t __arm_vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t __arm_vsbcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t __arm_vshlcq_m_s16(int16x8_t, 
uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t __arm_vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t __arm_vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t __arm_vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t __arm_vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t __arm_vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t __arm_vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t __arm_vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t __arm_vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t __arm_vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t __arm_vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t __arm_vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t __arm_vshlcq_s16(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t __arm_vshlcq(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t __arm_vshlcq_s32(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t __arm_vshlcq(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t __arm_vshlcq_s8(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t __arm_vshlcq(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t __arm_vshlcq_u16(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t __arm_vshlcq(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t __arm_vshlcq_u32(uint32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t __arm_vshlcq(uint32x4_t, uint32_t *, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t __arm_vshlcq_u8(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t __arm_vshlcq(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t __arm_vshllbq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t __arm_vshllbq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t __arm_vshllbq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t __arm_vshllbq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t __arm_vshllbq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t __arm_vshllbq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t 
__arm_vshllbq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t __arm_vshlltq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t __arm_vshlltq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t __arm_vshlltq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t __arm_vshlltq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t __arm_vshlltq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t __arm_vshlltq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t __arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t 
__arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) +uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) +uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t __arm_vshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t __arm_vshlq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t __arm_vshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t __arm_vshlq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t __arm_vshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t __arm_vshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t __arm_vshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t __arm_vshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t __arm_vshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t __arm_vshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t __arm_vshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t __arm_vshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t __arm_vshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) +uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) 
+uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t __arm_vshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t __arm_vshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t __arm_vshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t __arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t __arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
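/* [Editor's note - illustration only, not a line of the upstream patch.]
 * Each intrinsic above is declared twice: once with a full type suffix
 * (e.g. __arm_vshlq_n_s16) and once as an __overloadable__ alias without
 * the suffix (__arm_vshlq_n), so the short form resolves by argument types.
 * The _m/_x suffixes follow the MVE ACLE predication convention: _m merges
 * inactive lanes from the first ("inactive") argument, _x leaves them
 * undefined. A minimal usage sketch, assuming an MVE-enabled target:
 *
 *   #include <arm_mve.h>
 *   int16x8_t demo(int16x8_t inactive, int16x8_t v, mve_pred16_t p) {
 *     int16x8_t a = __arm_vshlq_n(v, 4);         // resolves to ..._n_s16
 *     int16x8_t b = __arm_vshlq_x(v, v, p);      // inactive lanes undefined
 *     return __arm_vshlq_m(inactive, a, b, p);   // inactive lanes from arg 0
 *   }
 */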
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t __arm_vshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t __arm_vshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t __arm_vshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t __arm_vshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t __arm_vshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t __arm_vshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t __arm_vshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t __arm_vshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t __arm_vshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t __arm_vsliq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t __arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t __arm_vsriq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int);
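/* [Editor's note - illustration only, not a line of the upstream patch.]
 * The vst1q/vst2q/vst4q stores declared below follow the same
 * suffixed/overloaded pairing; the _p forms take an mve_pred16_t and write
 * only the active lanes, which is the usual way to store a loop tail.
 * A sketch, assuming the vctp32q tail-predicate intrinsic declared earlier
 * in this header:
 *
 *   #include <arm_mve.h>
 *   void store_tail(int32_t *dst, int32x4_t v, uint32_t remaining) {
 *     mve_pred16_t p = __arm_vctp32q(remaining); // first `remaining` lanes
 *     __arm_vst1q_p(dst, v, p);                  // inactive lanes untouched
 *   }
 */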
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16))) +uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32))) +uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32))) +uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8))) +uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8))) +uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16))) +int16x8_t __arm_vsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16))) +int16x8_t __arm_vsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32))) +int32x4_t __arm_vsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32))) +int32x4_t __arm_vsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8))) +int8x16_t __arm_vsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8))) +int8x16_t __arm_vsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16))) +uint16x8_t __arm_vsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16))) +uint16x8_t __arm_vsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32))) +uint32x4_t __arm_vsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32))) +uint32x4_t __arm_vsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8))) +uint8x16_t __arm_vsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8))) +uint8x16_t __arm_vsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16))) +int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16))) +int16x8_t __arm_vsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32))) +int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32))) +int32x4_t __arm_vsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8))) +int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8))) +int8x16_t 
__arm_vsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16))) +uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16))) +uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32))) +uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32))) +uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8))) +uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8))) +uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16))) +int16x8_t __arm_vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16))) +int16x8_t __arm_vsubq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32))) +int32x4_t __arm_vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32))) +int32x4_t __arm_vsubq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8))) +int8x16_t __arm_vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8))) +int8x16_t __arm_vsubq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16))) +uint16x8_t __arm_vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16))) +uint16x8_t __arm_vsubq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32))) +uint32x4_t __arm_vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32))) +uint32x4_t __arm_vsubq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8))) +uint8x16_t __arm_vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8))) +uint8x16_t __arm_vsubq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16))) +int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16))) +int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32))) +int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32))) 
+int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8))) +int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8))) +int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16))) +uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16))) +uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32))) +uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32))) +uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8))) +uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8))) +uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16))) +int16x8_t __arm_vuninitializedq(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32))) +int32x4_t __arm_vuninitializedq(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64))) +int64x2_t __arm_vuninitializedq(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8))) +int8x16_t __arm_vuninitializedq(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16))) +uint16x8_t __arm_vuninitializedq(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32))) +uint32x4_t __arm_vuninitializedq(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64))) +uint64x2_t __arm_vuninitializedq(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8))) +uint8x16_t __arm_vuninitializedq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16))) +int16x8_t __arm_vuninitializedq_s16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32))) +int32x4_t __arm_vuninitializedq_s32(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64))) +int64x2_t __arm_vuninitializedq_s64(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8))) +int8x16_t __arm_vuninitializedq_s8(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16))) +uint16x8_t __arm_vuninitializedq_u16(); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32))) +uint32x4_t __arm_vuninitializedq_u32(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64))) +uint64x2_t __arm_vuninitializedq_u64(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8))) +uint8x16_t __arm_vuninitializedq_u8(); + +#if (__ARM_FEATURE_MVE & 2) + +typedef __fp16 float16_t; +typedef float float32_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t; +typedef struct { float16x8_t val[2]; } float16x8x2_t; +typedef struct { float16x8_t val[4]; } float16x8x4_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t; +typedef struct { float32x4_t val[2]; } float32x4x2_t; +typedef struct { float32x4_t val[4]; } float32x4x4_t; + +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16))) +float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16))) +float16x8_t __arm_vabdq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32))) +float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32))) +float32x4_t __arm_vabdq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16))) +float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16))) +float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32))) +float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32))) +float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16))) +float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16))) +float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32))) +float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32))) +float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16))) +float16x8_t __arm_vabsq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16))) +float16x8_t __arm_vabsq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32))) +float32x4_t __arm_vabsq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32))) +float32x4_t __arm_vabsq(float32x4_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16))) +float16x8_t __arm_vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16))) +float16x8_t __arm_vabsq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32))) +float32x4_t __arm_vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32))) +float32x4_t __arm_vabsq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16))) +float16x8_t __arm_vabsq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16))) +float16x8_t __arm_vabsq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32))) +float32x4_t __arm_vabsq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32))) +float32x4_t __arm_vabsq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16))) +float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16))) +float16x8_t __arm_vaddq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32))) +float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32))) +float32x4_t __arm_vaddq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16))) +float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16))) +float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32))) +float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32))) +float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16))) +float16x8_t __arm_vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16))) +float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32))) +float32x4_t __arm_vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32))) +float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16))) +float16x8_t __arm_vaddq_n_f16(float16x8_t, 
float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16))) +float16x8_t __arm_vaddq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32))) +float32x4_t __arm_vaddq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32))) +float32x4_t __arm_vaddq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16))) +float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16))) +float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32))) +float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32))) +float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16))) +float16x8_t __arm_vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16))) +float16x8_t __arm_vaddq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32))) +float32x4_t __arm_vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32))) +float32x4_t __arm_vaddq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16))) +float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16))) +float16x8_t __arm_vandq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32))) +float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32))) +float32x4_t __arm_vandq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16))) +float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16))) +float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32))) +float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32))) +float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16))) +float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16))) +float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32))) +float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32))) +float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16))) +float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16))) +float16x8_t __arm_vbicq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32))) +float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32))) +float32x4_t __arm_vbicq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16))) +float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16))) +float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32))) +float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32))) +float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16))) +float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16))) +float16x8_t __arm_vbicq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32))) +float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32))) +float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16))) +float16x8_t __arm_vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16))) +float16x8_t __arm_vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32))) +float32x4_t __arm_vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32))) +float32x4_t __arm_vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16))) +float16x8_t __arm_vbrsrq_n_f16(float16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16))) +float16x8_t __arm_vbrsrq(float16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32))) 
+float32x4_t __arm_vbrsrq_n_f32(float32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32))) +float32x4_t __arm_vbrsrq(float32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16))) +float16x8_t __arm_vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16))) +float16x8_t __arm_vbrsrq_x(float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32))) +float32x4_t __arm_vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32))) +float32x4_t __arm_vbrsrq_x(float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16))) +float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16))) +float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32))) +float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32))) +float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16))) +float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16))) +float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32))) +float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32))) +float32x4_t __arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16))) +float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16))) +float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32))) +float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32))) +float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16))) +float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16))) +float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32))) +float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32))) +float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16))) +float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16))) +float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32))) +float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32))) +float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16))) +float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16))) +float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32))) +float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32))) +float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16))) +float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16))) +float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32))) +float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32))) +float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16))) +float16x8_t __arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16))) +float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32))) +float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32))) +float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16))) +float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16))) +float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32))) +float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32))) +float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16))) +float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16))) +float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32))) +float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32))) +float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16))) +float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16))) +float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32))) +float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32))) +float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16))) +float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16))) +float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32))) +float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32))) +float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16))) +float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16))) +float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32))) +float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32))) +float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, 
float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16))) +float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16))) +float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32))) +float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32))) +float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16))) +mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16))) +mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32))) +mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32))) +mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16))) +mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16))) +mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32))) +mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32))) +mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16))) +mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16))) +mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32))) +mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32))) +mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16))) +mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16))) +mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32))) +mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32))) +mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16))) +mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16))) +mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32))) +mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32))) +mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16))) +mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16))) +mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32))) +mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32))) +mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16))) +mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16))) +mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32))) +mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32))) +mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16))) +mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16))) +mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32))) +mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32))) +mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16))) +mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16))) +mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32))) +mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32))) +mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16))) +mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16))) +mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32))) +mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32))) +mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16))) +mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16))) +mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32))) +mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32))) +mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16))) +mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16))) +mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32))) +mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32))) +mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16))) +mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16))) +mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32))) +mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32))) +mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16))) +mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16))) +mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32))) +mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32))) +mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16))) +mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16))) +mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32))) +mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32))) +mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16))) +mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16))) +mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32))) +mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32))) +mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16))) +mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16))) +mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32))) +mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32))) +mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16))) +mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16))) +mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32))) +mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32))) +mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16))) +mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16))) +mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32))) +mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32))) +mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16))) +mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16))) +mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32))) +mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t __arm_vcmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t __arm_vcmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t __arm_vcmulq_rot180_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t __arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t __arm_vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t __arm_vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t __arm_vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t __arm_vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t __arm_vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t __arm_vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t __arm_vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t __arm_vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))
+int16x8_t __arm_vcvtaq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))
+int32x4_t __arm_vcvtaq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))
+uint16x8_t __arm_vcvtaq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))
+uint32x4_t __arm_vcvtaq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))
+int16x8_t __arm_vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))
+int32x4_t __arm_vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))
+uint16x8_t __arm_vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))
+uint32x4_t __arm_vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))
+float32x4_t __arm_vcvtbq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))
+float32x4_t __arm_vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))
+float32x4_t __arm_vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t __arm_vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t __arm_vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t __arm_vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t __arm_vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t __arm_vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t __arm_vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t __arm_vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t __arm_vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))
+int16x8_t __arm_vcvtmq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))
+int32x4_t __arm_vcvtmq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))
+uint16x8_t __arm_vcvtmq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))
+uint32x4_t __arm_vcvtmq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))
+int16x8_t __arm_vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))
+int32x4_t __arm_vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))
+uint16x8_t __arm_vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))
+uint32x4_t __arm_vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t __arm_vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t __arm_vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t __arm_vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t __arm_vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t __arm_vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t __arm_vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t __arm_vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t __arm_vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))
+int16x8_t __arm_vcvtnq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))
+int32x4_t __arm_vcvtnq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))
+uint16x8_t __arm_vcvtnq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))
+uint32x4_t __arm_vcvtnq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))
+int16x8_t __arm_vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))
+int32x4_t __arm_vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))
+uint16x8_t __arm_vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))
+uint32x4_t __arm_vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t __arm_vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t __arm_vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t __arm_vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t __arm_vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t __arm_vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t __arm_vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t __arm_vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t __arm_vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))
+int16x8_t __arm_vcvtpq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))
+int32x4_t __arm_vcvtpq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))
+uint16x8_t __arm_vcvtpq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))
+uint32x4_t __arm_vcvtpq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))
+int16x8_t __arm_vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))
+int32x4_t __arm_vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))
+uint16x8_t __arm_vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))
+uint32x4_t __arm_vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t __arm_vcvtq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t __arm_vcvtq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t __arm_vcvtq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t __arm_vcvtq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t __arm_vcvtq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t __arm_vcvtq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t __arm_vcvtq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t __arm_vcvtq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t __arm_vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t __arm_vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t __arm_vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t __arm_vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t __arm_vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t __arm_vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t __arm_vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t __arm_vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t __arm_vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t __arm_vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t __arm_vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t __arm_vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t __arm_vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t __arm_vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t __arm_vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t __arm_vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t __arm_vcvtq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t __arm_vcvtq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t __arm_vcvtq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t __arm_vcvtq_n_f32_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t __arm_vcvtq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
+int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
+int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
+uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
+uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))
+int16x8_t __arm_vcvtq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))
+int32x4_t __arm_vcvtq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))
+uint16x8_t __arm_vcvtq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))
+uint32x4_t __arm_vcvtq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t __arm_vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t __arm_vcvtq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t __arm_vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t __arm_vcvtq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t __arm_vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t __arm_vcvtq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t __arm_vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t __arm_vcvtq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t __arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
+int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
+int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
+uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
+uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))
+int16x8_t __arm_vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))
+int32x4_t __arm_vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))
+uint16x8_t __arm_vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))
+uint32x4_t __arm_vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))
+float32x4_t __arm_vcvttq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))
+float32x4_t __arm_vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))
+float32x4_t __arm_vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))
+float16x8_t __arm_vdupq_n_f16(float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))
+float32x4_t __arm_vdupq_n_f32(float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))
+float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))
+float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t __arm_veorq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t __arm_veorq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t __arm_vfmaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t __arm_vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t __arm_vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t __arm_vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t __arm_vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t __arm_vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t __arm_vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t __arm_vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t __arm_vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t __arm_vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t __arm_vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t __arm_vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t __arm_vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t __arm_vfmasq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t __arm_vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t __arm_vfmasq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t __arm_vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t __arm_vfmsq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t __arm_vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t __arm_vfmsq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t __arm_vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t __arm_vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t __arm_vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t __arm_vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t __arm_vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t __arm_vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t __arm_vmaxnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t __arm_vmaxnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t __arm_vmaxnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t __arm_vmaxnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t __arm_vmaxnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t __arm_vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t __arm_vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t __arm_vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t __arm_vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t __arm_vmaxnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t __arm_vmaxnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t __arm_vmaxnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t __arm_vmaxnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t __arm_vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t __arm_vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t __arm_vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t __arm_vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t __arm_vminnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t __arm_vminnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t __arm_vminnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t __arm_vminnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t __arm_vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t __arm_vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t __arm_vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t __arm_vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t __arm_vminnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t __arm_vminnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t __arm_vminnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t __arm_vminnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t __arm_vminnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t __arm_vminnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t __arm_vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t __arm_vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t __arm_vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t __arm_vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t __arm_vmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t __arm_vmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t __arm_vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t __arm_vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t __arm_vmulq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t __arm_vmulq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t __arm_vmulq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t __arm_vmulq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t __arm_vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t __arm_vmulq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t __arm_vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t __arm_vmulq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t __arm_vnegq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t __arm_vnegq(float16x8_t);
+static __inline__
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t __arm_vnegq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t __arm_vnegq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t __arm_vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t __arm_vnegq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t __arm_vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t __arm_vnegq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t __arm_vnegq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t __arm_vnegq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t __arm_vnegq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t __arm_vnegq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t __arm_vornq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t __arm_vornq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t __arm_vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t __arm_vorrq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t __arm_vorrq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t __arm_vreinterpretq_f16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t __arm_vreinterpretq_f16(int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t __arm_vreinterpretq_f16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t __arm_vreinterpretq_f16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t __arm_vreinterpretq_f16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t __arm_vreinterpretq_f16(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t __arm_vreinterpretq_f16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t __arm_vreinterpretq_f16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t __arm_vreinterpretq_f16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t __arm_vreinterpretq_f32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t __arm_vreinterpretq_f32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t __arm_vreinterpretq_f32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t 
__arm_vreinterpretq_f32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t __arm_vreinterpretq_f32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t __arm_vreinterpretq_f32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t __arm_vreinterpretq_f32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t __arm_vreinterpretq_f32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t __arm_vreinterpretq_f32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t __arm_vreinterpretq_s16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t __arm_vreinterpretq_s16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t __arm_vreinterpretq_s32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t __arm_vreinterpretq_s32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t __arm_vreinterpretq_s64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t __arm_vreinterpretq_s64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t __arm_vreinterpretq_s8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t __arm_vreinterpretq_s8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t __arm_vreinterpretq_u16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t __arm_vreinterpretq_u16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t __arm_vreinterpretq_u32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t __arm_vreinterpretq_u32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t __arm_vreinterpretq_u64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t __arm_vreinterpretq_u64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t __arm_vreinterpretq_u8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t __arm_vreinterpretq_u8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t __arm_vrev32q_f16(float16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t __arm_vrev32q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t __arm_vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t __arm_vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t __arm_vrev32q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t __arm_vrev32q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t __arm_vrev64q_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t __arm_vrev64q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t __arm_vrev64q_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t __arm_vrev64q(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t __arm_vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t __arm_vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t __arm_vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t __arm_vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t __arm_vrev64q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t __arm_vrev64q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t __arm_vrev64q_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t __arm_vrev64q_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t __arm_vrndaq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t __arm_vrndaq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t __arm_vrndaq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t __arm_vrndaq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t __arm_vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t __arm_vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t __arm_vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t __arm_vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t __arm_vrndaq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t __arm_vrndaq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t __arm_vrndaq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t __arm_vrndaq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t __arm_vrndmq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t __arm_vrndmq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t __arm_vrndmq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t __arm_vrndmq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t __arm_vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t __arm_vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t __arm_vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t __arm_vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t __arm_vrndmq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t __arm_vrndmq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t __arm_vrndmq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t __arm_vrndmq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t __arm_vrndnq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t __arm_vrndnq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t __arm_vrndnq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t __arm_vrndnq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t __arm_vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t __arm_vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t __arm_vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t __arm_vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t __arm_vrndnq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t __arm_vrndnq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t __arm_vrndnq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t __arm_vrndnq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t __arm_vrndpq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t __arm_vrndpq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t __arm_vrndpq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t __arm_vrndpq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t __arm_vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t __arm_vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t __arm_vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t __arm_vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t __arm_vrndpq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t __arm_vrndpq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t __arm_vrndpq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t __arm_vrndpq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t __arm_vrndq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t __arm_vrndq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t __arm_vrndq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t __arm_vrndq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t __arm_vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t __arm_vrndq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t __arm_vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t __arm_vrndq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t __arm_vrndq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t __arm_vrndq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t __arm_vrndq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t __arm_vrndq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t __arm_vrndxq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t __arm_vrndxq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t __arm_vrndxq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t __arm_vrndxq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t __arm_vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t __arm_vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t __arm_vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t __arm_vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t __arm_vrndxq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t __arm_vrndxq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t __arm_vrndxq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t __arm_vrndxq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void __arm_vst1q_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void __arm_vst1q(float16_t *, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void __arm_vst1q_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void __arm_vst1q(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void __arm_vst2q_f16(float16_t *, float16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void __arm_vst2q(float16_t *, float16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void __arm_vst2q_f32(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void __arm_vst2q(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void __arm_vst4q_f16(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void __arm_vst4q(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void __arm_vst4q_f32(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void __arm_vst4q(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void __arm_vstrhq_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void __arm_vstrhq(float16_t *, float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void __arm_vstrwq_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void __arm_vstrwq(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t __arm_vsubq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t __arm_vsubq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t 
__arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t __arm_vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t __arm_vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t __arm_vsubq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t __arm_vsubq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t __arm_vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t __arm_vsubq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t __arm_vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t __arm_vsubq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t __arm_vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t __arm_vuninitializedq_f32();
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t __arm_vuninitializedq(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t __arm_vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) */
+
+#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
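For readers skimming this machine-generated header: each intrinsic is declared twice, once under its full type-suffixed name (e.g. __arm_vsubq_x_f32) and once as an __overloadable__ polymorphic alias (e.g. __arm_vsubq_x). A minimal usage sketch, illustrative only and not part of the patch; it uses only names declared above, assumes an MVE-capable target such as Cortex-M55 built with -march=armv8.1-m.main+mve.fp -mfloat-abi=hard, and masked_store_diff is just an example name:

#include <arm_mve.h>

/* Subtract b from a only in the lanes enabled by predicate p, then
   scatter-store those lanes at base[offsets[i]]. The _x ("don't care")
   variant leaves disabled lanes undefined, which is fine here because
   the predicated scatter store never writes them; the _m variants take
   an explicit "inactive" vector to merge into instead. */
void masked_store_diff(float32_t *base, uint32x4_t offsets,
                       float32x4_t a, float32x4_t b, mve_pred16_t p)
{
    float32x4_t d = __arm_vsubq_x(a, b, p);
    __arm_vstrwq_scatter_offset_p(base, offsets, d, p);
}

The section that follows declares the same intrinsics without the __arm_ prefix, emitted only when __ARM_MVE_PRESERVE_USER_NAMESPACE is not defined.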
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl))) +int64_t asrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll))) +uint64_t lsll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr))) +int32_t sqrshr(int32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl))) +int64_t sqrshrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48))) +int64_t sqrshrl_sat48(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl))) +int32_t sqshl(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll))) +int64_t sqshll(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr))) +int32_t srshr(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl))) +int64_t srshrl(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl))) +uint32_t uqrshl(uint32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll))) +uint64_t uqrshll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48))) +uint64_t uqrshll_sat48(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl))) +uint32_t uqshl(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll))) +uint64_t uqshll(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr))) +uint32_t urshr(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl))) +uint64_t urshrl(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t vabavq(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t vabavq_s32(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t vabavq(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t vabavq(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t vabdq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t vabdq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t vabdq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t vabdq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t vabdq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t vabdq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t vabdq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t vabdq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t vabdq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t vabsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t vabsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t vabsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t vabsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t vabsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t vabsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t vabsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t vabsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t vabsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t vabsq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t vabsq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t vabsq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t vabsq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t vabsq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t 
vabsq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t vaddlvaq_s32(int64_t, 
int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t vaddlvaq(int64_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t vaddlvaq_u32(uint64_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t vaddlvaq(uint64_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t vaddlvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t vaddlvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t vaddlvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t vaddlvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t vaddlvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t vaddlvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t vaddlvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t vaddlvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32))) +int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32))) +int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8))) +int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8))) +int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16))) +uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16))) +uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32))) +uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32))) +uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8))) +uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8))) +uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16))) +int16x8_t vaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16))) +int16x8_t vaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32))) +int32x4_t vaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32))) +int32x4_t vaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8))) +int8x16_t vaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8))) +int8x16_t vaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16))) +uint16x8_t vaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16))) +uint16x8_t vaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32))) +uint32x4_t vaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32))) +uint32x4_t vaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8))) +uint8x16_t vaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8))) +uint8x16_t vaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16))) +int16x8_t vaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16))) +int16x8_t vaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32))) +int32x4_t vaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32))) +int32x4_t vaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8))) +int8x16_t vaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8))) +int8x16_t vaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16))) +uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16))) +uint16x8_t vaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32))) +uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32))) +uint32x4_t vaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8))) +uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8))) +uint8x16_t vaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16))) +int16x8_t vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16))) +int16x8_t vaddq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32))) +int32x4_t vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32))) +int32x4_t vaddq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8))) +int8x16_t vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8))) +int8x16_t vaddq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16))) +uint16x8_t vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16))) +uint16x8_t vaddq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32))) +uint32x4_t vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); 
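+// [Editor's note: illustrative sketch only, not part of the upstream
+// clang 14 arm_mve.h. Each intrinsic above is declared twice: under its
+// type-suffixed name (e.g. vaddq_n_u32) and under a polymorphic name
+// (e.g. vaddq) marked __overloadable__, so the argument types select the
+// variant. The _x forms leave predicated-off lanes undefined. Assuming an
+// MVE-enabled target and hypothetical values v and p:
+//
+//   uint32x4_t v = vdupq_n_u32(7);
+//   mve_pred16_t p = 0x00ff;            // low two 32-bit lanes active
+//   uint32x4_t r1 = vaddq(v, v);        // resolves to vaddq_u32
+//   uint32x4_t r2 = vaddq_x(v, 42u, p); // resolves to vaddq_x_n_u32]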
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32))) +uint32x4_t vaddq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8))) +uint8x16_t vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8))) +uint8x16_t vaddq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16))) +int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16))) +int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32))) +int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32))) +int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8))) +int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8))) +int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16))) +uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16))) +uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32))) +uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32))) +uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8))) +uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8))) +uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16))) +int32_t vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16))) +int32_t vaddvaq_p(int32_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32))) +int32_t vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32))) +int32_t vaddvaq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8))) +int32_t vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8))) +int32_t vaddvaq_p(int32_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16))) +uint32_t vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16))) +uint32_t vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32))) +uint32_t vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32))) +uint32_t vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8))) +uint32_t vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8))) +uint32_t vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16))) +int32_t vaddvaq_s16(int32_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16))) +int32_t vaddvaq(int32_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32))) +int32_t vaddvaq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32))) +int32_t vaddvaq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8))) +int32_t vaddvaq_s8(int32_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8))) +int32_t vaddvaq(int32_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16))) +uint32_t vaddvaq_u16(uint32_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16))) +uint32_t vaddvaq(uint32_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32))) +uint32_t vaddvaq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32))) +uint32_t vaddvaq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8))) +uint32_t vaddvaq_u8(uint32_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8))) +uint32_t vaddvaq(uint32_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16))) +int32_t vaddvq_p_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16))) +int32_t vaddvq_p(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32))) +int32_t vaddvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32))) +int32_t vaddvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8))) +int32_t vaddvq_p_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8))) +int32_t vaddvq_p(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16))) +uint32_t vaddvq_p_u16(uint16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16))) +uint32_t vaddvq_p(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32))) +uint32_t vaddvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32))) +uint32_t vaddvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8))) +uint32_t vaddvq_p_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8))) +uint32_t vaddvq_p(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16))) +int32_t vaddvq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16))) +int32_t vaddvq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32))) +int32_t vaddvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32))) +int32_t vaddvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8))) +int32_t vaddvq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8))) +int32_t vaddvq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16))) +uint32_t vaddvq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16))) +uint32_t vaddvq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32))) +uint32_t vaddvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32))) +uint32_t vaddvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8))) +uint32_t vaddvq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8))) +uint32_t vaddvq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16))) +int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16))) +int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32))) +int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32))) +int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8))) +int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8))) +int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16))) +uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16))) +uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32))) +uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32))) +uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8))) +uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8))) +uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16))) +int16x8_t vandq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16))) +int16x8_t vandq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32))) +int32x4_t vandq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32))) +int32x4_t vandq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8))) +int8x16_t vandq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8))) +int8x16_t vandq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16))) +uint16x8_t vandq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16))) +uint16x8_t vandq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32))) +uint32x4_t vandq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32))) +uint32x4_t vandq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8))) +uint8x16_t vandq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8))) +uint8x16_t vandq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16))) +int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16))) +int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32))) +int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32))) +int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8))) +int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8))) +int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16))) +uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16))) +uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32))) +uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32))) +uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8))) +uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8))) +uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16))) +int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16))) +int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32))) +int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32))) +int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16))) +uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16))) +uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32))) +uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32))) +uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16))) +int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16))) +int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32))) +int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32))) +int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8))) +int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8))) +int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16))) +uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16))) +uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32))) +uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32))) +uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8))) +uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8))) +uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16))) +int16x8_t vbicq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16))) +int16x8_t vbicq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32))) +int32x4_t vbicq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32))) +int32x4_t vbicq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16))) +uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16))) +uint16x8_t vbicq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32))) +uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32))) +uint32x4_t vbicq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16))) +int16x8_t vbicq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16))) +int16x8_t vbicq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32))) +int32x4_t vbicq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32))) +int32x4_t vbicq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8))) +int8x16_t vbicq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8))) +int8x16_t vbicq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16))) +uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16))) +uint16x8_t vbicq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32))) +uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32))) +uint32x4_t vbicq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8))) +uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t); 
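+// [Editor's note: illustrative sketch only, not part of the upstream
+// header. The _m ("merging") forms above take a leading "inactive" vector
+// that supplies the result lanes whose predicate bits are clear; the plain
+// and _n forms are unpredicated. Given hypothetical int16x8_t values a, b:
+//
+//   mve_pred16_t p = vctp16q(5);        // first five 16-bit lanes active
+//   int16x8_t r = vbicq_m(vdupq_n_s16(0), a, b, p);
+//   // active lanes: a & ~b (BIC); inactive lanes: 0]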
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8))) +uint8x16_t vbicq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16))) +int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16))) +int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32))) +int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32))) +int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8))) +int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8))) +int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16))) +uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16))) +uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32))) +uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32))) +uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8))) +uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8))) +uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16))) +int16x8_t vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16))) +int16x8_t vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32))) +int32x4_t vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32))) +int32x4_t vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8))) +int8x16_t vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8))) +int8x16_t vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16))) +uint16x8_t vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16))) +uint16x8_t vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32))) +uint32x4_t vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32))) +uint32x4_t vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8))) +uint8x16_t vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8))) +uint8x16_t vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16))) +int16x8_t vbrsrq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16))) +int16x8_t vbrsrq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32))) +int32x4_t vbrsrq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32))) +int32x4_t vbrsrq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8))) +int8x16_t vbrsrq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8))) +int8x16_t vbrsrq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16))) +uint16x8_t vbrsrq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16))) +uint16x8_t vbrsrq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32))) +uint32x4_t vbrsrq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32))) +uint32x4_t vbrsrq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8))) +uint8x16_t vbrsrq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8))) +uint8x16_t vbrsrq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16))) +int16x8_t vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16))) +int16x8_t vbrsrq_x(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32))) +int32x4_t vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32))) +int32x4_t vbrsrq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8))) +int8x16_t vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8))) +int8x16_t vbrsrq_x(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16))) +uint16x8_t vbrsrq_x_n_u16(uint16x8_t, int32_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16))) +uint16x8_t vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32))) +uint32x4_t vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32))) +uint32x4_t vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8))) +uint8x16_t vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8))) +uint8x16_t vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16))) +int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16))) +int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32))) +int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32))) +int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8))) +int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8))) +int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16))) +uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16))) +uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32))) +uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32))) +uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8))) +uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8))) +uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16))) +int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16))) +int16x8_t vcaddq_rot270(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32))) +int32x4_t 
vcaddq_rot270_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32))) +int32x4_t vcaddq_rot270(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8))) +int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8))) +int8x16_t vcaddq_rot270(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16))) +uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16))) +uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32))) +uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32))) +uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8))) +uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8))) +uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16))) +int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16))) +int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32))) +int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32))) +int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8))) +int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8))) +int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16))) +uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16))) +uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32))) +uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32))) +uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8))) +uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8))) +uint8x16_t 
vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t vcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t vcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t vcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t vclsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t vclsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t vclsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t vclsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t vclsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t vclsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t vclsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t vclsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t vclsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t vclsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t vclsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t vclsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t vclzq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t vclzq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t vclzq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t vclzq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t vclzq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t vclzq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t vclzq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t vclzq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t vclzq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t vclzq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t vclzq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t vclzq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t vclzq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t vclzq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t vclzq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t vclzq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t vclzq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t vclzq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t vclzq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t vclzq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t vclzq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t vclzq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t vclzq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t vclzq_x(uint8x16_t, mve_pred16_t);
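+/* Note: each operation in this header is declared twice: once with an
+ * explicit type suffix (e.g. vclzq_u32) and once as a polymorphic
+ * __overloadable__ alias (vclzq). The _m ("merging") forms take their
+ * inactive-lane values from the first argument; the _x forms leave
+ * inactive lanes unspecified. Illustrative sketch only, assuming an
+ * MVE-enabled target such as -march=armv8.1-m.main+mve; the helper
+ * name is hypothetical:
+ *
+ *   uint32x4_t clz_or_keep(uint32x4_t v, uint32x4_t keep, mve_pred16_t p) {
+ *     return vclzq_m(keep, v, p);   // resolves to vclzq_m_u32
+ *   }
+ */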
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
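+/* Note: the vcmp*q comparisons above return an mve_pred16_t lane mask
+ * rather than a vector of booleans, so their result can feed any
+ * predicated _m/_x form directly. Illustrative sketch only; the helper
+ * name is hypothetical:
+ *
+ *   int32x4_t clz_of_positive(int32x4_t v) {
+ *     mve_pred16_t pos = vcmpgtq(v, 0);  // resolves to vcmpgtq_n_s32
+ *     return vclzq_x(v, pos);            // lanes where !pos are unspecified
+ *   }
+ */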
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))
+mve_pred16_t vctp16q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))
+mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))
+mve_pred16_t vctp32q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))
+mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))
+mve_pred16_t vctp64q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))
+mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))
+mve_pred16_t vctp8q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))
+mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
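+/* Note: the vctpNq intrinsics above build a tail predicate with the low
+ * n lanes active, the usual way to handle a partial final loop
+ * iteration. Illustrative sketch only; the helper name is hypothetical:
+ *
+ *   uint32x4_t splat_first_n(uint32_t x, uint32_t n) {
+ *     mve_pred16_t p = vctp32q(n);   // lanes [0, n) active
+ *     return vdupq_x_n_u32(x, p);    // inactive lanes unspecified
+ *   }
+ */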
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t vddupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t vddupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t vddupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t vddupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t vddupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t vddupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t vddupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t vddupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t vddupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t vddupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t vddupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t vddupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t vddupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))
+int16x8_t vdupq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))
+int32x4_t vdupq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))
+int8x16_t vdupq_n_s8(int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))
+uint16x8_t vdupq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))
+uint32x4_t vdupq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))
+uint8x16_t vdupq_n_u8(uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))
+int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))
+int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))
+int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))
+uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))
+uint32x4_t vdupq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))
+uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int);
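+/* Note: the vddupq/vdwdupq generators in this block produce vectors of
+ * decrementing indices; the _wb variants take a uint32_t * start index
+ * and write the next start value back through it, and the vdwdupq forms
+ * wrap the index at a given limit. Illustrative sketch only (helper
+ * name hypothetical; the step immediate must be 1, 2, 4 or 8):
+ *
+ *   uint32x4_t next_ring_indices(uint32_t *idx, uint32_t ring) {
+ *     return vdwdupq_u32(idx, ring, 1);  // *idx is updated, wraps at ring
+ *   }
+ */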
__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16))) +uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32))) +uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32))) +uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8))) +uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8))) +uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16))) +uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16))) +uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32))) +uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32))) +uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8))) +uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8))) +uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16))) +uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16))) +uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32))) +uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32))) +uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8))) +uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8))) +uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16))) +int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16))) +int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32))) +int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32))) +int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8))) +int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8))) +int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16))) +uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16))) +uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32))) +uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32))) +uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8))) +uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8))) +uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16))) +int16x8_t veorq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16))) +int16x8_t veorq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32))) +int32x4_t veorq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32))) +int32x4_t veorq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8))) +int8x16_t veorq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8))) +int8x16_t veorq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16))) +uint16x8_t veorq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16))) +uint16x8_t veorq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32))) +uint32x4_t veorq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32))) +uint32x4_t veorq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8))) +uint8x16_t veorq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8))) +uint8x16_t veorq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16))) +int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16))) +int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32))) +int32x4_t veorq_x_s32(int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32))) +int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8))) +int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8))) +int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16))) +uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16))) +uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32))) +uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32))) +uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8))) +uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8))) +uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16))) +int16_t vgetq_lane_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16))) +int16_t vgetq_lane(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32))) +int32_t vgetq_lane_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32))) +int32_t vgetq_lane(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64))) +int64_t vgetq_lane_s64(int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64))) +int64_t vgetq_lane(int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8))) +int8_t vgetq_lane_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8))) +int8_t vgetq_lane(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16))) +uint16_t vgetq_lane_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16))) +uint16_t vgetq_lane(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32))) +uint32_t vgetq_lane_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32))) +uint32_t vgetq_lane(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64))) +uint64_t vgetq_lane_u64(uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64))) +uint64_t vgetq_lane(uint64x2_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8))) +uint8_t vgetq_lane_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8))) +uint8_t vgetq_lane(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16))) +int16x8_t vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16))) +int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32))) +int32x4_t vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32))) +int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8))) +int8x16_t vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8))) +int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16))) +uint16x8_t vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16))) +uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32))) +uint32x4_t vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32))) +uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8))) +uint8x16_t vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8))) +uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16))) +int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16))) +int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32))) +int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32))) +int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8))) +int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8))) +int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16))) +uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, 
uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16))) +uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32))) +uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32))) +uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8))) +uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8))) +uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16))) +int16x8_t vhaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16))) +int16x8_t vhaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32))) +int32x4_t vhaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32))) +int32x4_t vhaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8))) +int8x16_t vhaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8))) +int8x16_t vhaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16))) +uint16x8_t vhaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16))) +uint16x8_t vhaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32))) +uint32x4_t vhaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32))) +uint32x4_t vhaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8))) +uint8x16_t vhaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8))) +uint8x16_t vhaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16))) +int16x8_t vhaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16))) +int16x8_t vhaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32))) +int32x4_t vhaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32))) +int32x4_t vhaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8))) +int8x16_t vhaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8))) +int8x16_t vhaddq(int8x16_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16))) +uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16))) +uint16x8_t vhaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32))) +uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32))) +uint32x4_t vhaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8))) +uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8))) +uint8x16_t vhaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16))) +int16x8_t vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16))) +int16x8_t vhaddq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32))) +int32x4_t vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32))) +int32x4_t vhaddq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8))) +int8x16_t vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8))) +int8x16_t vhaddq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16))) +uint16x8_t vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16))) +uint16x8_t vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32))) +uint32x4_t vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32))) +uint32x4_t vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8))) +uint8x16_t vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8))) +uint8x16_t vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16))) +int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16))) +int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32))) +int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32))) +int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t vhcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
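+/* Editor's note: an illustrative usage sketch, not part of the upstream
+ * header. The declarations above follow the ACLE MVE naming scheme: a
+ * `_n` suffix takes a scalar second operand, `_wb` takes a pointer that
+ * is written back, `_m` is merging predication (inactive lanes keep the
+ * value of the first, "inactive", argument), and `_x` is "don't care"
+ * predication (inactive lanes are undefined). Assuming an MVE-enabled
+ * target and the vctp16q() predicate intrinsic declared elsewhere in
+ * this header:
+ *
+ *   int16x8_t a = vdupq_n_s16(3), b = vdupq_n_s16(5);
+ *   mve_pred16_t p = vctp16q(5);        // first 5 of 8 lanes active
+ *   int16x8_t r = vhaddq_x(a, b, p);    // halving add: (3+5)>>1 = 4
+ */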
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__,
__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32))) +int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8))) +int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8))) +int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16))) +int16x8_t vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16))) +int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32))) +int32x4_t vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32))) +int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8))) +int8x16_t vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8))) +int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16))) +uint16x8_t vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16))) +uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32))) +uint32x4_t vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32))) +uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8))) +uint8x16_t vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8))) +uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16))) +int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16))) +int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32))) +int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32))) +int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8))) +int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8))) 
+int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16))) +uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16))) +uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32))) +uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32))) +uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8))) +uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8))) +uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16))) +int16x8_t vhsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16))) +int16x8_t vhsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32))) +int32x4_t vhsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32))) +int32x4_t vhsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8))) +int8x16_t vhsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8))) +int8x16_t vhsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16))) +uint16x8_t vhsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16))) +uint16x8_t vhsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32))) +uint32x4_t vhsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32))) +uint32x4_t vhsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8))) +uint8x16_t vhsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8))) +uint8x16_t vhsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16))) +int16x8_t vhsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16))) +int16x8_t vhsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32))) +int32x4_t vhsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32))) +int32x4_t vhsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8))) +int8x16_t 
vhsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8))) +int8x16_t vhsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16))) +uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16))) +uint16x8_t vhsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32))) +uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32))) +uint32x4_t vhsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8))) +uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8))) +uint8x16_t vhsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16))) +int16x8_t vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16))) +int16x8_t vhsubq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32))) +int32x4_t vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32))) +int32x4_t vhsubq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8))) +int8x16_t vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8))) +int8x16_t vhsubq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16))) +uint16x8_t vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16))) +uint16x8_t vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32))) +uint32x4_t vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32))) +uint32x4_t vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8))) +uint8x16_t vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8))) +uint8x16_t vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16))) +int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16))) +int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32))) +int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32))) +int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8))) +int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8))) +int8x16_t vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16))) +uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16))) +uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32))) +uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32))) +uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8))) +uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8))) +uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16))) +uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16))) +uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32))) +uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32))) +uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8))) +uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8))) +uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16))) +uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16))) +uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32))) +uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32))) +uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8))) +uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8))) +uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16))) +uint16x8_t vidupq_n_u16(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16))) +uint16x8_t vidupq_u16(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32))) +uint32x4_t vidupq_n_u32(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32))) +uint32x4_t vidupq_u32(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8))) +uint8x16_t vidupq_n_u8(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8))) +uint8x16_t vidupq_u8(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16))) +uint16x8_t vidupq_wb_u16(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16))) +uint16x8_t vidupq_u16(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32))) +uint32x4_t vidupq_wb_u32(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32))) +uint32x4_t vidupq_u32(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8))) +uint8x16_t vidupq_wb_u8(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8))) +uint8x16_t vidupq_u8(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t vidupq_x_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t viwdupq_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t viwdupq_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t viwdupq_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t viwdupq_u16(uint32_t 
*, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q(const int8_t *);
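+/* Editor's note: an illustrative usage sketch, not part of the upstream
+ * header. vld1q is the overloaded contiguous load; the vld1q_z forms
+ * declared just below are its zero-predicated variants, useful for loop
+ * tails. Assuming an MVE-enabled target and the vctp8q() predicate
+ * intrinsic declared elsewhere in this header:
+ *
+ *   const int8_t *src = ...;              // at least `remaining` bytes
+ *   uint32_t remaining = 10;
+ *   mve_pred16_t p = vctp8q(remaining);   // first 10 of 16 lanes active
+ *   int8x16_t v = vld1q_z(src, p);        // inactive lanes read as zero
+ */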
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q(const int8_t *);
+static __inline__
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t vld2q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t vld2q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t vld2q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t vld2q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t vld2q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t vld2q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t vld4q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t vld4q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) +int32x4x4_t vld4q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) +int32x4x4_t vld4q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t vld4q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t vld4q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t vld4q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t vld4q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t vld4q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t vld4q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t vld4q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t vld4q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t vldrbq_gather_offset(const int8_t *, 
uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16))) +int16x8_t 
vldrbq_s16(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32))) +int32x4_t vldrbq_s32(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8))) +int8x16_t vldrbq_s8(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16))) +uint16x8_t vldrbq_u16(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32))) +uint32x4_t vldrbq_u32(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8))) +uint8x16_t vldrbq_u8(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16))) +int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32))) +int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8))) +int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16))) +uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32))) +uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8))) +uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64))) +int64x2_t vldrdq_gather_base_s64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64))) +uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64))) +int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64))) +uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64))) +int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64))) +uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64))) +int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64))) +uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16))) +int16x8_t vldrhq_s16(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32))) +int32x4_t vldrhq_s32(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16))) +uint16x8_t vldrhq_u16(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32))) +uint32x4_t vldrhq_u32(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16))) +int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32))) +int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16))) +uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32))) +uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32))) +int32x4_t vldrwq_gather_base_s32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32))) +uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32))) +int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32))) +uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32))) +int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32))) +uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32))) +int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32))) +uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, 
uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32))) +int32x4_t vldrwq_s32(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32))) +uint32x4_t vldrwq_u32(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32))) +int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32))) +uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t vmaxaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t vmaxaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t vmaxaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t vmaxavq_s16(uint16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t vmaxavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t vmaxavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t vmaxavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t vmaxavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t vmaxavq(uint8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t vmaxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t vmaxq(int16x8_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t vmaxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t vmaxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) +int8x16_t vmaxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) +int8x16_t vmaxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t vmaxq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t vmaxq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t vmaxq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t vmaxvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t vmaxvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t vmaxvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t vmaxvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t vmaxvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t vmaxvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t vmaxvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t vmaxvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t vmaxvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t vmaxvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t vmaxvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t vmaxvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t vmaxvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t vmaxvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t vmaxvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t vminaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t vminaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t vminaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t vminaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t vminaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t vminaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t vminavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t vminavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t vminavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t vminavq_s16(uint16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t vminavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t vminavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t vminavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t vminavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t vminavq(uint8_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t vminq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t vminq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t vminq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t vminq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t vminq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t vminq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t vminq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t vminq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t vminq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t vminq(uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t vminq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t vminq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t vminvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t vminvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t vminvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t vminvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t vminvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t vminvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t vminvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t vminvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t vminvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t vminvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t vminvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t vminvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t vminvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t vminvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t vminvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t vminvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t vminvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t vminvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) +int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) +int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t vmladavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t vmladavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t vmladavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t vmladavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t vmladavq(int16x8_t, int16x8_t); 
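
As a usage sketch for the multiply-accumulate-across intrinsics declared above (an editorial illustration, not part of the diff hunk): vmladavaq_s16 folds sum(a[i]*b[i]) over all eight lanes into a 32-bit scalar accumulator, and the _p variant with a vctp16q tail predicate handles a trailing partial vector. Assumes an MVE-enabled target, e.g. clang --target=arm-none-eabi -march=armv8.1-m.main+mve; dot_s16 is a hypothetical helper name.

#include <arm_mve.h>
#include <stdint.h>

int32_t dot_s16(const int16_t *a, const int16_t *b, int n) {
  int32_t acc = 0;
  int i = 0;
  for (; i + 8 <= n; i += 8)
    /* acc += sum of the 8 lane products a[i..i+7] * b[i..i+7] */
    acc = vmladavaq_s16(acc, vld1q_s16(&a[i]), vld1q_s16(&b[i]));
  if (i < n) {
    /* The predicate covers only the n-i remaining lanes; vld1q_z zeroes
       the inactive lanes and vmladavaq_p accumulates only active ones. */
    mve_pred16_t p = vctp16q((uint32_t)(n - i));
    acc = vmladavaq_p_s16(acc, vld1q_z_s16(&a[i], p),
                          vld1q_z_s16(&b[i], p), p);
  }
  return acc; /* note: a 32-bit accumulator can wrap for long inputs */
}
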
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t vmladavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t vmladavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t vmladavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t vmladavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t vmladavq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t vmladavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t vmladavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t vmladavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t vmladavq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t vmladavq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t vmladavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t vmladavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t vmladavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t vmladavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t vmladavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t vmladavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t vmlaldavaxq(int64_t, 
int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t vmlaldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t vmlaldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t vmlaldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t vmlaldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t vmlaldavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t vmlaldavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t vmlaldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t vmlaldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t vmlaldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t vmlaldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t vmlaq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t vmlaq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t vmlaq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t vmlaq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) +int8x16_t vmlaq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) 
+int8x16_t vmlaq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t vmlaq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t vmlaq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t vmlaq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t vmlasq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t vmlasq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t vmlasq_n_s32(int32x4_t, int32x4_t, int32_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t vmlasq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t vmlasq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t vmlasq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t vmlasq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t vmlasq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t vmlasq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, 
int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t vmlsdavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t vmlsdavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t vmlsdavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t vmlsdavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t 
vmlsdavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t vmlsdavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t vmlsdavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t vmlsdavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t vmlsdavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t vmlsdavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t vmlsdavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t vmlsdavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, 
int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t vmlsldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t vmlsldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t vmlsldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t vmlsldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t vmlsldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t vmlsldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t 
vmlsldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t vmlsldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t vmovlbq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t vmovlbq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t vmovlbq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t vmovlbq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t vmovlbq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t vmovlbq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t vmovlbq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t vmovlbq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t vmovlbq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t vmovlbq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t vmovlbq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t vmovlbq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t vmovlbq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t vmovlbq_x(uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t vmovlbq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t vmovlbq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t vmovltq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t vmovltq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t vmovltq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t vmovltq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t vmovltq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t vmovltq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t vmovltq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t vmovltq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t vmovltq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t vmovltq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t vmovltq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t vmovltq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t vmovltq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t vmovltq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t vmovltq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t vmovltq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t vmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t vmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t vmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t vmovnbq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t vmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t vmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t vmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t vmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t vmovntq_m(int16x8_t, 
int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t vmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t vmovntq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t vmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t vmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t vmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t vmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t vmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t vmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) +uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) 
+uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t vmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t vmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t vmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t vmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t vmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t vmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t vmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t vmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t vmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t vmullbq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t vmullbq_int(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t vmullbq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t vmulltq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t vmulltq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t vmulltq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t vmulltq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t vmulq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t vmulq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t vmulq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t vmulq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t vmulq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t vmulq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t vmulq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t vmulq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t vmulq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t vmulq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t vmulq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t vmulq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t vmulq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t vmulq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t vmulq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t vmulq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t vmulq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t vmulq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t vmulq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t vmulq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t vmulq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t vmulq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t vmulq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t vmulq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))
+int16x8_t vmvnq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))
+int32x4_t vmvnq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))
+uint16x8_t vmvnq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))
+uint32x4_t vmvnq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t vmvnq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t vmvnq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t vmvnq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t vmvnq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t vmvnq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t vmvnq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t vmvnq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t vmvnq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t vmvnq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t vmvnq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t vmvnq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t vmvnq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
+int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
+int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
+uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
+uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t vmvnq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t vmvnq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t vmvnq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t vmvnq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t vmvnq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t vmvnq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t vmvnq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t vmvnq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t vmvnq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t vmvnq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t vmvnq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t vmvnq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t vnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t vnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t vnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t vnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t vnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t vnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t vnegq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t vnegq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t vnegq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t vnegq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t vnegq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t vnegq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t vornq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t vornq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t vornq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t vornq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t vornq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t vornq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t vornq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t vornq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t vornq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t vornq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t vornq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t vornq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t vorrq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t vorrq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t vorrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t vorrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t vorrq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t vorrq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t vorrq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t vorrq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t vorrq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t vorrq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t vorrq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t vorrq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t vorrq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t vorrq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t vorrq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))
+mve_pred16_t vpnot(mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t vqabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t vqabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t vqabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t vqabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t vqabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t vqabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t vqaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t vqaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t vqaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t vqaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t vqaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t vqaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t vqaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t vqaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t vqaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t vqaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t vqaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t vqaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t vqaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t vqaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t vqaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t vqaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t vqaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t vqaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t vqaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t vqaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t vqaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t vqdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t vqdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t vqdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t vqdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t vqdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t vqdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t vqdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t vqdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t vqdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t vqdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t vqdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t vqdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t vqdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t vqdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t vqdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t vqdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t vqdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t vqdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16))) +int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32))) +int64x2_t vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32))) +int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16))) +int32x4_t vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16))) +int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32))) +int64x2_t vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32))) +int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16))) +int32x4_t vqdmullbq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16))) +int32x4_t vqdmullbq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32))) +int64x2_t vqdmullbq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32))) +int64x2_t vqdmullbq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16))) +int32x4_t vqdmullbq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16))) +int32x4_t vqdmullbq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32))) +int64x2_t vqdmullbq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32))) +int64x2_t vqdmullbq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16))) +int32x4_t vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16))) +int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32))) +int64x2_t vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32))) +int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16))) +int32x4_t vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16))) +int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32))) +int64x2_t vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32))) +int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16))) +int32x4_t vqdmulltq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16))) +int32x4_t vqdmulltq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32))) +int64x2_t vqdmulltq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32))) +int64x2_t vqdmulltq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16))) +int32x4_t vqdmulltq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16))) +int32x4_t vqdmulltq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t vqdmulltq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t vqdmulltq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t vqmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t vqmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t vqmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t vqmovnbq(int16x8_t, int32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t vqmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t vqmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t vqmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t vqmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) +int16x8_t vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) +int16x8_t vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t vqmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t vqmovntq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t vqmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t vqmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t vqmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t vqmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t vqmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t vqmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t vqmovunbq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t vqmovunbq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t vqmovunbq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t vqmovunbq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t vqmovuntq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t vqmovuntq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t vqmovuntq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t vqmovuntq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t vqnegq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t vqnegq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t 
vqnegq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t vqnegq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t vqnegq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t vqnegq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t vqrdmladhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t vqrdmladhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t vqrdmladhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t vqrdmladhxq_m(int8x16_t, int8x16_t, 
int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t vqrdmlahq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t vqrdmlahq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t vqrdmlahq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t 
vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t vqrdmlashq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t vqrdmlashq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t vqrdmlashq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t vqrdmulhq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t vqrdmulhq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t vqrdmulhq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t vqrdmulhq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t vqrdmulhq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t vqrdmulhq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t vqrdmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t vqrdmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t vqrdmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t vqrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t vqrshlq(int16x8_t, int32_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t vqrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t vqrshlq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t vqrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t vqrshlq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t vqrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t vqrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t vqrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t vqrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t vqrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t vqrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t vqrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t vqrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t vqrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t vqrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t vqrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t vqrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t 
vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t vqrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t vqrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t 
vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t vqshlq_m_r_s8(int8x16_t, 
int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t vqshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t vqshlq_n(int16x8_t, 
int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t vqshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t vqshlq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t vqshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t vqshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t vqshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t vqshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t vqshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t vqshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t vqshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t vqshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t vqshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t vqshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t vqshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t vqshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t vqshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t vqshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t vqshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t vqshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t vqshlq_r(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16))) +int16x8_t vqshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16))) +int16x8_t vqshlq(int16x8_t, int16x8_t); +static __inline__ 
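+/* [Editorial annotation -- not part of the upstream header.] The vqshlq_*
+ * declarations in this section are lane-wise saturating left shifts: `_n`
+ * takes an immediate, `_r` shifts every lane by a scalar, and the plain
+ * vector form takes per-lane shift counts (per the ACLE, negative counts
+ * shift right). A minimal usage sketch, assuming <arm_mve.h> and an
+ * MVE-enabled target such as -mcpu=cortex-m55; `src`, `dst` and `n` are
+ * hypothetical:
+ *
+ *   int16x8_t v = vld1q_s16(src);       // load 8 signed halfwords
+ *   int16x8_t d = vqshlq_n_s16(v, 1);   // saturating v << 1, per lane
+ *   mve_pred16_t p = vctp16q(n);        // predicate covering n tail lanes
+ *   d = vqshlq_m_n_s16(d, v, 1, p);     // merging: inactive lanes keep d
+ *   vst1q_s16(dst, d);
+ */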
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32))) +int32x4_t vqshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32))) +int32x4_t vqshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8))) +int8x16_t vqshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8))) +int8x16_t vqshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16))) +uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16))) +uint16x8_t vqshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32))) +uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32))) +uint32x4_t vqshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8))) +uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8))) +uint8x16_t vqshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16))) +uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16))) +uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32))) +uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32))) +uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8))) +uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8))) +uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16))) +uint16x8_t vqshluq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16))) +uint16x8_t vqshluq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32))) +uint32x4_t vqshluq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32))) +uint32x4_t vqshluq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8))) +uint8x16_t vqshluq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8))) +uint8x16_t vqshluq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16))) +int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
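+/* [Editorial annotation -- not part of the upstream header.] vqshluq_* is
+ * the signed-to-unsigned variant of the saturating shift: it left-shifts
+ * signed input lanes and saturates the result to the unsigned range, e.g.
+ * (with an assumed int8x16_t s):
+ *   uint8x16_t u = vqshluq_n_s8(s, 3);  // s8 lanes << 3, clamped to [0,255]
+ */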
__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16))) +int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32))) +int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32))) +int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16))) +uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16))) +uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32))) +uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32))) +uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16))) +int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16))) +int8x16_t vqshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32))) +int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32))) +int16x8_t vqshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16))) +uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16))) +uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32))) +uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32))) +uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16))) +int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16))) +int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32))) +int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32))) +int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16))) +uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16))) +uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ 
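+/* [Editorial annotation -- not part of the upstream header.] The
+ * vqshrnbq/vqshrntq pairs (and the rounding vqrshr* and signed-to-unsigned
+ * vqshrun*/vqrshrun* relatives nearby) are saturating shift-right-and-narrow
+ * operations: the wide second operand is shifted right by the immediate,
+ * saturated to half width, and written into the bottom (`b`, even) or top
+ * (`t`, odd) half-width lanes of the first operand, which supplies the
+ * remaining lanes. Sketch, with assumed int16x8_t wide/wide2:
+ *   int8x16_t packed = vqshrnbq_n_s16(packed, wide, 4);  // even lanes
+ *   packed = vqshrntq_n_s16(packed, wide2, 4);           // odd lanes
+ */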
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32))) +uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32))) +uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16))) +int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16))) +int8x16_t vqshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32))) +int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32))) +int16x8_t vqshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16))) +uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16))) +uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32))) +uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32))) +uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16))) +uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16))) +uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32))) +uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32))) +uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16))) +uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16))) +uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32))) +uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32))) +uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16))) +uint8x16_t vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16))) +uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32))) +uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32))) +uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16))) +uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16))) +uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32))) +uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32))) +uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16))) +int16x8_t vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16))) +int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32))) +int32x4_t vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32))) +int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8))) +int8x16_t vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8))) +int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16))) +uint16x8_t vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16))) +uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32))) +uint32x4_t vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32))) +uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8))) +uint8x16_t vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8))) +uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16))) +int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16))) +int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32))) +int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32))) +int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8))) +int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8))) +int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16))) +uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16))) +uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32))) +uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32))) +uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8))) +uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8))) +uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16))) +int16x8_t vqsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16))) +int16x8_t vqsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32))) +int32x4_t vqsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32))) +int32x4_t vqsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8))) +int8x16_t vqsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8))) +int8x16_t vqsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16))) +uint16x8_t vqsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16))) +uint16x8_t vqsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32))) +uint32x4_t vqsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32))) +uint32x4_t vqsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8))) +uint8x16_t vqsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8))) +uint8x16_t vqsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16))) +int16x8_t vqsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16))) +int16x8_t vqsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32))) 
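+/* [Editorial annotation -- not part of the upstream header.] vqsubq_* is
+ * lane-wise saturating subtraction; the `_n` forms broadcast a scalar
+ * subtrahend. For example, vqsubq_n_u8(v, 16) subtracts 16 from every u8
+ * lane, clamping at 0 instead of wrapping. */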
+int32x4_t vqsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32))) +int32x4_t vqsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8))) +int8x16_t vqsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8))) +int8x16_t vqsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16))) +uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16))) +uint16x8_t vqsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32))) +uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32))) +uint32x4_t vqsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8))) +uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8))) +uint8x16_t vqsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32))) +int16x8_t vreinterpretq_s16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32))) +int16x8_t vreinterpretq_s16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64))) +int16x8_t vreinterpretq_s16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64))) +int16x8_t vreinterpretq_s16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8))) +int16x8_t vreinterpretq_s16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8))) +int16x8_t vreinterpretq_s16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16))) +int16x8_t vreinterpretq_s16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16))) +int16x8_t vreinterpretq_s16(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32))) +int16x8_t vreinterpretq_s16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32))) +int16x8_t vreinterpretq_s16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64))) +int16x8_t vreinterpretq_s16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64))) +int16x8_t vreinterpretq_s16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t vreinterpretq_s16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t vreinterpretq_s16(uint8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16))) +int32x4_t vreinterpretq_s32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16))) +int32x4_t vreinterpretq_s32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64))) +int32x4_t vreinterpretq_s32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64))) +int32x4_t vreinterpretq_s32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8))) +int32x4_t vreinterpretq_s32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8))) +int32x4_t vreinterpretq_s32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16))) +int32x4_t vreinterpretq_s32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16))) +int32x4_t vreinterpretq_s32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32))) +int32x4_t vreinterpretq_s32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32))) +int32x4_t vreinterpretq_s32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64))) +int32x4_t vreinterpretq_s32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64))) +int32x4_t vreinterpretq_s32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t vreinterpretq_s32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t vreinterpretq_s32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16))) +int64x2_t vreinterpretq_s64_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16))) +int64x2_t vreinterpretq_s64(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32))) +int64x2_t vreinterpretq_s64_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32))) +int64x2_t vreinterpretq_s64(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8))) +int64x2_t vreinterpretq_s64_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8))) +int64x2_t vreinterpretq_s64(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16))) +int64x2_t vreinterpretq_s64_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16))) +int64x2_t vreinterpretq_s64(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32))) +int64x2_t vreinterpretq_s64_u32(uint32x4_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32))) +int64x2_t vreinterpretq_s64(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64))) +int64x2_t vreinterpretq_s64_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64))) +int64x2_t vreinterpretq_s64(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t vreinterpretq_s64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t vreinterpretq_s64(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16))) +int8x16_t vreinterpretq_s8_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16))) +int8x16_t vreinterpretq_s8(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32))) +int8x16_t vreinterpretq_s8_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32))) +int8x16_t vreinterpretq_s8(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64))) +int8x16_t vreinterpretq_s8_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64))) +int8x16_t vreinterpretq_s8(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16))) +int8x16_t vreinterpretq_s8_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16))) +int8x16_t vreinterpretq_s8(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32))) +int8x16_t vreinterpretq_s8_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32))) +int8x16_t vreinterpretq_s8(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64))) +int8x16_t vreinterpretq_s8_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64))) +int8x16_t vreinterpretq_s8(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t vreinterpretq_s8_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t vreinterpretq_s8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16))) +uint16x8_t vreinterpretq_u16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16))) +uint16x8_t vreinterpretq_u16(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32))) +uint16x8_t vreinterpretq_u16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32))) +uint16x8_t vreinterpretq_u16(int32x4_t); +static 
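+/* [Editorial annotation -- not part of the upstream header.] The
+ * vreinterpretq_X_Y functions reinterpret the 128-bit vector register as
+ * another element type without changing any bits; no instruction is
+ * expected to be emitted. Sketch, with an assumed int32x4_t w:
+ *   uint16x8_t h = vreinterpretq_u16_s32(w); // view 4 s32 lanes as 8 u16
+ */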
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64))) +uint16x8_t vreinterpretq_u16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64))) +uint16x8_t vreinterpretq_u16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8))) +uint16x8_t vreinterpretq_u16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8))) +uint16x8_t vreinterpretq_u16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32))) +uint16x8_t vreinterpretq_u16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32))) +uint16x8_t vreinterpretq_u16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64))) +uint16x8_t vreinterpretq_u16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64))) +uint16x8_t vreinterpretq_u16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t vreinterpretq_u16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t vreinterpretq_u16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16))) +uint32x4_t vreinterpretq_u32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16))) +uint32x4_t vreinterpretq_u32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32))) +uint32x4_t vreinterpretq_u32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32))) +uint32x4_t vreinterpretq_u32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64))) +uint32x4_t vreinterpretq_u32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64))) +uint32x4_t vreinterpretq_u32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8))) +uint32x4_t vreinterpretq_u32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8))) +uint32x4_t vreinterpretq_u32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16))) +uint32x4_t vreinterpretq_u32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16))) +uint32x4_t vreinterpretq_u32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64))) +uint32x4_t vreinterpretq_u32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64))) +uint32x4_t vreinterpretq_u32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t 
vreinterpretq_u32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t vreinterpretq_u32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16))) +uint64x2_t vreinterpretq_u64_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16))) +uint64x2_t vreinterpretq_u64(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32))) +uint64x2_t vreinterpretq_u64_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32))) +uint64x2_t vreinterpretq_u64(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64))) +uint64x2_t vreinterpretq_u64_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64))) +uint64x2_t vreinterpretq_u64(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8))) +uint64x2_t vreinterpretq_u64_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8))) +uint64x2_t vreinterpretq_u64(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16))) +uint64x2_t vreinterpretq_u64_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16))) +uint64x2_t vreinterpretq_u64(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32))) +uint64x2_t vreinterpretq_u64_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32))) +uint64x2_t vreinterpretq_u64(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t vreinterpretq_u64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t vreinterpretq_u64(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t vreinterpretq_u8_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t vreinterpretq_u8(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t vreinterpretq_u8_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t vreinterpretq_u8(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t vreinterpretq_u8_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t vreinterpretq_u8(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t vreinterpretq_u8_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t vreinterpretq_u8(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t vreinterpretq_u8_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t vreinterpretq_u8(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t vreinterpretq_u8_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t vreinterpretq_u8(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t vreinterpretq_u8_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t vreinterpretq_u8(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8))) +int8x16_t vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8))) +int8x16_t vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8))) +uint8x16_t vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8))) +uint8x16_t vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8))) +int8x16_t vrev16q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8))) +int8x16_t vrev16q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8))) +uint8x16_t vrev16q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8))) +uint8x16_t vrev16q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8))) +int8x16_t vrev16q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8))) +int8x16_t vrev16q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8))) +uint8x16_t vrev16q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8))) +uint8x16_t vrev16q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16))) +int16x8_t vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16))) +int16x8_t vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8))) +int8x16_t vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8))) +int8x16_t vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16))) +uint16x8_t vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16))) +uint16x8_t vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8))) +uint8x16_t vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8))) +uint8x16_t vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16))) +int16x8_t vrev32q_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16))) +int16x8_t vrev32q(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8))) +int8x16_t vrev32q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8))) +int8x16_t vrev32q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16))) +uint16x8_t vrev32q_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16))) +uint16x8_t vrev32q(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8))) +uint8x16_t vrev32q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8))) +uint8x16_t vrev32q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16))) +int16x8_t vrev32q_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16))) +int16x8_t vrev32q_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8))) +int8x16_t vrev32q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8))) +int8x16_t vrev32q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16))) +uint16x8_t vrev32q_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16))) +uint16x8_t vrev32q_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8))) +uint8x16_t vrev32q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8))) +uint8x16_t vrev32q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16))) +int16x8_t vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16))) +int16x8_t vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32))) +int32x4_t vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32))) 
+int32x4_t vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8))) +int8x16_t vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8))) +int8x16_t vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16))) +uint16x8_t vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16))) +uint16x8_t vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32))) +uint32x4_t vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32))) +uint32x4_t vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8))) +uint8x16_t vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8))) +uint8x16_t vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16))) +int16x8_t vrev64q_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16))) +int16x8_t vrev64q(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32))) +int32x4_t vrev64q_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32))) +int32x4_t vrev64q(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8))) +int8x16_t vrev64q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8))) +int8x16_t vrev64q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16))) +uint16x8_t vrev64q_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16))) +uint16x8_t vrev64q(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32))) +uint32x4_t vrev64q_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32))) +uint32x4_t vrev64q(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8))) +uint8x16_t vrev64q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8))) +uint8x16_t vrev64q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16))) +int16x8_t vrev64q_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16))) +int16x8_t vrev64q_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32))) +int32x4_t vrev64q_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
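+/* [Editorial annotation -- not part of the upstream header.] vrev16q,
+ * vrev32q and vrev64q reverse the element order inside each 16-, 32- or
+ * 64-bit container: e.g. vrev16q_u8 swaps the two bytes of every halfword
+ * (a lane-wise byte swap), and vrev64q_u32 swaps the two u32 lanes of each
+ * doubleword. */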
__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32))) +int32x4_t vrev64q_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8))) +int8x16_t vrev64q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8))) +int8x16_t vrev64q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16))) +uint16x8_t vrev64q_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16))) +uint16x8_t vrev64q_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32))) +uint32x4_t vrev64q_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32))) +uint32x4_t vrev64q_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8))) +uint8x16_t vrev64q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8))) +uint8x16_t vrev64q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16))) +int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16))) +int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32))) +int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32))) +int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8))) +int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8))) +int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16))) +uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16))) +uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32))) +uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32))) +uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8))) +uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8))) +uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16))) 
+int16x8_t vrhaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16))) +int16x8_t vrhaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32))) +int32x4_t vrhaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32))) +int32x4_t vrhaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8))) +int8x16_t vrhaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8))) +int8x16_t vrhaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16))) +uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16))) +uint16x8_t vrhaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32))) +uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32))) +uint32x4_t vrhaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8))) +uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8))) +uint8x16_t vrhaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16))) +int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16))) +int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32))) +int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32))) +int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8))) +int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8))) +int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16))) +uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16))) +uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32))) +uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32))) +uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8))) +uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
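+/* [Editorial annotation -- not part of the upstream header.] vrhaddq_* is
+ * the rounding halving add: each lane computes (a + b + 1) >> 1 in widened
+ * arithmetic, so the average cannot overflow. E.g.:
+ *   uint8x16_t avg = vrhaddq_u8(x, y);  // per-lane rounded average
+ */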
__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8))) +uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32))) +int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32))) +int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32))) +uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32))) +uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32))) +int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32))) +int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32))) +uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32))) +uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32))) +int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32))) +int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32))) +int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32))) +int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32))) +int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32))) +int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32))) +uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32))) +uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32))) +int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32))) +int64_t vrmlaldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32))) +uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32))) +uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32))) +int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32))) +int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32))) +int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32))) +int64_t vrmlaldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t vrmlsldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t vrmlsldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) +int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) +int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t vrmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t vrmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t vrmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t vrmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t vrmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t vrmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t vrmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t vrmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ 
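+/*
+ * Illustrative sketch (editorial; assumes an MVE target and <arm_mve.h>).
+ * vrmulhq returns the most significant half of the rounded double-width
+ * product of each pair of lanes, so the intermediate never needs widening:
+ *
+ *   // high 16 bits of the rounded 32-bit product, per lane
+ *   int16x8_t mul_high(int16x8_t a, int16x8_t b) {
+ *       return vrmulhq(a, b);
+ *   }
+ */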
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t vrmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t 
vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t vrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t vrshlq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t vrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t vrshlq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t vrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t vrshlq(int8x16_t, int32_t); +static __inline__ 
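+/*
+ * Illustrative sketch (editorial; assumes an MVE target and <arm_mve.h>).
+ * The vrshlq family shifts each lane by a signed, per-lane amount: positive
+ * amounts shift left, negative amounts perform a rounding shift right.
+ *
+ *   int16x8_t rounding_shift(int16x8_t v, int16x8_t amounts) {
+ *       return vrshlq(v, amounts);  // overload of vrshlq_s16, declared below
+ *   }
+ */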
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t vrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t vrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t vrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t vrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t vrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t vrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t vrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t vrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t vrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t vrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t vrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t vrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t vrshlq_x(int8x16_t, int8x16_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t vrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t vrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t vrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t vrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) 
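+/*
+ * Illustrative sketch (editorial; assumes an MVE target and <arm_mve.h>).
+ * vrshrnbq/vrshrntq perform a rounding shift right and narrow to half the
+ * element width, writing the bottom (even) or top (odd) lanes of the
+ * destination, so a pair of calls packs two wide vectors into one:
+ *
+ *   // pack two int16x8_t into one int8x16_t with rounding >> 3
+ *   int8x16_t pack_pair(int8x16_t dst, int16x8_t even_src, int16x8_t odd_src) {
+ *       dst = vrshrnbq(dst, even_src, 3);  // fills even byte lanes
+ *       dst = vrshrntq(dst, odd_src, 3);   // fills odd byte lanes
+ *       return dst;
+ *   }
+ */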
+int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) +int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t vrshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t vrshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t vrshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t vrshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t vrshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t vrshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t vrshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t vrshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t vrshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t vrshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t vrshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t vrshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t vsbciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t vsbciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t vsbciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t vsbcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t vsbcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t vsbcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t vsetq_lane(int16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t vsetq_lane(int32_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t vsetq_lane(int64_t, int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t vsetq_lane(int8_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t 
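+/*
+ * Illustrative sketch (editorial; assumes an MVE target and <arm_mve.h>;
+ * vldrwq_u32/vstrwq_u32 are declared elsewhere in this header). vsbciq
+ * starts a borrow chain (carry-in fixed to 1, i.e. no borrow) and vsbcq
+ * continues it through the pointed-to carry flag, so consecutive 32-bit
+ * limbs of a large integer can be subtracted across vectors:
+ *
+ *   // r = a - b for 256-bit integers stored as eight 32-bit limbs
+ *   void sub256(const uint32_t *a, const uint32_t *b, uint32_t *r) {
+ *       unsigned carry;
+ *       uint32x4_t lo = vsbciq(vldrwq_u32(a), vldrwq_u32(b), &carry);
+ *       uint32x4_t hi = vsbcq(vldrwq_u32(a + 4), vldrwq_u32(b + 4), &carry);
+ *       vstrwq_u32(r, lo);
+ *       vstrwq_u32(r + 4, hi);
+ *   }
+ */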
vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t vshlcq_s16(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t vshlcq(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t vshlcq_s32(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t vshlcq(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t vshlcq_s8(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t vshlcq(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t vshlcq_u16(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t vshlcq(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t vshlcq_u32(uint32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t vshlcq(uint32x4_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t vshlcq_u8(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t vshlcq(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); 
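+/*
+ * Illustrative sketch (editorial; assumes an MVE target and <arm_mve.h>).
+ * vshlcq shifts the whole 128-bit vector left with carry: bits shifted out
+ * of each lane feed the next, while *carry supplies the incoming low bits
+ * and receives the bits shifted out at the top (shift amount 1..32):
+ *
+ *   // shift a 128-bit value left by 4, threading carry bits through *carry
+ *   uint32x4_t shl128_by4(uint32x4_t v, uint32_t *carry) {
+ *       return vshlcq(v, carry, 4);
+ *   }
+ */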
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t vshllbq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t vshllbq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t vshllbq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t vshllbq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t vshllbq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t vshllbq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t vshllbq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t vshllbq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t vshlltq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t vshlltq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t vshlltq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t vshlltq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t vshlltq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t vshlltq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t vshlltq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t vshlltq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) 
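+/*
+ * Illustrative sketch (editorial; assumes an MVE target and <arm_mve.h>).
+ * vshllbq/vshlltq widen the bottom (even) or top (odd) lanes to twice the
+ * element width while shifting left, a common first step before wide
+ * arithmetic:
+ *
+ *   // widen 16 bytes into two uint16x8_t vectors, scaled by << 2
+ *   void widen_u8(uint8x16_t v, uint16x8_t *even, uint16x8_t *odd) {
+ *       *even = vshllbq(v, 2);  // even byte lanes -> 16-bit
+ *       *odd  = vshlltq(v, 2);  // odd byte lanes  -> 16-bit
+ *   }
+ */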
+uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) +uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t vshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t vshlq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t vshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t vshlq_n(int32x4_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t vshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t vshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t vshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t vshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t vshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t vshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t vshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t vshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t vshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t vshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t vshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t vshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t vshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t vshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t vshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t vshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) +uint32x4_t vshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) +uint32x4_t vshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8))) +uint8x16_t vshlq_r_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8))) +uint8x16_t vshlq_r(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16))) +int16x8_t vshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16))) +int16x8_t vshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32))) +int32x4_t vshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32))) +int32x4_t vshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8))) +int8x16_t vshlq_s8(int8x16_t, int8x16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8))) +int8x16_t vshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16))) +uint16x8_t vshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16))) +uint16x8_t vshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32))) +uint32x4_t vshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32))) +uint32x4_t vshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8))) +uint8x16_t vshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8))) +uint8x16_t vshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16))) +int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16))) +int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32))) +int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32))) +int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8))) +int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8))) +int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16))) +uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16))) +uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32))) +uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32))) +uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8))) +uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8))) +uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16))) +int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16))) +int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32))) +int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32))) +int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8))) +int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8))) +int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16))) +uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16))) +uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32))) +uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32))) +uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8))) +uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8))) +uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16))) +int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16))) +int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32))) +int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32))) +int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16))) +uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16))) +uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32))) +uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32))) +uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16))) +int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16))) +int8x16_t vshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32))) +int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32))) +int16x8_t vshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16))) +uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16))) 
+uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32))) +uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32))) +uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16))) +int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16))) +int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32))) +int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32))) +int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16))) +uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16))) +uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32))) +uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32))) +uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16))) +int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16))) +int8x16_t vshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32))) +int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32))) +int16x8_t vshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16))) +uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16))) +uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32))) +uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32))) +uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16))) +int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16))) +int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32))) +int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32))) +int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8))) +int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8))) +int8x16_t vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16))) +uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16))) +uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32))) +uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32))) +uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8))) +uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8))) +uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16))) +int16x8_t vshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16))) +int16x8_t vshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32))) +int32x4_t vshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32))) +int32x4_t vshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8))) +int8x16_t vshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8))) +int8x16_t vshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16))) +uint16x8_t vshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16))) +uint16x8_t vshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32))) +uint32x4_t vshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32))) +uint32x4_t vshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8))) +uint8x16_t vshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8))) +uint8x16_t vshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16))) +int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16))) +int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32))) +int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32))) +int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8))) +int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8))) +int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16))) +uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16))) +uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32))) +uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32))) +uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8))) +uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8))) +uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16))) +int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16))) +int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32))) +int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32))) +int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8))) +int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8))) +int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16))) +uint16x8_t vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16))) +uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32))) +uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32))) +uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8))) +uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8))) +uint8x16_t vsliq_m(uint8x16_t, 
uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16))) +int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16))) +int16x8_t vsliq(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32))) +int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32))) +int32x4_t vsliq(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8))) +int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8))) +int8x16_t vsliq(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16))) +uint16x8_t vsliq_n_u16(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16))) +uint16x8_t vsliq(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32))) +uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32))) +uint32x4_t vsliq(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8))) +uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8))) +uint8x16_t vsliq(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16))) +int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16))) +int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32))) +int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32))) +int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8))) +int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8))) +int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16))) +uint16x8_t vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16))) +uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32))) +uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32))) +uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8))) +uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8))) +uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16))) +int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16))) +int16x8_t vsriq(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32))) +int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32))) +int32x4_t vsriq(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8))) +int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8))) +int8x16_t vsriq(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16))) +uint16x8_t vsriq_n_u16(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16))) +uint16x8_t vsriq(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32))) +uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32))) +uint32x4_t vsriq(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8))) +uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8))) +uint8x16_t vsriq(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16))) +void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16))) +void vst1q_p(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32))) +void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32))) +void vst1q_p(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8))) +void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8))) +void vst1q_p(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16))) +void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16))) +void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32))) +void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32))) +void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8))) +void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8))) +void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16))) +void vst1q_s16(int16_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16))) +void vst1q(int16_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32))) +void vst1q_s32(int32_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32))) +void vst1q(int32_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8))) +void vst1q_s8(int8_t *, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8))) +void vst1q(int8_t *, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16))) +void vst1q_u16(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16))) +void vst1q(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32))) +void vst1q_u32(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32))) +void vst1q(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8))) +void vst1q_u8(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8))) +void vst1q(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16))) +void vst2q_s16(int16_t *, int16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16))) +void vst2q(int16_t *, int16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32))) +void vst2q_s32(int32_t *, int32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32))) +void vst2q(int32_t *, int32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8))) +void vst2q_s8(int8_t *, int8x16x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8))) +void vst2q(int8_t *, int8x16x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16))) +void vst2q_u16(uint16_t *, uint16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16))) +void vst2q(uint16_t *, uint16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32))) +void vst2q_u32(uint32_t *, uint32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32))) +void vst2q(uint32_t *, uint32x4x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8))) +void vst2q_u8(uint8_t *, uint8x16x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8))) +void vst2q(uint8_t *, uint8x16x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16))) +void vst4q_s16(int16_t *, int16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16))) +void vst4q(int16_t *, int16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32))) +void vst4q_s32(int32_t *, int32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32))) +void vst4q(int32_t *, int32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8))) +void vst4q_s8(int8_t *, int8x16x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8))) +void vst4q(int8_t *, int8x16x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16))) +void vst4q_u16(uint16_t *, uint16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16))) +void vst4q(uint16_t *, uint16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32))) +void vst4q_u32(uint32_t *, uint32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32))) +void vst4q(uint32_t *, uint32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8))) +void vst4q_u8(uint8_t *, uint8x16x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8))) +void vst4q(uint8_t *, uint8x16x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16))) +void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16))) +void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32))) +void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32))) +void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8))) +void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8))) +void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16))) +void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16))) +void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32))) +void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32))) +void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8))) +void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8))) +void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16))) +void vstrbq_s16(int8_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16))) +void vstrbq(int8_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32))) +void vstrbq_s32(int8_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32))) +void vstrbq(int8_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8))) +void vstrbq_s8(int8_t *, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8))) +void vstrbq(int8_t *, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16))) +void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16))) +void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32))) +void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32))) +void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8))) +void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8))) +void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16))) +void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16))) +void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32))) +void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32))) +void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8))) +void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8))) +void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16))) +void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16))) +void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32))) +void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32))) +void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8))) +void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8))) +void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16))) +void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16))) +void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32))) +void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32))) +void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8))) +void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8))) +void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16))) +void vstrbq_u16(uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16))) +void vstrbq(uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32))) +void vstrbq_u32(uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32))) +void vstrbq(uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8))) +void vstrbq_u8(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8))) +void vstrbq(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64))) +void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64))) +void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64))) +void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64))) +void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64))) +void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64))) +void vstrdq_scatter_base(uint64x2_t, int, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64))) +void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64))) +void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64))) +void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64))) +void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64))) +void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64))) +void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64))) +void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64))) +void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64))) +void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64))) +void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64))) +void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64))) +void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64))) +void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64))) +void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64))) +void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64))) +void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64))) +void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64))) +void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64))) +void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64))) +void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64))) +void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64))) +void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64))) +void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64))) +void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64))) +void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64))) +void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16))) +void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16))) +void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32))) +void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32))) +void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16))) +void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16))) +void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32))) +void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32))) +void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16))) +void vstrhq_s16(int16_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16))) +void vstrhq(int16_t *, 
int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32))) +void vstrhq_s32(int16_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32))) +void vstrhq(int16_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16))) +void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16))) +void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32))) +void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32))) +void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16))) +void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16))) +void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32))) +void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32))) +void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16))) +void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16))) +void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32))) +void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32))) +void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16))) +void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16))) +void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32))) +void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32))) +void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16))) +void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16))) +void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32))) +void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32))) +void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16))) +void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16))) +void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32))) +void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32))) +void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16))) +void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16))) +void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32))) +void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32))) +void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16))) +void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16))) +void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32))) +void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32))) +void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16))) +void vstrhq_u16(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16))) +void vstrhq(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32))) +void vstrhq_u32(uint16_t *, uint32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32))) +void vstrhq(uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32))) +void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32))) +void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32))) +void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32))) +void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32))) +void vstrwq_s32(int32_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32))) +void vstrwq(int32_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32))) +void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32))) +void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32))) +void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32))) +void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32))) +void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32))) +void vstrwq_scatter_base(uint32x4_t, int, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32))) +void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32))) +void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32))) +void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32))) +void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32))) +void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32))) +void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32))) +void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t vsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t vsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t vsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t vsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t vsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t vsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t vsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t vsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t vsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t vsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t vsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t vsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t vuninitializedq(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t vuninitializedq(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t vuninitializedq(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t vuninitializedq(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t vuninitializedq_u8();
+
+#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t vabdq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t vabdq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t vabdq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t vabdq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t vabsq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t vabsq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t vabsq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t vabsq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t vabsq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t vabsq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t vabsq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t vabsq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t vaddq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t vaddq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t vaddq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t vaddq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t vaddq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t vaddq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t vandq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t vandq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t vandq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t vandq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t vbicq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t vbicq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t vbicq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t vbicq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t vbrsrq_n_f16(float16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t vbrsrq(float16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t vbrsrq_n_f32(float32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t vbrsrq(float32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t vcaddq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t vcaddq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t vcaddq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t vcaddq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t vcaddq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t vcmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t vcmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t vcmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t vcmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t vcmulq_rot180(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t vcmulq_rot180(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t vcmulq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t vcmulq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t vcmulq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t vcmulq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t vcmulq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))
+int16x8_t vcvtaq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))
+int32x4_t vcvtaq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))
+uint16x8_t vcvtaq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))
+uint32x4_t vcvtaq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))
+int16x8_t vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))
+int32x4_t vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))
+uint16x8_t vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))
+uint32x4_t vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))
+float32x4_t vcvtbq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))
+float32x4_t vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))
+float32x4_t vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))
+int16x8_t vcvtmq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))
+int32x4_t vcvtmq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))
+uint16x8_t vcvtmq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))
+uint32x4_t vcvtmq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))
+int16x8_t vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))
+int32x4_t vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))
+uint16x8_t vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))
+uint32x4_t vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))
+int16x8_t vcvtnq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))
+int32x4_t vcvtnq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))
+uint16x8_t vcvtnq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))
+uint32x4_t vcvtnq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))
+int16x8_t vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))
+int32x4_t vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);
+static
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16))) +uint16x8_t vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32))) +uint32x4_t vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16))) +int16x8_t vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16))) +int16x8_t vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32))) +int32x4_t vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32))) +int32x4_t vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16))) +uint16x8_t vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16))) +uint16x8_t vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32))) +uint32x4_t vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32))) +uint32x4_t vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16))) +int16x8_t vcvtpq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32))) +int32x4_t vcvtpq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16))) +uint16x8_t vcvtpq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32))) +uint32x4_t vcvtpq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16))) +int16x8_t vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32))) +int32x4_t vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16))) +uint16x8_t vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32))) +uint32x4_t vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16))) +float16x8_t vcvtq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16))) +float16x8_t vcvtq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16))) +float16x8_t vcvtq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16))) +float16x8_t vcvtq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32))) +float32x4_t vcvtq_f32_s32(int32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32))) +float32x4_t vcvtq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32))) +float32x4_t vcvtq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32))) +float32x4_t vcvtq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16))) +float16x8_t vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16))) +float16x8_t vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16))) +float16x8_t vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16))) +float16x8_t vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32))) +float32x4_t vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32))) +float32x4_t vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32))) +float32x4_t vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32))) +float32x4_t vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16))) +float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16))) +float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16))) +float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16))) +float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32))) +float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32))) +float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32))) +float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32))) +float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16))) +int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16))) +int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32))) +int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32))) +int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16))) +uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16))) +uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32))) +uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32))) +uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16))) +int16x8_t vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16))) +int16x8_t vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32))) +int32x4_t vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32))) +int32x4_t vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16))) +uint16x8_t vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16))) +uint16x8_t vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32))) +uint32x4_t vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32))) +uint32x4_t vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16))) +float16x8_t vcvtq_n_f16_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16))) +float16x8_t vcvtq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16))) +float16x8_t vcvtq_n_f16_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16))) +float16x8_t vcvtq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32))) +float32x4_t vcvtq_n_f32_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32))) +float32x4_t vcvtq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32))) +float32x4_t vcvtq_n_f32_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32))) +float32x4_t vcvtq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16))) +int16x8_t vcvtq_n_s16_f16(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32))) +int32x4_t vcvtq_n_s32_f32(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16))) +uint16x8_t vcvtq_n_u16_f16(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32))) +uint32x4_t vcvtq_n_u32_f32(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16))) +int16x8_t vcvtq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32))) +int32x4_t vcvtq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16))) +uint16x8_t vcvtq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32))) +uint32x4_t vcvtq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16))) +float16x8_t vcvtq_x_f16_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16))) +float16x8_t vcvtq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16))) +float16x8_t vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16))) +float16x8_t vcvtq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32))) +float32x4_t vcvtq_x_f32_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32))) +float32x4_t vcvtq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32))) +float32x4_t vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32))) +float32x4_t vcvtq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16))) +float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16))) +float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16))) +float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16))) +float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32))) +float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32))) +float32x4_t vcvtq_x_n(int32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32))) +float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32))) +float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16))) +int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32))) +int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16))) +uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32))) +uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16))) +int16x8_t vcvtq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32))) +int32x4_t vcvtq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16))) +uint16x8_t vcvtq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32))) +uint32x4_t vcvtq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32))) +float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16))) +float32x4_t vcvttq_f32_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32))) +float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16))) +float32x4_t vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16))) +float32x4_t vcvttq_x_f32_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16))) +float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16))) +float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32))) +float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32))) +float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16))) +float16x8_t vdupq_n_f16(float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32))) +float32x4_t vdupq_n_f32(float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16))) +float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32))) +float32x4_t 
vdupq_x_n_f32(float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16))) +float16x8_t veorq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16))) +float16x8_t veorq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32))) +float32x4_t veorq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32))) +float32x4_t veorq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16))) +float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16))) +float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32))) +float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32))) +float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16))) +float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16))) +float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32))) +float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32))) +float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16))) +float16x8_t vfmaq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16))) +float16x8_t vfmaq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32))) +float32x4_t vfmaq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32))) +float32x4_t vfmaq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16))) +float16x8_t vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16))) +float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32))) +float32x4_t vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32))) +float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16))) +float16x8_t vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16))) +float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32))) +float32x4_t vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32))) +float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16))) +float16x8_t vfmaq_n_f16(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16))) +float16x8_t vfmaq(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32))) +float32x4_t vfmaq_n_f32(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32))) +float32x4_t vfmaq(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16))) +float16x8_t vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16))) +float16x8_t vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32))) +float32x4_t vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32))) +float32x4_t vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16))) +float16x8_t vfmasq_n_f16(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16))) +float16x8_t vfmasq(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32))) +float32x4_t vfmasq_n_f32(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32))) +float32x4_t vfmasq(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16))) +float16x8_t vfmsq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16))) +float16x8_t vfmsq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32))) +float32x4_t vfmsq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32))) +float32x4_t vfmsq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16))) +float16x8_t vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16))) +float16x8_t vfmsq_m(float16x8_t, float16x8_t, 
float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32))) +float32x4_t vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32))) +float32x4_t vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16))) +float16_t vgetq_lane_f16(float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16))) +float16_t vgetq_lane(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32))) +float32_t vgetq_lane_f32(float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32))) +float32_t vgetq_lane(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16))) +float16x8_t vld1q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16))) +float16x8_t vld1q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32))) +float32x4_t vld1q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32))) +float32x4_t vld1q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16))) +float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16))) +float16x8_t vld1q_z(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32))) +float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32))) +float32x4_t vld1q_z(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16))) +float16x8x2_t vld2q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16))) +float16x8x2_t vld2q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32))) +float32x4x2_t vld2q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32))) +float32x4x2_t vld2q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16))) +float16x8x4_t vld4q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16))) +float16x8x4_t vld4q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32))) +float32x4x4_t vld4q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32))) +float32x4x4_t vld4q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16))) +float16x8_t vldrhq_f16(const float16_t *); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16))) +float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16))) +float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16))) +float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16))) +float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16))) +float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16))) +float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16))) +float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16))) +float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16))) +float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32))) +float32x4_t vldrwq_f32(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32))) +float32x4_t vldrwq_gather_base_f32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32))) +float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32))) +float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32))) +float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32))) +float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32))) +float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32))) +float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32))) +float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32))) +float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32))) +float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32))) +float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32))) +float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32))) +float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16))) +float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16))) +float16x8_t vmaxnmaq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32))) +float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32))) +float32x4_t vmaxnmaq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16))) +float16x8_t vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16))) +float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32))) +float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32))) +float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16))) +float16_t vmaxnmavq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16))) +float16_t vmaxnmavq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32))) +float32_t vmaxnmavq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32))) +float32_t vmaxnmavq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16))) +float16_t vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16))) +float16_t vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32))) +float32_t vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32))) +float32_t vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16))) +float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16))) +float16x8_t vmaxnmq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32))) +float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32))) +float32x4_t vmaxnmq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16))) +float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16))) +float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32))) +float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32))) +float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16))) +float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16))) +float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32))) +float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32))) +float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16))) +float16_t vmaxnmvq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16))) +float16_t vmaxnmvq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32))) +float32_t vmaxnmvq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32))) +float32_t vmaxnmvq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16))) +float16_t vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16))) +float16_t vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32))) +float32_t vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32))) +float32_t vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16))) +float16x8_t vminnmaq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16))) +float16x8_t vminnmaq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32))) +float32x4_t 
vminnmaq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32))) +float32x4_t vminnmaq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16))) +float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16))) +float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32))) +float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32))) +float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16))) +float16_t vminnmavq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16))) +float16_t vminnmavq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32))) +float32_t vminnmavq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32))) +float32_t vminnmavq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16))) +float16_t vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16))) +float16_t vminnmavq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32))) +float32_t vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32))) +float32_t vminnmavq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16))) +float16x8_t vminnmq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16))) +float16x8_t vminnmq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32))) +float32x4_t vminnmq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32))) +float32x4_t vminnmq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16))) +float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16))) +float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32))) +float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32))) +float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16))) +float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16))) +float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32))) +float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32))) +float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16))) +float16_t vminnmvq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16))) +float16_t vminnmvq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32))) +float32_t vminnmvq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32))) +float32_t vminnmvq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16))) +float16_t vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16))) +float16_t vminnmvq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32))) +float32_t vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32))) +float32_t vminnmvq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16))) +float16x8_t vmulq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16))) +float16x8_t vmulq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32))) +float32x4_t vmulq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32))) +float32x4_t vmulq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16))) +float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16))) +float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32))) +float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32))) +float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16))) +float16x8_t vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16))) +float16x8_t 
vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32))) +float32x4_t vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32))) +float32x4_t vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16))) +float16x8_t vmulq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16))) +float16x8_t vmulq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32))) +float32x4_t vmulq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32))) +float32x4_t vmulq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16))) +float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16))) +float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32))) +float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32))) +float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16))) +float16x8_t vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16))) +float16x8_t vmulq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32))) +float32x4_t vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32))) +float32x4_t vmulq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16))) +float16x8_t vnegq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16))) +float16x8_t vnegq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t vnegq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t vnegq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t vnegq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t vnegq_m(float32x4_t, float32x4_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t vnegq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t vnegq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t vnegq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t vnegq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t vornq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t vornq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t vornq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t vornq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t vorrq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t vorrq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t vorrq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t vorrq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t vreinterpretq_f16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t vreinterpretq_f16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t vreinterpretq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t vreinterpretq_f16(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t vreinterpretq_f16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t vreinterpretq_f16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t vreinterpretq_f16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t vreinterpretq_f16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t vreinterpretq_f16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t vreinterpretq_f16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t vreinterpretq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t vreinterpretq_f16(uint16x8_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t vreinterpretq_f16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t vreinterpretq_f16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t vreinterpretq_f16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t vreinterpretq_f16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t vreinterpretq_f16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t vreinterpretq_f16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t vreinterpretq_f32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t vreinterpretq_f32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t vreinterpretq_f32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t vreinterpretq_f32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t vreinterpretq_f32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t vreinterpretq_f32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t vreinterpretq_f32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t vreinterpretq_f32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t vreinterpretq_f32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t vreinterpretq_f32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t vreinterpretq_f32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t vreinterpretq_f32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t vreinterpretq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t vreinterpretq_f32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t vreinterpretq_f32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t vreinterpretq_f32(uint64x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t vreinterpretq_f32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t vreinterpretq_f32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t vreinterpretq_s16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t vreinterpretq_s16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t vreinterpretq_s16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t vreinterpretq_s16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t vreinterpretq_s32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t vreinterpretq_s32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t vreinterpretq_s32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t vreinterpretq_s32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t vreinterpretq_s64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t vreinterpretq_s64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t vreinterpretq_s64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t vreinterpretq_s64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t vreinterpretq_s8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t vreinterpretq_s8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t vreinterpretq_s8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t vreinterpretq_s8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t vreinterpretq_u16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t vreinterpretq_u16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t vreinterpretq_u16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t vreinterpretq_u16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t 
vreinterpretq_u32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t vreinterpretq_u32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t vreinterpretq_u32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t vreinterpretq_u32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t vreinterpretq_u64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t vreinterpretq_u64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t vreinterpretq_u64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t vreinterpretq_u64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t vreinterpretq_u8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t vreinterpretq_u8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t vreinterpretq_u8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t vreinterpretq_u8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t vrev32q_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t vrev32q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t vrev32q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t vrev32q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t vrev64q_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t vrev64q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t vrev64q_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t vrev64q(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t 
vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t vrev64q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t vrev64q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t vrev64q_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t vrev64q_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t vrndaq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t vrndaq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t vrndaq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t vrndaq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t vrndaq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t vrndaq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t vrndaq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t vrndaq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t vrndmq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t vrndmq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t vrndmq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t vrndmq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t vrndmq_m_f16(float16x8_t, float16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t vrndmq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t vrndmq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t vrndmq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t vrndmq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t vrndnq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t vrndnq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t vrndnq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t vrndnq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t vrndnq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t vrndnq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t vrndnq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t vrndnq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t vrndpq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t vrndpq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t vrndpq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t vrndpq(float32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t vrndpq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t vrndpq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t vrndpq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t vrndpq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t vrndq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t vrndq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t vrndq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t vrndq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t vrndq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t vrndq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t vrndq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t vrndq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t vrndq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t vrndq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t vrndxq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t vrndxq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t vrndxq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t vrndxq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t vrndxq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t vrndxq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t vrndxq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t vrndxq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t vsetq_lane(float16_t, float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t vsetq_lane(float32_t, float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void vst1q_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void vst1q(float16_t *, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void vst1q_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void vst1q(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void vst1q_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void vst1q_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void vst2q_f16(float16_t *, float16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void vst2q(float16_t *, float16x8x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void vst2q_f32(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void vst2q(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void vst4q_f16(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void vst4q(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void vst4q_f32(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void vst4q(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void vstrhq_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void vstrhq(float16_t *, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void vstrwq_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void vstrwq(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void vstrwq_scatter_base(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t vsubq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t vsubq(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t vsubq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t vsubq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16))) +float16x8_t vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16))) +float16x8_t vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32))) +float32x4_t vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32))) +float32x4_t vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16))) +float16x8_t vsubq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16))) +float16x8_t vsubq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32))) +float32x4_t vsubq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32))) +float32x4_t vsubq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16))) +float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16))) +float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32))) +float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32))) +float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16))) +float16x8_t vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16))) +float16x8_t vsubq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32))) +float32x4_t vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32))) +float32x4_t vsubq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16))) +float16x8_t vuninitializedq_f16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32))) +float32x4_t vuninitializedq_f32(); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16))) +float16x8_t vuninitializedq(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32))) +float32x4_t vuninitializedq(float32x4_t); + +#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __ARM_MVE_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_neon.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_neon.h new file mode 100644 index 0000000..2448870 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_neon.h @@ -0,0 +1,69894 @@ +/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_H +#define __ARM_NEON_H + +#ifndef __ARM_FP +#error "NEON intrinsics not available with the soft-float ABI. 
Please use -mfloat-abi=softfp or -mfloat-abi=hard" +#else + +#if !defined(__ARM_NEON) +#error "NEON support not enabled" +#else + +#include <stdint.h> + +#ifdef __ARM_FEATURE_BF16 +#include <arm_bf16.h> +typedef __bf16 bfloat16_t; +#endif + +typedef float float32_t; +typedef __fp16 float16_t; +#ifdef __aarch64__ +typedef double float64_t; +#endif + +#ifdef __aarch64__ +typedef uint8_t poly8_t; +typedef uint16_t poly16_t; +typedef uint64_t poly64_t; +typedef __uint128_t poly128_t; +#else +typedef int8_t poly8_t; +typedef int16_t poly16_t; +typedef int64_t poly64_t; +#endif +typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t; +typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t; +typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t; +typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t; +typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t; +typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t; +typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t; +typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t; +typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t; +typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t; +typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; +typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t; +typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t; +typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t; +typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t; +typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t; +typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; +typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; +typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; +typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; +#ifdef __aarch64__ +typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; +typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; +#endif +typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t; +typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t; +typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t; +typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t; +typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t; +typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t; + +typedef struct int8x8x2_t { + int8x8_t val[2]; +} int8x8x2_t; + +typedef struct int8x16x2_t { + int8x16_t val[2]; +} int8x16x2_t; + +typedef struct int16x4x2_t { + int16x4_t val[2]; +} int16x4x2_t; + +typedef struct int16x8x2_t { + int16x8_t val[2]; +} int16x8x2_t; + +typedef struct int32x2x2_t { + int32x2_t val[2]; +} int32x2x2_t; + +typedef struct int32x4x2_t { + int32x4_t val[2]; +} int32x4x2_t; + +typedef struct int64x1x2_t { + int64x1_t val[2]; +} int64x1x2_t; + +typedef struct int64x2x2_t { + int64x2_t val[2]; +} int64x2x2_t; + +typedef struct uint8x8x2_t { + uint8x8_t val[2]; +} uint8x8x2_t; + +typedef struct uint8x16x2_t { + uint8x16_t val[2]; +} uint8x16x2_t; + +typedef struct uint16x4x2_t { + uint16x4_t val[2]; +} uint16x4x2_t; + +typedef struct uint16x8x2_t { + uint16x8_t val[2]; +} uint16x8x2_t; + +typedef struct uint32x2x2_t { + uint32x2_t val[2]; +} uint32x2x2_t; + +typedef struct uint32x4x2_t { + uint32x4_t val[2]; +} uint32x4x2_t; + +typedef struct uint64x1x2_t { + uint64x1_t val[2]; +} uint64x1x2_t; + +typedef struct uint64x2x2_t { + uint64x2_t 
val[2]; +} uint64x2x2_t; + +typedef struct float16x4x2_t { + float16x4_t val[2]; +} float16x4x2_t; + +typedef struct float16x8x2_t { + float16x8_t val[2]; +} float16x8x2_t; + +typedef struct float32x2x2_t { + float32x2_t val[2]; +} float32x2x2_t; + +typedef struct float32x4x2_t { + float32x4_t val[2]; +} float32x4x2_t; + +#ifdef __aarch64__ +typedef struct float64x1x2_t { + float64x1_t val[2]; +} float64x1x2_t; + +typedef struct float64x2x2_t { + float64x2_t val[2]; +} float64x2x2_t; + +#endif +typedef struct poly8x8x2_t { + poly8x8_t val[2]; +} poly8x8x2_t; + +typedef struct poly8x16x2_t { + poly8x16_t val[2]; +} poly8x16x2_t; + +typedef struct poly16x4x2_t { + poly16x4_t val[2]; +} poly16x4x2_t; + +typedef struct poly16x8x2_t { + poly16x8_t val[2]; +} poly16x8x2_t; + +typedef struct poly64x1x2_t { + poly64x1_t val[2]; +} poly64x1x2_t; + +typedef struct poly64x2x2_t { + poly64x2_t val[2]; +} poly64x2x2_t; + +typedef struct int8x8x3_t { + int8x8_t val[3]; +} int8x8x3_t; + +typedef struct int8x16x3_t { + int8x16_t val[3]; +} int8x16x3_t; + +typedef struct int16x4x3_t { + int16x4_t val[3]; +} int16x4x3_t; + +typedef struct int16x8x3_t { + int16x8_t val[3]; +} int16x8x3_t; + +typedef struct int32x2x3_t { + int32x2_t val[3]; +} int32x2x3_t; + +typedef struct int32x4x3_t { + int32x4_t val[3]; +} int32x4x3_t; + +typedef struct int64x1x3_t { + int64x1_t val[3]; +} int64x1x3_t; + +typedef struct int64x2x3_t { + int64x2_t val[3]; +} int64x2x3_t; + +typedef struct uint8x8x3_t { + uint8x8_t val[3]; +} uint8x8x3_t; + +typedef struct uint8x16x3_t { + uint8x16_t val[3]; +} uint8x16x3_t; + +typedef struct uint16x4x3_t { + uint16x4_t val[3]; +} uint16x4x3_t; + +typedef struct uint16x8x3_t { + uint16x8_t val[3]; +} uint16x8x3_t; + +typedef struct uint32x2x3_t { + uint32x2_t val[3]; +} uint32x2x3_t; + +typedef struct uint32x4x3_t { + uint32x4_t val[3]; +} uint32x4x3_t; + +typedef struct uint64x1x3_t { + uint64x1_t val[3]; +} uint64x1x3_t; + +typedef struct uint64x2x3_t { + uint64x2_t val[3]; +} uint64x2x3_t; + +typedef struct float16x4x3_t { + float16x4_t val[3]; +} float16x4x3_t; + +typedef struct float16x8x3_t { + float16x8_t val[3]; +} float16x8x3_t; + +typedef struct float32x2x3_t { + float32x2_t val[3]; +} float32x2x3_t; + +typedef struct float32x4x3_t { + float32x4_t val[3]; +} float32x4x3_t; + +#ifdef __aarch64__ +typedef struct float64x1x3_t { + float64x1_t val[3]; +} float64x1x3_t; + +typedef struct float64x2x3_t { + float64x2_t val[3]; +} float64x2x3_t; + +#endif +typedef struct poly8x8x3_t { + poly8x8_t val[3]; +} poly8x8x3_t; + +typedef struct poly8x16x3_t { + poly8x16_t val[3]; +} poly8x16x3_t; + +typedef struct poly16x4x3_t { + poly16x4_t val[3]; +} poly16x4x3_t; + +typedef struct poly16x8x3_t { + poly16x8_t val[3]; +} poly16x8x3_t; + +typedef struct poly64x1x3_t { + poly64x1_t val[3]; +} poly64x1x3_t; + +typedef struct poly64x2x3_t { + poly64x2_t val[3]; +} poly64x2x3_t; + +typedef struct int8x8x4_t { + int8x8_t val[4]; +} int8x8x4_t; + +typedef struct int8x16x4_t { + int8x16_t val[4]; +} int8x16x4_t; + +typedef struct int16x4x4_t { + int16x4_t val[4]; +} int16x4x4_t; + +typedef struct int16x8x4_t { + int16x8_t val[4]; +} int16x8x4_t; + +typedef struct int32x2x4_t { + int32x2_t val[4]; +} int32x2x4_t; + +typedef struct int32x4x4_t { + int32x4_t val[4]; +} int32x4x4_t; + +typedef struct int64x1x4_t { + int64x1_t val[4]; +} int64x1x4_t; + +typedef struct int64x2x4_t { + int64x2_t val[4]; +} int64x2x4_t; + +typedef struct uint8x8x4_t { + uint8x8_t val[4]; +} uint8x8x4_t; + +typedef struct 
uint8x16x4_t { + uint8x16_t val[4]; +} uint8x16x4_t; + +typedef struct uint16x4x4_t { + uint16x4_t val[4]; +} uint16x4x4_t; + +typedef struct uint16x8x4_t { + uint16x8_t val[4]; +} uint16x8x4_t; + +typedef struct uint32x2x4_t { + uint32x2_t val[4]; +} uint32x2x4_t; + +typedef struct uint32x4x4_t { + uint32x4_t val[4]; +} uint32x4x4_t; + +typedef struct uint64x1x4_t { + uint64x1_t val[4]; +} uint64x1x4_t; + +typedef struct uint64x2x4_t { + uint64x2_t val[4]; +} uint64x2x4_t; + +typedef struct float16x4x4_t { + float16x4_t val[4]; +} float16x4x4_t; + +typedef struct float16x8x4_t { + float16x8_t val[4]; +} float16x8x4_t; + +typedef struct float32x2x4_t { + float32x2_t val[4]; +} float32x2x4_t; + +typedef struct float32x4x4_t { + float32x4_t val[4]; +} float32x4x4_t; + +#ifdef __aarch64__ +typedef struct float64x1x4_t { + float64x1_t val[4]; +} float64x1x4_t; + +typedef struct float64x2x4_t { + float64x2_t val[4]; +} float64x2x4_t; + +#endif +typedef struct poly8x8x4_t { + poly8x8_t val[4]; +} poly8x8x4_t; + +typedef struct poly8x16x4_t { + poly8x16_t val[4]; +} poly8x16x4_t; + +typedef struct poly16x4x4_t { + poly16x4_t val[4]; +} poly16x4x4_t; + +typedef struct poly16x8x4_t { + poly16x8_t val[4]; +} poly16x8x4_t; + +typedef struct poly64x1x4_t { + poly64x1_t val[4]; +} poly64x1x4_t; + +typedef struct poly64x2x4_t { + poly64x2_t val[4]; +} poly64x2x4_t; + +#ifdef __ARM_FEATURE_BF16 +typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t; +typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t; + +typedef struct bfloat16x4x2_t { + bfloat16x4_t val[2]; +} bfloat16x4x2_t; + +typedef struct bfloat16x8x2_t { + bfloat16x8_t val[2]; +} bfloat16x8x2_t; + +typedef struct bfloat16x4x3_t { + bfloat16x4_t val[3]; +} bfloat16x4x3_t; + +typedef struct bfloat16x8x3_t { + bfloat16x8_t val[3]; +} bfloat16x8x3_t; + +typedef struct bfloat16x4x4_t { + bfloat16x4_t val[4]; +} bfloat16x4x4_t; + +typedef struct bfloat16x8x4_t { + bfloat16x8_t val[4]; +} bfloat16x8x4_t; + +#endif + +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#else +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#endif + +#define splat_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#else +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) 
__builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#else +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#else +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#else +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#else +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = 
(float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#else +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#else +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s16(__p0, __p1) 
__extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#define splat_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) 
+#else +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#define splat_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#else +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#else +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#define splat_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ 
+ __ret; \ +}) +#else +#define splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#else +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#else +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret; \ +}) +#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#else +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#else +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#else +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#else +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + 
uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + 
float32x4_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret; \ +}) +#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = 
__p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret; \ +}) +#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) 
__builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret; \ +}) +#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t 
__noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t 
vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif 
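Every big-endian (#else) variant above follows the same reverse/operate/reverse shape: the input lanes are reversed with __builtin_shufflevector, the underlying __builtin_neon_* call runs on the reversed vectors, and the result is reversed back so callers always see architectural lane order. The __noswap_* forms skip both reversals; they appear to exist so that other big-endian implementations, whose operands are already lane-reversed, can reuse an intrinsic without swapping twice. A minimal sketch of the identity this pattern relies on, using clang vector extensions (the s8x8 typedef and rev8 helper are illustrative, not part of the header):

typedef signed char s8x8 __attribute__((vector_size(8)));

/* Reverse the eight lanes, exactly as the big-endian wrappers above do. */
static inline s8x8 rev8(s8x8 v) {
  return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
}

/* For a purely lanewise operation f, rev8 undoes itself, so
 * rev8(f(rev8(a), rev8(b))) == f(a, b): reversing on the way in and on
 * the way out leaves the lanewise result unchanged while keeping a
 * stable lane numbering for intrinsics such as vabd_s8/vabsq_s8 above. */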
+ +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else 
+__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
+  int64x1_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = __p0 + __p1;
+  return __ret;
+}
+#else
+__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = __rev0 + __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5);
+  return __ret;
+}
+#else
+__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
+  poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  poly16x4_t __ret;
+  __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38);
+  return __ret;
+}
+#else
+__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
+  poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37);
+  return __ret;
+}
+#else
+__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
+  poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x8_t __ret;
+  __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
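This pattern repeats throughout the generated header: each intrinsic has a __LITTLE_ENDIAN__ definition that maps straight onto the vector operation, and a big-endian twin that reverses lane order with __builtin_shufflevector before and after the operation so lane indices always match the architectural register layout. A minimal caller sketch (hypothetical helper name; assumes a NEON-enabled ARM target), which compiles to a single vector add either way:

    #include <arm_neon.h>

    /* Adds eight 16-bit lanes at once; on big-endian builds the header's
       #else branch transparently does the same add on reversed copies. */
    static int16x8_t add_rows(int16x8_t a, int16x8_t b) {
      return vaddq_s16(a, b);
    }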
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+  return __ret;
+}
+#endif
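The vaddhn ("add, halving narrow") intrinsics add two wide vectors and keep only the high half of each element, narrowing the type (e.g. uint32x4_t + uint32x4_t yields uint16x4_t). The __noswap_ variants are internal helpers that skip lane reversal so other big-endian wrappers can compose them without double-swapping. An illustrative sketch (hypothetical helper name):

    #include <arm_neon.h>

    /* Returns the top 16 bits of each 32-bit lane sum, so per lane
       (0x00010000 + 0x00010000) >> 16 == 0x0002. */
    static uint16x4_t sum_high_halves(uint32x4_t a, uint32x4_t b) {
      return vaddhn_u32(a, b);
    }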
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
+  int64x1_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = __p0 & __p1;
+  return __ret;
+}
+#else
+__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = __rev0 & __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
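The vand family is a plain per-bit AND across all lanes; the q-suffix forms operate on 128-bit registers, the unsuffixed forms on 64-bit ones. A tiny masking sketch (hypothetical helper name):

    #include <arm_neon.h>

    /* Keep only the low nibble of every byte. */
    static uint8x16_t low_nibbles(uint8x16_t bytes) {
      return vandq_u8(bytes, vdupq_n_u8(0x0F));
    }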
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
+  int64x1_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = __p0 & ~__p1;
+  return __ret;
+}
+#else
+__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = __rev0 & ~__rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
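As the bodies above show, vbic ("bit clear") computes __p0 & ~__p1: the second operand selects which bits to clear in the first. A small sketch (hypothetical helper name):

    #include <arm_neon.h>

    /* Clear the top bit of every byte; vbicq_u8(a, b) is a & ~b. */
    static uint8x16_t clear_high_bit(uint8x16_t bytes) {
      return vbicq_u8(bytes, vdupq_n_u8(0x80));
    }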
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
+  return __ret;
+}
+#else
+__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  poly16x4_t __ret;
+  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
+  return __ret;
+}
+#else
+__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __ret;
+  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
+  return __ret;
+}
+#else
+__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x8_t __ret;
+  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
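vbsl ("bitwise select") takes an unsigned mask as its first operand and, per bit, returns (mask & __p1) | (~mask & __p2); the mask usually comes from a comparison. A minimal sketch combining it with a compare intrinsic from later in this header (hypothetical helper name):

    #include <arm_neon.h>

    /* Per-lane maximum: lanes where a > b select a, the rest select b. */
    static float32x4_t select_max(float32x4_t a, float32x4_t b) {
      uint32x4_t mask = vcgtq_f32(a, b);   /* all-ones where a > b */
      return vbslq_f32(mask, a, b);
    }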
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
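The vcage/vcagt/vcale/vcalt group performs absolute-value comparisons of float lanes (|a| >= |b|, |a| > |b|, and so on), producing an all-ones or all-zero unsigned lane per result. A brief sketch (hypothetical helper name):

    #include <arm_neon.h>

    /* True (0xFFFFFFFF) in each lane where fabsf(a[i]) >= fabsf(b[i]). */
    static uint32x4_t magnitude_ge(float32x4_t a, float32x4_t b) {
      return vcageq_f32(a, b);
    }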
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+  poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 == __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__rev0 == __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
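The vceq family returns an unsigned mask vector: each lane is all-ones where the operands compare equal and zero elsewhere, which is why even the float and signed variants return uintNxM_t. A small sketch that turns those masks into a count (hypothetical helper name; vshrq_n_u32 and vgetq_lane_u32 are standard NEON intrinsics):

    #include <arm_neon.h>

    /* Count how many lanes of a equal key: each true lane is 0xFFFFFFFF,
       so shifting right by 31 leaves 1 per matching lane. */
    static uint32_t count_equal(uint32x4_t a, uint32_t key) {
      uint32x4_t mask = vceqq_u32(a, vdupq_n_u32(key));
      uint32x4_t ones = vshrq_n_u32(mask, 31);
      return vgetq_lane_u32(ones, 0) + vgetq_lane_u32(ones, 1) +
             vgetq_lane_u32(ones, 2) + vgetq_lane_u32(ones, 3);
    }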
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0 >= __p1);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__rev0 >= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = 
(uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai 
uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclsq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclsq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclsq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcls_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcls_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcls_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + 
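/* The generic __builtin_neon_*_v builtins operate on raw byte vectors, so
   operands are bit-reinterpreted by casts such as (int8x8_t)__p0, and the
   trailing constant (2 here) is clang's internal NEON type code telling the
   builtin the operand's true element type, int32x2_t in this case. */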
__ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 
3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
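/* vclz counts the leading zero bits of each lane; as in the other #else
   bodies, the count is taken on the lane-reversed copy and the result's lane
   order is restored below. */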
int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); 
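/* vcnt is a per-byte population count: each lane of the result holds the
   number of set bits in the corresponding input byte. */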
+ int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t 
__p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_s16(__p0) 
__extension__ ({ \ + int16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int16x4_t)(__promote); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) 
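/* The vcvt*_n_* conversions are macros rather than inline functions because
   the fractional-bits argument __p1 must be a compile-time immediate; the
   big-endian variants lane-reverse only the data vector and forward __p1
   unchanged. */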
+#else
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
+  poly8x8_t __s0_0 = __p0_0; \
+  poly8x8_t __ret_0; \
+  __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
+  __ret_0; \
+})
+#else
+#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
+  poly8x8_t __s0_1 = __p0_1; \
+  poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_1; \
+  __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
+  __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_1; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
+  poly16x4_t __s0_2 = __p0_2; \
+  poly16x4_t __ret_2; \
+  __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
+  __ret_2; \
+})
+#else
+#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
+  poly16x4_t __s0_3 = __p0_3; \
+  poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
+  poly16x4_t __ret_3; \
+  __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
+  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
+  __ret_3; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
+  poly8x8_t __s0_4 = __p0_4; \
+  poly8x16_t __ret_4; \
+  __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
+  __ret_4; \
+})
+#else
+#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
+  poly8x8_t __s0_5 = __p0_5; \
+  poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_5; \
+  __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
+  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_5; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
+  poly16x4_t __s0_6 = __p0_6; \
+  poly16x8_t __ret_6; \
+  __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
+  __ret_6; \
+})
+#else
+#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
+  poly16x4_t __s0_7 = __p0_7; \
+  poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
+  poly16x8_t __ret_7; \
+  __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
+  __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_7; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
+  uint8x8_t __s0_8 = __p0_8; \
+  uint8x16_t __ret_8; \
+  __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
+  __ret_8; \
+})
+#else
+#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
+  uint8x8_t __s0_9 = __p0_9; \
+  uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_9; \
+  __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
+  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_9; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
+  uint32x2_t __s0_10 = __p0_10; \
+  uint32x4_t __ret_10; \
+  __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
+  __ret_10; \
+})
+#else
+#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
+  uint32x2_t __s0_11 = __p0_11; \
+  uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
+  uint32x4_t __ret_11; \
+  __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
+  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
+  __ret_11; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
+  uint64x1_t __s0_12 = __p0_12; \
+  uint64x2_t __ret_12; \
+  __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
+  __ret_12; \
+})
+#else
+#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
+  uint64x1_t __s0_13 = __p0_13; \
+  uint64x2_t __ret_13; \
+  __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
+  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
+  __ret_13; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
+  uint16x4_t __s0_14 = __p0_14; \
+  uint16x8_t __ret_14; \
+  __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
+  __ret_14; \
+})
+#else
+#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
+  uint16x4_t __s0_15 = __p0_15; \
+  uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
+  uint16x8_t __ret_15; \
+  __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
+  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_15; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
+  int8x8_t __s0_16 = __p0_16; \
+  int8x16_t __ret_16; \
+  __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
+  __ret_16; \
+})
+#else
+#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
+  int8x8_t __s0_17 = __p0_17; \
+  int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_17; \
+  __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
+  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_17; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
+  float32x2_t __s0_18 = __p0_18; \
+  float32x4_t __ret_18; \
+  __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
+  __ret_18; \
+})
+#else
+#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
+  float32x2_t __s0_19 = __p0_19; \
+  float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
+  float32x4_t __ret_19; \
+  __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
+  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
+  __ret_19; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
+  int32x2_t __s0_20 = __p0_20; \
+  int32x4_t __ret_20; \
+  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
+  __ret_20; \
+})
+#else
+#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
+  int32x2_t __s0_21 = __p0_21; \
+  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
+  int32x4_t __ret_21; \
+  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
+  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
+  __ret_21; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
+  int64x1_t __s0_22 = __p0_22; \
+  int64x2_t __ret_22; \
+  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
+  __ret_22; \
+})
+#else
+#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
+  int64x1_t __s0_23 = __p0_23; \
+  int64x2_t __ret_23; \
+  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
+  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
+  __ret_23; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
+  int16x4_t __s0_24 = __p0_24; \
+  int16x8_t __ret_24; \
+  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
+  __ret_24; \
+})
+#else
+#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
+  int16x4_t __s0_25 = __p0_25; \
+  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
+  int16x8_t __ret_25; \
+  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
+  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_25; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
+  uint8x8_t __s0_26 = __p0_26; \
+  uint8x8_t __ret_26; \
+  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
+  __ret_26; \
+})
+#else
+#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
+  uint8x8_t __s0_27 = __p0_27; \
+  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_27; \
+  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
+  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_27; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
+  uint32x2_t __s0_28 = __p0_28; \
+  uint32x2_t __ret_28; \
+  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
+  __ret_28; \
+})
+#else
+#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
+  uint32x2_t __s0_29 = __p0_29; \
+  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
+  uint32x2_t __ret_29; \
+  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
+  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
+  __ret_29; \
+})
+#endif
+
+#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
+  uint64x1_t __s0_30 = __p0_30; \
+  uint64x1_t __ret_30; \
+  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
+  __ret_30; \
+})
+#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \ + uint16x4_t __s0_31 = __p0_31; \ + uint16x4_t __ret_31; \ + __ret_31 = splat_lane_u16(__s0_31, __p1_31); \ + __ret_31; \ +}) +#else +#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \ + uint16x4_t __s0_32 = __p0_32; \ + uint16x4_t __rev0_32; __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \ + uint16x4_t __ret_32; \ + __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \ + __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \ + __ret_32; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \ + int8x8_t __s0_33 = __p0_33; \ + int8x8_t __ret_33; \ + __ret_33 = splat_lane_s8(__s0_33, __p1_33); \ + __ret_33; \ +}) +#else +#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \ + int8x8_t __s0_34 = __p0_34; \ + int8x8_t __rev0_34; __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret_34; \ + __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \ + __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_34; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \ + float32x2_t __s0_35 = __p0_35; \ + float32x2_t __ret_35; \ + __ret_35 = splat_lane_f32(__s0_35, __p1_35); \ + __ret_35; \ +}) +#else +#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \ + float32x2_t __s0_36 = __p0_36; \ + float32x2_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \ + float32x2_t __ret_36; \ + __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \ + __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \ + __ret_36; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \ + int32x2_t __s0_37 = __p0_37; \ + int32x2_t __ret_37; \ + __ret_37 = splat_lane_s32(__s0_37, __p1_37); \ + __ret_37; \ +}) +#else +#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \ + int32x2_t __s0_38 = __p0_38; \ + int32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ + int32x2_t __ret_38; \ + __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \ + __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \ + __ret_38; \ +}) +#endif + +#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \ + int64x1_t __s0_39 = __p0_39; \ + int64x1_t __ret_39; \ + __ret_39 = splat_lane_s64(__s0_39, __p1_39); \ + __ret_39; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \ + int16x4_t __s0_40 = __p0_40; \ + int16x4_t __ret_40; \ + __ret_40 = splat_lane_s16(__s0_40, __p1_40); \ + __ret_40; \ +}) +#else +#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \ + int16x4_t __s0_41 = __p0_41; \ + int16x4_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \ + int16x4_t __ret_41; \ + __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \ + __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \ + __ret_41; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vdup_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, 
__p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdup_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vdup_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint16x8_t 
veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + 
return __ret; +} +#endif + +__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ 
+ __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + 
int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = 
(uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) 
+#else
+#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
+  poly8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+  return __ret;
+}
+#else
+__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
+  poly8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
+  poly16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
+  uint8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+  return __ret;
+}
+#else
+__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
+  uint8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+  return __ret;
+}
+#else
+__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x2_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
+  uint64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 1);
+  return __ret;
+}
+#else
+__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x1_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
+  uint16x4_t
__ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai int8x8_t vget_high_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai float32x2_t vget_high_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai float16x4_t vget_high_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai int32x2_t vget_high_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai int64x1_t vget_high_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai int16x4_t vget_high_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, 
__p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + 
+  int8x16_t __s0 = __p0; \
+  int8_t __ret; \
+  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64_t __ret; \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64_t __ret; \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64_t __ret; \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16_t __ret; \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret; \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16_t __ret; \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8_t __ret; \
+  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8_t __ret; \
+  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8_t __ret; \
+  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32_t __ret; \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32_t __ret; \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32_t __ret; \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vget_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64_t __ret; \
+  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16_t __ret; \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16_t __ret; \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16_t __ret; \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8_t __ret; \
+  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8_t __ret; \
+  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8_t __ret; \
+  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#define vget_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64_t __ret; \
+  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16_t __ret; \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16_t __ret; \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16_t __ret; \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
+  poly8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
+  poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+  poly16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+  poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+  uint8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+  return __ret;
+}
+#else
+__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x2_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
+  uint64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0);
+  return __ret;
+}
+#else
+__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x1_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
+  uint16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vget_low_s8(int8x16_t __p0) {
+  int8x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai int8x8_t vget_low_s8(int8x16_t __p0) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vget_low_f32(float32x4_t __p0) {
+  float32x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+  return __ret;
+}
+#else
+__ai float32x2_t vget_low_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x2_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vget_low_f16(float16x8_t __p0) {
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai float16x4_t vget_low_f16(float16x8_t __p0) {
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vget_low_s32(int32x4_t __p0) {
+  int32x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+  return __ret;
+}
+#else
+__ai int32x2_t vget_low_s32(int32x4_t __p0) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x2_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vget_low_s64(int64x2_t __p0) {
+  int64x1_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0);
+  return __ret;
+}
+#else
+__ai int64x1_t vget_low_s64(int64x2_t __p0) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x1_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vget_low_s16(int16x8_t __p0) {
+  int16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai int16x4_t vget_low_s16(int16x8_t __p0) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8(__p0) __extension__ ({ \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
+  __ret; \
+})
+#else
+#define vld1_p8(__p0) __extension__ ({ \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16(__p0) __extension__ ({ \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
+  __ret; \
+})
+#else
+#define vld1_p16(__p0) __extension__ ({ \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8(__p0) __extension__ ({ \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
+  __ret; \
+})
+#else
+#define vld1q_p8(__p0) __extension__ ({ \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16(__p0) __extension__ ({ \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
+  __ret; \
+})
+#else
+#define vld1q_p16(__p0) __extension__ ({ \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8(__p0) __extension__ ({ \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
+  __ret; \
+})
+#else
+#define vld1q_u8(__p0) __extension__ ({ \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32(__p0) __extension__ ({ \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
+  __ret; \
+})
+#else
+#define vld1q_u32(__p0) __extension__ ({ \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64(__p0) __extension__ ({ \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
+  __ret; \
+})
+#else
+#define vld1q_u64(__p0) __extension__ ({ \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16(__p0) __extension__ ({ \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
+  __ret; \
+})
+#else
+#define vld1q_u16(__p0) __extension__ ({ \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8(__p0) __extension__ ({ \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
+  __ret; \
+})
+#else
+#define vld1q_s8(__p0) __extension__ ({ \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32(__p0) __extension__ ({ \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
+  __ret; \
+})
+#else
+#define vld1q_f32(__p0) __extension__ ({ \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32(__p0) __extension__ ({ \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
+  __ret; \
+})
+#else
+#define vld1q_s32(__p0) __extension__ ({ \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64(__p0) __extension__ ({ \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
+  __ret; \
+})
+#else
+#define vld1q_s64(__p0) __extension__ ({ \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16(__p0) __extension__ ({ \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
+  __ret; \
+})
+#else
+#define vld1q_s16(__p0) __extension__ ({ \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8(__p0) __extension__ ({ \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
+  __ret; \
+})
+#else
+#define vld1_u8(__p0) __extension__ ({ \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32(__p0) __extension__ ({ \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
+  __ret; \
+})
+#else
+#define vld1_u32(__p0) __extension__ ({ \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_u64(__p0) __extension__ ({ \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16(__p0) __extension__ ({ \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
+  __ret; \
+})
+#else
+#define vld1_u16(__p0) __extension__ ({ \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8(__p0) __extension__ ({ \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
+  __ret; \
+})
+#else
+#define vld1_s8(__p0) __extension__ ({ \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32(__p0) __extension__ ({ \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
+  __ret; \
+})
+#else
+#define vld1_f32(__p0) __extension__ ({ \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32(__p0) __extension__ ({ \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
+  __ret; \
+})
+#else
+#define vld1_s32(__p0) __extension__ ({ \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_s64(__p0) __extension__ ({ \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16(__p0) __extension__ ({ \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
+  __ret; \
+})
+#else
+#define vld1_s16(__p0) __extension__ ({ \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p8(__p0) __extension__ ({ \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
+  __ret; \
+})
+#else
+#define vld1_dup_p8(__p0) __extension__ ({ \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p16(__p0) __extension__ ({ \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
+  __ret; \
+})
+#else
+#define vld1_dup_p16(__p0) __extension__ ({ \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p8(__p0) __extension__ ({ \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
+  __ret; \
+})
+#else
+#define vld1q_dup_p8(__p0) __extension__ ({ \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p16(__p0) __extension__ ({ \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
+  __ret; \
+})
+#else
+#define vld1q_dup_p16(__p0) __extension__ ({ \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u8(__p0) __extension__ ({ \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
+  __ret; \
+})
+#else
+#define vld1q_dup_u8(__p0) __extension__ ({ \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u32(__p0) __extension__ ({ \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
+  __ret; \
+})
+#else
+#define vld1q_dup_u32(__p0) __extension__ ({ \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u64(__p0) __extension__ ({ \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
+  __ret; \
+})
+#else
+#define vld1q_dup_u64(__p0) __extension__ ({ \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u16(__p0) __extension__ ({ \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
+  __ret; \
+})
+#else
+#define vld1q_dup_u16(__p0) __extension__ ({ \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s8(__p0) __extension__ ({ \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
+  __ret; \
+})
+#else
+#define vld1q_dup_s8(__p0) __extension__ ({ \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f32(__p0) __extension__ ({ \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
+  __ret; \
+})
+#else
+#define vld1q_dup_f32(__p0) __extension__ ({ \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s32(__p0) __extension__ ({ \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
+  __ret; \
+})
+#else
+#define vld1q_dup_s32(__p0) __extension__ ({ \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s64(__p0) __extension__ ({ \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
+  __ret; \
+})
+#else
+#define vld1q_dup_s64(__p0) __extension__ ({ \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s16(__p0) __extension__ ({ \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
+  __ret; \
+})
+#else
+#define vld1q_dup_s16(__p0) __extension__ ({ \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u8(__p0) __extension__ ({ \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
+  __ret; \
+})
+#else
+#define vld1_dup_u8(__p0) __extension__ ({ \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u32(__p0) __extension__ ({ \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
+  __ret; \
+})
+#else
+#define vld1_dup_u32(__p0) __extension__ ({ \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_dup_u64(__p0) __extension__ ({ \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u16(__p0) __extension__ ({ \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
+  __ret; \
+})
+#else
+#define vld1_dup_u16(__p0) __extension__ ({ \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s8(__p0) __extension__ ({ \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
+  __ret; \
+})
+#else
+#define vld1_dup_s8(__p0) __extension__ ({ \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_f32(__p0) __extension__ ({ \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
+  __ret; \
+})
+#else
+#define vld1_dup_f32(__p0) __extension__ ({ \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s32(__p0) __extension__ ({ \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
+  __ret; \
+})
+#else
+#define vld1_dup_s32(__p0) __extension__ ({ \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_dup_s64(__p0) __extension__ ({ \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s16(__p0) __extension__ ({ \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
+  __ret; \
+})
+#else
+#define vld1_dup_s16(__p0) __extension__ ({ \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __s1 = __p1; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
+  __ret; \
+})
+#else
+#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8_t __s1 = __p1; \
+  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __s1 = __p1; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
+  __ret; \
+})
+#else
+#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4_t __s1 = __p1; \
+  poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __s1 = __p1; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
+  __ret; \
+})
+#else
+#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x16_t __s1 = __p1; \
+  poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __s1 = __p1; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
+  __ret; \
+})
+#else
+#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8_t __s1 = __p1; \
+  poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
+  __ret; \
+})
+#else
+#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
+  __ret; \
+})
+#else
+#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
+  __ret; \
+})
+#else
+#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
+  __ret; \
+})
+#else
+#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
+  __ret; \
+})
+#else
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
+  __ret; \
+})
+#else
+#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
+  __ret; \
+})
+#else
+#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
+  __ret; \
+})
+#else
+#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
+  __ret; \
+})
+#else
+#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
+  __ret; \
+})
+#else
+#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
+  __ret; \
+})
+#else
+#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __s1 = __p1; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
+  __ret; \
+})
+#else
+#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
+  __ret; \
+})
+#else
+#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
+  __ret; \
+})
+#else
+#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __s1 = __p1; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8_x2(__p0) __extension__ ({ \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
+  __ret; \
+})
+#else
+#define vld1_p8_x2(__p0) __extension__ ({ \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16_x2(__p0) __extension__ ({ \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
+  __ret; \
+})
+#else
+#define vld1_p16_x2(__p0) __extension__ ({ \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8_x2(__p0) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
+  __ret; \
+})
+#else
+#define vld1q_p8_x2(__p0) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16_x2(__p0) __extension__ ({ \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
+  __ret; \
+})
+#else
+#define vld1q_p16_x2(__p0) __extension__ ({ \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8_x2(__p0) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
+  __ret; \
+})
+#else
+#define vld1q_u8_x2(__p0) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32_x2(__p0) __extension__ ({ \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
+  __ret; \
+})
+#else
+#define vld1q_u32_x2(__p0) __extension__ ({ \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64_x2(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld1q_u64_x2(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16_x2(__p0) __extension__ ({ \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
+  __ret; \
+})
+#else
+#define vld1q_u16_x2(__p0) __extension__ ({ \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8_x2(__p0) __extension__ ({ \
+  int8x16x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
+  __ret; \
+})
+#else
+#define vld1q_s8_x2(__p0) __extension__ ({ \
+  int8x16x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32_x2(__p0) __extension__ ({ \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
+  __ret; \
+})
+#else
+#define vld1q_f32_x2(__p0) __extension__ ({ \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32_x2(__p0) __extension__ ({ \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
+  __ret; \
+})
+#else
+#define vld1q_s32_x2(__p0) __extension__ ({ \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64_x2(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld1q_s64_x2(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16_x2(__p0) __extension__ ({ \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
+  __ret; \
+})
+#else
+#define vld1q_s16_x2(__p0) __extension__ ({ \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8_x2(__p0) __extension__ ({ \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
+  __ret; \
+})
+#else
+#define vld1_u8_x2(__p0) __extension__ ({ \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32_x2(__p0) __extension__ ({ \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
+  __ret; \
+})
+#else
+#define vld1_u32_x2(__p0) __extension__ ({ \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_u64_x2(__p0) __extension__ ({ \
+  uint64x1x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16_x2(__p0) __extension__ ({ \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
+  __ret; \
+})
+#else
+#define vld1_u16_x2(__p0) __extension__ ({ \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8_x2(__p0) __extension__ ({ \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
+  __ret; \
+})
+#else
+#define vld1_s8_x2(__p0) __extension__ ({ \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32_x2(__p0) __extension__ ({ \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
+  __ret; \
+})
+#else
+#define vld1_f32_x2(__p0) __extension__ ({ \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32_x2(__p0) __extension__ ({ \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
+  __ret; \
+})
+#else
+#define vld1_s32_x2(__p0) __extension__ ({ \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_s64_x2(__p0) __extension__ ({ \
+  int64x1x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16_x2(__p0) __extension__ ({ \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
+  __ret; \
+})
+#else
+#define vld1_s16_x2(__p0) __extension__ ({ \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8_x3(__p0) __extension__ ({ \
+  poly8x8x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
+  __ret; \
+})
+#else
+#define vld1_p8_x3(__p0) __extension__ ({ \
+  poly8x8x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16_x3(__p0) __extension__ ({ \
+  poly16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
+  __ret; \
+})
+#else
+#define vld1_p16_x3(__p0) __extension__ ({ \
+  poly16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8_x3(__p0) __extension__ ({ \
+  poly8x16x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
+  __ret; \
+})
+#else
+#define vld1q_p8_x3(__p0) __extension__ ({ \
+  poly8x16x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16_x3(__p0) __extension__ ({ \
+  poly16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
+  __ret; \
+})
+#else
+#define vld1q_p16_x3(__p0) __extension__ ({ \
+  poly16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8_x3(__p0) __extension__ ({ \
+  uint8x16x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
+  __ret; \
+})
+#else
+#define vld1q_u8_x3(__p0) __extension__ ({ \
+  uint8x16x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32_x3(__p0) __extension__ ({ \
+  uint32x4x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
+  __ret; \
+})
+#else
+#define vld1q_u32_x3(__p0) __extension__ ({ \
+  uint32x4x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64_x3(__p0) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld1q_u64_x3(__p0) __extension__ ({ \
+  uint64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16_x3(__p0) __extension__ ({ \
+  uint16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
+  __ret; \
+})
+#else
+#define vld1q_u16_x3(__p0) __extension__ ({ \
+  uint16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8_x3(__p0) __extension__ ({ \
+  int8x16x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
+  __ret; \
+})
+#else
+#define vld1q_s8_x3(__p0) __extension__ ({ \
+  int8x16x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32_x3(__p0) __extension__ ({ \
+  float32x4x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
+  __ret; \
+})
+#else
+#define vld1q_f32_x3(__p0) __extension__ ({ \
+  float32x4x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32_x3(__p0) __extension__ ({ \
+  int32x4x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
+  __ret; \
+})
+#else
+#define vld1q_s32_x3(__p0) __extension__ ({ \
+  int32x4x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64_x3(__p0) __extension__ ({ \
+  int64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld1q_s64_x3(__p0) __extension__ ({ \
+  int64x2x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16_x3(__p0) __extension__ ({ \
+  int16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
+  __ret; \
+})
+#else
+#define vld1q_s16_x3(__p0) __extension__ ({ \
+  int16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8_x3(__p0) __extension__ ({ \
+  uint8x8x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
+  __ret; \
+})
+#else
+#define vld1_u8_x3(__p0) __extension__ ({ \
+  uint8x8x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32_x3(__p0) __extension__ ({ \
+  uint32x2x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
+  __ret; \
+})
+#else
+#define vld1_u32_x3(__p0) __extension__ ({ \
+  uint32x2x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_u64_x3(__p0) __extension__ ({ \
+  uint64x1x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16_x3(__p0) __extension__ ({ \
+  uint16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
+  __ret; \
+})
+#else
+#define vld1_u16_x3(__p0) __extension__ ({ \
+  uint16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8_x3(__p0) __extension__ ({ \
+  int8x8x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
+  __ret; \
+})
+#else
+#define vld1_s8_x3(__p0) __extension__ ({ \
+  int8x8x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32_x3(__p0) __extension__ ({ \
+  float32x2x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
+  __ret; \
+})
+#else
+#define vld1_f32_x3(__p0) __extension__ ({ \
+  float32x2x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32_x3(__p0) __extension__ ({ \
+  int32x2x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
+  __ret; \
+})
+#else
+#define vld1_s32_x3(__p0) __extension__ ({ \
+  int32x2x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_s64_x3(__p0) __extension__ ({ \
+  int64x1x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16_x3(__p0) __extension__ ({ \
+  int16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
+  __ret; \
+})
+#else
+#define vld1_s16_x3(__p0) __extension__ ({ \
+  int16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8_x4(__p0) __extension__ ({ \
+  poly8x8x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
+  __ret; \
+})
+#else
+#define vld1_p8_x4(__p0) __extension__ ({ \
+  poly8x8x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16_x4(__p0) __extension__ ({ \
+  poly16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
+  __ret; \
+})
+#else
+#define vld1_p16_x4(__p0) __extension__ ({ \
+  poly16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8_x4(__p0) __extension__ ({ \
+  poly8x16x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
+  __ret; \
+})
+#else
+#define vld1q_p8_x4(__p0) __extension__ ({ \
+  poly8x16x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16_x4(__p0) __extension__ ({ \
+  poly16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
+  __ret; \
+})
+#else
+#define vld1q_p16_x4(__p0) __extension__ ({ \
+  poly16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8_x4(__p0) __extension__ ({ \
+  uint8x16x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
+  __ret; \
+})
+#else
+#define vld1q_u8_x4(__p0) __extension__ ({ \
+  uint8x16x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32_x4(__p0) __extension__ ({ \
+  uint32x4x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
+  __ret; \
+})
+#else
+#define vld1q_u32_x4(__p0) __extension__ ({ \
+  uint32x4x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64_x4(__p0) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld1q_u64_x4(__p0) __extension__ ({ \
+  uint64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16_x4(__p0) __extension__ ({ \
+  uint16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
+  __ret; \
+})
+#else
+#define vld1q_u16_x4(__p0) __extension__ ({ \
+  uint16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8_x4(__p0) __extension__ ({ \
+  int8x16x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
+  __ret; \
+})
+#else
+#define vld1q_s8_x4(__p0) __extension__ ({ \
+  int8x16x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32_x4(__p0) __extension__ ({ \
+  float32x4x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
+  __ret; \
+})
+#else
+#define vld1q_f32_x4(__p0) __extension__ ({ \
+  float32x4x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32_x4(__p0) __extension__ ({ \
+  int32x4x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
+  __ret; \
+})
+#else
+#define vld1q_s32_x4(__p0) __extension__ ({ \
+  int32x4x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64_x4(__p0) __extension__ ({ \
+  int64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld1q_s64_x4(__p0) __extension__ ({ \
+  int64x2x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16_x4(__p0) __extension__ ({ \
+  int16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
+  __ret; \
+})
+#else
+#define vld1q_s16_x4(__p0) __extension__ ({ \
+  int16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8_x4(__p0) __extension__ ({ \
+  uint8x8x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
+  __ret; \
+})
+#else
+#define vld1_u8_x4(__p0) __extension__ ({ \
+  uint8x8x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32_x4(__p0) __extension__ ({ \
+  uint32x2x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
+  __ret; \
+})
+#else
+#define vld1_u32_x4(__p0) __extension__ ({ \
+  uint32x2x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_u64_x4(__p0) __extension__ ({ \
+  uint64x1x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16_x4(__p0) __extension__ ({ \
+  uint16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
+  __ret; \
+})
+#else
+#define vld1_u16_x4(__p0) __extension__ ({ \
+  uint16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8_x4(__p0) __extension__ ({ \
+  int8x8x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
+  __ret; \
+})
+#else
+#define vld1_s8_x4(__p0) __extension__ ({ \
+  int8x8x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32_x4(__p0) __extension__ ({ \
+  float32x2x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
+  __ret; \
+})
+#else
+#define vld1_f32_x4(__p0) __extension__ ({ \
+  float32x2x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32_x4(__p0) __extension__ ({ \
+  int32x2x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
+  __ret; \
+})
+#else
+#define vld1_s32_x4(__p0) __extension__ ({ \
+  int32x2x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld1_s64_x4(__p0) __extension__ ({ \
+  int64x1x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16_x4(__p0) __extension__ ({ \
+  int16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
+  __ret; \
+})
+#else
+#define vld1_s16_x4(__p0) __extension__ ({ \
+  int16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
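[Editor's note, not part of the patch: the vld2_* wrappers that follow are de-interleaving loads — 2N consecutive elements of interleaved {a,b} pairs come back with val[0] holding the a's and val[1] the b's. A minimal sketch with hypothetical names:]

#include <arm_neon.h>

void demo_vld2(const uint8_t *interleaved, uint8_t *left, uint8_t *right) {
  uint8x8x2_t lr = vld2_u8(interleaved);  /* val[0] = even elements, val[1] = odd */
  vst1_u8(left, lr.val[0]);
  vst1_u8(right, lr.val[1]);
}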
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p8(__p0) __extension__ ({ \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 4); \
+  __ret; \
+})
+#else
+#define vld2_p8(__p0) __extension__ ({ \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 4); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p16(__p0) __extension__ ({ \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 5); \
+  __ret; \
+})
+#else
+#define vld2_p16(__p0) __extension__ ({ \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 5); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p8(__p0) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
+  __ret; \
+})
+#else
+#define vld2q_p8(__p0) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p16(__p0) __extension__ ({ \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
+  __ret; \
+})
+#else
+#define vld2q_p16(__p0) __extension__ ({ \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u8(__p0) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
+  __ret; \
+})
+#else
+#define vld2q_u8(__p0) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u32(__p0) __extension__ ({ \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
+  __ret; \
+})
+#else
+#define vld2q_u32(__p0) __extension__ ({ \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u16(__p0) __extension__ ({ \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
+  __ret; \
+})
+#else
+#define vld2q_u16(__p0) __extension__ ({ \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s8(__p0) __extension__ ({ \
+  int8x16x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
+  __ret; \
+})
+#else
+#define vld2q_s8(__p0) __extension__ ({ \
+  int8x16x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f32(__p0) __extension__ ({ \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
+  __ret; \
+})
+#else
+#define vld2q_f32(__p0) __extension__ ({ \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s32(__p0) __extension__ ({ \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
+  __ret; \
+})
+#else
+#define vld2q_s32(__p0) __extension__ ({ \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s16(__p0) __extension__ ({ \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
+  __ret; \
+})
+#else
+#define vld2q_s16(__p0) __extension__ ({ \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u8(__p0) __extension__ ({ \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 16); \
+  __ret; \
+})
+#else
+#define vld2_u8(__p0) __extension__ ({ \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 16); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u32(__p0) __extension__ ({ \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 18); \
+  __ret; \
+})
+#else
+#define vld2_u32(__p0) __extension__ ({ \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 18); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_u64(__p0) __extension__ ({ \
+  uint64x1x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u16(__p0) __extension__ ({ \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 17); \
+  __ret; \
+})
+#else
+#define vld2_u16(__p0) __extension__ ({ \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 17); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s8(__p0) __extension__ ({ \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 0); \
+  __ret; \
+})
+#else
+#define vld2_s8(__p0) __extension__ ({ \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 0); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_f32(__p0) __extension__ ({ \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 9); \
+  __ret; \
+})
+#else
+#define vld2_f32(__p0) __extension__ ({ \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 9); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s32(__p0) __extension__ ({ \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 2); \
+  __ret; \
+})
+#else
+#define vld2_s32(__p0) __extension__ ({ \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 2); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_s64(__p0) __extension__ ({ \
+  int64x1x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s16(__p0) __extension__ ({ \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 1); \
+  __ret; \
+})
+#else
+#define vld2_s16(__p0) __extension__ ({ \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 1); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
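[Editor's note, not part of the patch: the vld2_dup_* wrappers below load one {a,b} element pair and broadcast it across every lane of both result vectors. A minimal sketch with hypothetical names:]

#include <arm_neon.h>

void demo_vld2_dup(const int16_t *pair, int16x4_t *a, int16x4_t *b) {
  int16x4x2_t d = vld2_dup_s16(pair);  /* d.val[0] = 4 x pair[0], d.val[1] = 4 x pair[1] */
  *a = d.val[0];
  *b = d.val[1];
}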
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p8(__p0) __extension__ ({ \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
+  __ret; \
+})
+#else
+#define vld2_dup_p8(__p0) __extension__ ({ \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p16(__p0) __extension__ ({ \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
+  __ret; \
+})
+#else
+#define vld2_dup_p16(__p0) __extension__ ({ \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p8(__p0) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
+  __ret; \
+})
+#else
+#define vld2q_dup_p8(__p0) __extension__ ({ \
+  poly8x16x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_p16(__p0) __extension__ ({ \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
+  __ret; \
+})
+#else
+#define vld2q_dup_p16(__p0) __extension__ ({ \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u8(__p0) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
+  __ret; \
+})
+#else
+#define vld2q_dup_u8(__p0) __extension__ ({ \
+  uint8x16x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u32(__p0) __extension__ ({ \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
+  __ret; \
+})
+#else
+#define vld2q_dup_u32(__p0) __extension__ ({ \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u64(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
+  __ret; \
+})
+#else
+#define vld2q_dup_u64(__p0) __extension__ ({ \
+  uint64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_u16(__p0) __extension__ ({ \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
+  __ret; \
+})
+#else
+#define vld2q_dup_u16(__p0) __extension__ ({ \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s8(__p0) __extension__ ({ \
+  int8x16x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
+  __ret; \
+})
+#else
+#define vld2q_dup_s8(__p0) __extension__ ({ \
+  int8x16x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_f32(__p0) __extension__ ({ \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
+  __ret; \
+})
+#else
+#define vld2q_dup_f32(__p0) __extension__ ({ \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s32(__p0) __extension__ ({ \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
+  __ret; \
+})
+#else
+#define vld2q_dup_s32(__p0) __extension__ ({ \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s64(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
+  __ret; \
+})
+#else
+#define vld2q_dup_s64(__p0) __extension__ ({ \
+  int64x2x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_s16(__p0) __extension__ ({ \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
+  __ret; \
+})
+#else
+#define vld2q_dup_s16(__p0) __extension__ ({ \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u8(__p0) __extension__ ({ \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
+  __ret; \
+})
+#else
+#define vld2_dup_u8(__p0) __extension__ ({ \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u32(__p0) __extension__ ({ \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
+  __ret; \
+})
+#else
+#define vld2_dup_u32(__p0) __extension__ ({ \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_dup_u64(__p0) __extension__ ({ \
+  uint64x1x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u16(__p0) __extension__ ({ \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
+  __ret; \
+})
+#else
+#define vld2_dup_u16(__p0) __extension__ ({ \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s8(__p0) __extension__ ({ \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
+  __ret; \
+})
+#else
+#define vld2_dup_s8(__p0) __extension__ ({ \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_f32(__p0) __extension__ ({ \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
+  __ret; \
+})
+#else
+#define vld2_dup_f32(__p0) __extension__ ({ \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s32(__p0) __extension__ ({ \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
+  __ret; \
+})
+#else
+#define vld2_dup_s32(__p0) __extension__ ({ \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vld2_dup_s64(__p0) __extension__ ({ \
+  int64x1x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s16(__p0) __extension__ ({ \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
+  __ret; \
+})
+#else
+#define vld2_dup_s16(__p0) __extension__ ({ \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
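[Editor's note, not part of the patch: the vld2_lane_* wrappers that follow load one interleaved {a,b} element into a chosen lane of an existing vector pair, leaving the other lanes untouched; the lane index must be a compile-time constant. A minimal sketch with hypothetical names:]

#include <arm_neon.h>

void demo_vld2_lane(const float *pair, float32x2x2_t *acc) {
  *acc = vld2_lane_f32(pair, *acc, 0);  /* replace lane 0 of both vectors */
}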
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x2_t __s1 = __p1; \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
+  __ret; \
+})
+#else
+#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x2_t __s1 = __p1; \
+  poly8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x2_t __s1 = __p1; \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
+  __ret; \
+})
+#else
+#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x2_t __s1 = __p1; \
+  poly16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  poly16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x2_t __s1 = __p1; \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
+  __ret; \
+})
+#else
+#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x2_t __s1 = __p1; \
+  poly16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x2_t __s1 = __p1; \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
+  __ret; \
+})
+#else
+#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x2_t __s1 = __p1; \
+  uint32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  uint32x4x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x2_t __s1 = __p1; \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
+  __ret; \
+})
+#else
+#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x2_t __s1 = __p1; \
+  uint16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x2_t __s1 = __p1; \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
+  __ret; \
+})
+#else
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x2_t __s1 = __p1; \
+  float32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  float32x4x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x2_t __s1 = __p1; \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
+  __ret; \
+})
+#else
+#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x2_t __s1 = __p1; \
+  int32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  int32x4x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x2_t __s1 = __p1; \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
+  __ret; \
+})
+#else
+#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x2_t __s1 = __p1; \
+  int16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x2_t __s1 = __p1; \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
+  __ret; \
+})
+#else
+#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x2_t __s1 = __p1; \
+  uint8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x2_t __s1 = __p1; \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
+  __ret; \
+})
+#else
+#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x2_t __s1 = __p1; \
+  uint32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  uint32x2x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x2_t __s1 = __p1; \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
+  __ret; \
+})
+#else
+#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x2_t __s1 = __p1; \
+  uint16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  uint16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x2_t __s1 = __p1; \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
+  __ret; \
+})
+#else
+#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x2_t __s1 = __p1; \
+  int8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x2_t __s1 = __p1; \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
+  __ret; \
+})
+#else
+#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x2_t __s1 = __p1; \
+  float32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  float32x2x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x2_t __s1 = __p1; \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
+  __ret; \
+})
+#else
+#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x2_t __s1 = __p1; \
+  int32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  int32x2x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x2_t __s1 = __p1; \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
+  __ret; \
+})
+#else
+#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x2_t __s1 = __p1; \
+  int16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  int16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 3); \ + 
__ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 
1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ + __ret; \ +}) +#else +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ + __ret; \ +}) +#else +#define 
vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ + __ret; \ +}) +#else +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ + __ret; \ +}) +#else +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ + __ret; \ +}) +#else +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 
0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ + __ret; \ +}) +#else +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ + __ret; \ +}) +#else +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ + __ret; \ +}) +#else +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ int16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ + __ret; \ +}) +#else +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ + __ret; \ +}) +#else +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ + __ret; \ +}) +#else +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ + __ret; \ +}) +#else +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ + __ret; \ +}) +#else +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ + __ret; \ +}) +#else +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + 
int16x4x3_t __s1 = __p1; \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ + __ret; \ +}) +#else +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 
2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 
1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p16(__p0) __extension__ ({ \ + 
poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 
0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ 
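+ /* editor's note: the trailing integer is a type code for the generic _v builtin; from the surrounding definitions, 0..3 select the signed 8/16/32/64-bit forms, 9 float32, 16..19 the unsigned forms, 4..5 the poly forms, and the q variants add 32 */ \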
+ __ret; \ +}) +#else +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __ret; \ +}) +#else +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 
__p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __ret; \ +}) +#else +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ + __ret; \ +}) +#else +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __ret; \ +}) +#else +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + 
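/* editor's note: big-endian path: reverse the lanes of each input vector, invoke the builtin, then reverse the loaded results back */ \ +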
uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ + __ret; \ +}) +#else +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ + __ret; \ +}) +#else +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ + __ret; \ +}) +#else +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ + __ret; \ +}) +#else +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ + __ret; \ +}) +#else +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ + __ret; \ +}) +#else +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __ret; \ +}) +#else +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __ret; \ +}) +#else +#define 
vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ + __ret; \ +}) +#else +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ + __ret; \ +}) +#else +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + 
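/* editor's note: the final __ret is the value of the ({ ... }) GNU statement expression each of these macros expands to */ \ +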
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ + __ret; \ +}) +#else +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) 
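/* editor's note: vmax and vmin are thin wrappers over generic builtins, with the trailing type code selecting the concrete element type */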
__builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) 
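/* editor's note: the vmin family below mirrors vmax above, computing the element-wise minimum */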
__builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return 
__ret; +} +#else +__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return 
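/* editor's note: vmla (multiply-accumulate) is expressed as plain arithmetic, __p0 + __p1 * __p2, leaving the compiler free to fuse it into a single VMLA; e.g. vmlaq_f32(a, b, c) yields a[i] + b[i] * c[i] in each lane */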
__ret; +} +#else +__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + 
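/* editor's note: reverse all three operands, accumulate, then restore the result's lane order */ + 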
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 
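/* editor's note: the numbered _42.._55 suffixes in the lane macros below rename every temporary per expansion so that nested macro uses cannot capture one another */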
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \ + uint32x4_t __s0_42 = __p0_42; \ + uint32x4_t __s1_42 = __p1_42; \ + uint32x2_t __s2_42 = __p2_42; \ + uint32x4_t __ret_42; \ + __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \ + __ret_42; \ +}) +#else +#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \ + uint32x4_t __s0_43 = __p0_43; \ + uint32x4_t __s1_43 = __p1_43; \ + uint32x2_t __s2_43 = __p2_43; \ + uint32x4_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \ + uint32x4_t __rev1_43; __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \ + uint32x2_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \ + uint32x4_t __ret_43; \ + __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \ + __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \ + __ret_43; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \ + uint16x8_t __s0_44 = __p0_44; \ + uint16x8_t __s1_44 = __p1_44; \ + uint16x4_t __s2_44 = __p2_44; \ + uint16x8_t __ret_44; \ + __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \ + __ret_44; \ +}) +#else +#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \ + uint16x8_t __s0_45 = __p0_45; \ + uint16x8_t __s1_45 = __p1_45; \ + uint16x4_t __s2_45 = __p2_45; \ + uint16x8_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_45; __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \ + uint16x8_t __ret_45; \ + __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \ + __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_45; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ + float32x4_t __s0_46 = __p0_46; \ + float32x4_t __s1_46 = __p1_46; \ + float32x2_t __s2_46 = __p2_46; \ + float32x4_t __ret_46; \ + __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \ + __ret_46; \ +}) +#else +#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ + float32x4_t __s0_47 = __p0_47; \ + float32x4_t __s1_47 = __p1_47; \ + float32x2_t __s2_47 = __p2_47; \ + float32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ + float32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ + float32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ + float32x4_t __ret_47; \ + __ret_47 = __rev0_47 + __rev1_47 * 
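/* editor's note: the __noswap_ splat variants skip the internal endian swap because their argument is already lane-reversed here */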
__noswap_splatq_lane_f32(__rev2_47, __p3_47); \ + __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \ + __ret_47; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ + int32x4_t __s0_48 = __p0_48; \ + int32x4_t __s1_48 = __p1_48; \ + int32x2_t __s2_48 = __p2_48; \ + int32x4_t __ret_48; \ + __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \ + __ret_48; \ +}) +#else +#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ + int32x4_t __s0_49 = __p0_49; \ + int32x4_t __s1_49 = __p1_49; \ + int32x2_t __s2_49 = __p2_49; \ + int32x4_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \ + int32x4_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \ + int32x2_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \ + int32x4_t __ret_49; \ + __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \ + __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \ + __ret_49; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ + int16x8_t __s0_50 = __p0_50; \ + int16x8_t __s1_50 = __p1_50; \ + int16x4_t __s2_50 = __p2_50; \ + int16x8_t __ret_50; \ + __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \ + __ret_50; \ +}) +#else +#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ + int16x8_t __s0_51 = __p0_51; \ + int16x8_t __s1_51 = __p1_51; \ + int16x4_t __s2_51 = __p2_51; \ + int16x8_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \ + int16x8_t __ret_51; \ + __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \ + __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_51; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ + uint32x2_t __s0_52 = __p0_52; \ + uint32x2_t __s1_52 = __p1_52; \ + uint32x2_t __s2_52 = __p2_52; \ + uint32x2_t __ret_52; \ + __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \ + __ret_52; \ +}) +#else +#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ + uint32x2_t __s0_53 = __p0_53; \ + uint32x2_t __s1_53 = __p1_53; \ + uint32x2_t __s2_53 = __p2_53; \ + uint32x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \ + uint32x2_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \ + uint32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ + uint32x2_t __ret_53; \ + __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \ + __ret_53; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + uint16x4_t __s0_54 = __p0_54; \ + uint16x4_t __s1_54 = __p1_54; \ + uint16x4_t __s2_54 = __p2_54; \ + uint16x4_t __ret_54; \ + __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \ + __ret_54; \ +}) +#else +#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + uint16x4_t __s0_55 = __p0_55; 
\ + uint16x4_t __s1_55 = __p1_55; \ + uint16x4_t __s2_55 = __p2_55; \ + uint16x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \ + uint16x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \ + uint16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ + uint16x4_t __ret_55; \ + __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \ + __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \ + __ret_55; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + float32x2_t __s0_56 = __p0_56; \ + float32x2_t __s1_56 = __p1_56; \ + float32x2_t __s2_56 = __p2_56; \ + float32x2_t __ret_56; \ + __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \ + __ret_56; \ +}) +#else +#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + float32x2_t __s0_57 = __p0_57; \ + float32x2_t __s1_57 = __p1_57; \ + float32x2_t __s2_57 = __p2_57; \ + float32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ + float32x2_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ + float32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ + float32x2_t __ret_57; \ + __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \ + __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \ + __ret_57; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + int32x2_t __s0_58 = __p0_58; \ + int32x2_t __s1_58 = __p1_58; \ + int32x2_t __s2_58 = __p2_58; \ + int32x2_t __ret_58; \ + __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \ + __ret_58; \ +}) +#else +#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + int32x2_t __s0_59 = __p0_59; \ + int32x2_t __s1_59 = __p1_59; \ + int32x2_t __s2_59 = __p2_59; \ + int32x2_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \ + int32x2_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \ + int32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \ + int32x2_t __ret_59; \ + __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \ + __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \ + __ret_59; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + int16x4_t __s0_60 = __p0_60; \ + int16x4_t __s1_60 = __p1_60; \ + int16x4_t __s2_60 = __p2_60; \ + int16x4_t __ret_60; \ + __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \ + __ret_60; \ +}) +#else +#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + int16x4_t __s0_61 = __p0_61; \ + int16x4_t __s1_61 = __p1_61; \ + int16x4_t __s2_61 = __p2_61; \ + int16x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ + int16x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \ + int16x4_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \ + int16x4_t __ret_61; \ + __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ + __ret_61; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, 
uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t 
vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
+#else
+__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 - __rev1 * __rev2;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+  int16x8_t __ret;
+  __ret = __p0 - __p1 * __p2;
+  return __ret;
+}
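/*
 * The vmlsq_ and vmls_ intrinsics in this block implement NEON
 * multiply-subtract: each lane of the result is __p0 - __p1 * __p2.
 * On big-endian targets (the #else branches) the operands are
 * lane-reversed with __builtin_shufflevector before the arithmetic and
 * the result is reversed back, so lane numbering matches the
 * little-endian view. A minimal usage sketch, with illustrative values
 * only:
 *
 *   int16x8_t acc = vmovq_n_s16(10);       // every lane = 10
 *   int16x8_t a   = vmovq_n_s16(2);
 *   int16x8_t b   = vmovq_n_s16(3);
 *   int16x8_t r   = vmlsq_s16(acc, a, b);  // every lane: 10 - 2 * 3 = 4
 */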
+#else +__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; 
__rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + uint32x4_t __s0_62 = __p0_62; \ + uint32x4_t __s1_62 = __p1_62; \ + uint32x2_t __s2_62 = __p2_62; \ + uint32x4_t __ret_62; \ + __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \ + __ret_62; \ +}) +#else +#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + uint32x4_t __s0_63 = __p0_63; \ + uint32x4_t __s1_63 = __p1_63; \ + uint32x2_t __s2_63 = __p2_63; \ + uint32x4_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \ + uint32x4_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \ + uint32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ + uint32x4_t __ret_63; \ + __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \ + __ret_63; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + uint16x8_t __s0_64 = __p0_64; \ + uint16x8_t __s1_64 = __p1_64; \ + uint16x4_t __s2_64 = __p2_64; \ + uint16x8_t __ret_64; \ + __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \ + __ret_64; \ +}) +#else +#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + uint16x8_t __s0_65 = __p0_65; \ + uint16x8_t __s1_65 = __p1_65; \ + uint16x4_t __s2_65 = __p2_65; \ + uint16x8_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ + uint16x8_t __ret_65; \ + __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_65; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ 
({ \ + float32x4_t __s0_66 = __p0_66; \ + float32x4_t __s1_66 = __p1_66; \ + float32x2_t __s2_66 = __p2_66; \ + float32x4_t __ret_66; \ + __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \ + __ret_66; \ +}) +#else +#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + float32x4_t __s0_67 = __p0_67; \ + float32x4_t __s1_67 = __p1_67; \ + float32x2_t __s2_67 = __p2_67; \ + float32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ + float32x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ + float32x2_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ + float32x4_t __ret_67; \ + __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \ + __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ + __ret_67; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + int32x4_t __s0_68 = __p0_68; \ + int32x4_t __s1_68 = __p1_68; \ + int32x2_t __s2_68 = __p2_68; \ + int32x4_t __ret_68; \ + __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \ + __ret_68; \ +}) +#else +#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + int32x4_t __s0_69 = __p0_69; \ + int32x4_t __s1_69 = __p1_69; \ + int32x2_t __s2_69 = __p2_69; \ + int32x4_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \ + int32x4_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \ + int32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ + int32x4_t __ret_69; \ + __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \ + __ret_69; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + int16x8_t __s0_70 = __p0_70; \ + int16x8_t __s1_70 = __p1_70; \ + int16x4_t __s2_70 = __p2_70; \ + int16x8_t __ret_70; \ + __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \ + __ret_70; \ +}) +#else +#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + int16x8_t __s0_71 = __p0_71; \ + int16x8_t __s1_71 = __p1_71; \ + int16x4_t __s2_71 = __p2_71; \ + int16x8_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \ + int16x8_t __ret_71; \ + __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_71; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + uint32x2_t __s0_72 = __p0_72; \ + uint32x2_t __s1_72 = __p1_72; \ + uint32x2_t __s2_72 = __p2_72; \ + uint32x2_t __ret_72; \ + __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \ + __ret_72; \ +}) +#else +#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ + uint32x2_t __s0_73 = __p0_73; \ + uint32x2_t __s1_73 = __p1_73; \ + uint32x2_t __s2_73 = __p2_73; \ + uint32x2_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \ + uint32x2_t __rev1_73; __rev1_73 = 
__builtin_shufflevector(__s1_73, __s1_73, 1, 0); \ + uint32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ + uint32x2_t __ret_73; \ + __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \ + __ret_73; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + uint16x4_t __s0_74 = __p0_74; \ + uint16x4_t __s1_74 = __p1_74; \ + uint16x4_t __s2_74 = __p2_74; \ + uint16x4_t __ret_74; \ + __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \ + __ret_74; \ +}) +#else +#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + uint16x4_t __s0_75 = __p0_75; \ + uint16x4_t __s1_75 = __p1_75; \ + uint16x4_t __s2_75 = __p2_75; \ + uint16x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ + uint16x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \ + uint16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ + uint16x4_t __ret_75; \ + __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \ + __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ + __ret_75; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ + float32x2_t __s0_76 = __p0_76; \ + float32x2_t __s1_76 = __p1_76; \ + float32x2_t __s2_76 = __p2_76; \ + float32x2_t __ret_76; \ + __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \ + __ret_76; \ +}) +#else +#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ + float32x2_t __s0_77 = __p0_77; \ + float32x2_t __s1_77 = __p1_77; \ + float32x2_t __s2_77 = __p2_77; \ + float32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ + float32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ + float32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ + float32x2_t __ret_77; \ + __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \ + __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \ + __ret_77; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + int32x2_t __s0_78 = __p0_78; \ + int32x2_t __s1_78 = __p1_78; \ + int32x2_t __s2_78 = __p2_78; \ + int32x2_t __ret_78; \ + __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \ + __ret_78; \ +}) +#else +#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + int32x2_t __s0_79 = __p0_79; \ + int32x2_t __s1_79 = __p1_79; \ + int32x2_t __s2_79 = __p2_79; \ + int32x2_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \ + int32x2_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \ + int32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ + int32x2_t __ret_79; \ + __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \ + __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \ + __ret_79; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + int16x4_t __s0_80 = __p0_80; \ + int16x4_t __s1_80 = __p1_80; \ + int16x4_t __s2_80 = __p2_80; \ + int16x4_t __ret_80; \ + __ret_80 = __s0_80 - __s1_80 * 
splat_lane_s16(__s2_80, __p3_80); \
+  __ret_80; \
+})
+#else
+#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
+  int16x4_t __s0_81 = __p0_81; \
+  int16x4_t __s1_81 = __p1_81; \
+  int16x4_t __s2_81 = __p2_81; \
+  int16x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
+  int16x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
+  int16x4_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
+  int16x4_t __ret_81; \
+  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
+  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
+  __ret_81; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint32x4_t __ret;
+  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+  return __ret;
+}
+#else
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint16x8_t __ret;
+  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+  return __ret;
+}
+#else
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __ret;
+  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+  return __ret;
+}
+#else
+__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+  int32x4_t __ret;
+  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+  return __ret;
+}
+#else
+__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+  int16x8_t __ret;
+  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+  return __ret;
+}
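/*
 * The _n variants broadcast their scalar operand across all lanes with a
 * vector literal, e.g. (int16x8_t) {__p2, __p2, ...}, and then apply the
 * same multiply-subtract. A small sketch, with illustrative values only:
 *
 *   int16x4_t acc = vmov_n_s16(8);          // every lane = 8
 *   int16x4_t b   = vmov_n_s16(5);
 *   int16x4_t r   = vmls_n_s16(acc, b, 1);  // every lane: 8 - 5 * 1 = 3
 */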
+#else +__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t 
__ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, 
__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vmov_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmov_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmov_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vmov_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; 
+} +#else +__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vmovl_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vmovl_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vmovl_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t 
__noswap_vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vmovn_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vmovn_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vmovn_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint32x4_t 
vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 * __rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 
5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \ + uint32x4_t __s0_82 = __p0_82; \ + uint32x2_t __s1_82 = __p1_82; \ + uint32x4_t __ret_82; \ + __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \ + __ret_82; \ +}) +#else +#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \ + uint32x4_t __s0_83 = __p0_83; \ + uint32x2_t __s1_83 = __p1_83; \ + uint32x4_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \ + uint32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ + uint32x4_t __ret_83; \ + __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \ + __ret_83; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \ + uint16x8_t __s0_84 = __p0_84; \ + uint16x4_t __s1_84 = __p1_84; \ + uint16x8_t __ret_84; \ + __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \ + __ret_84; \ +}) +#else +#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \ + uint16x8_t __s0_85 = __p0_85; \ + uint16x4_t __s1_85 = __p1_85; \ + uint16x8_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ + uint16x8_t __ret_85; \ + __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \ + __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_85; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ + float32x4_t __s0_86 = __p0_86; \ + float32x2_t __s1_86 = __p1_86; \ + float32x4_t __ret_86; \ + __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \ + __ret_86; \ +}) +#else +#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ + float32x4_t __s0_87 = __p0_87; \ + float32x2_t __s1_87 = __p1_87; \ + float32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + float32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ + float32x4_t __ret_87; \ + __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \ + __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ + __ret_87; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \ + int32x4_t __s0_88 = __p0_88; \ + int32x2_t __s1_88 = 
__p1_88; \ + int32x4_t __ret_88; \ + __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \ + __ret_88; \ +}) +#else +#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \ + int32x4_t __s0_89 = __p0_89; \ + int32x2_t __s1_89 = __p1_89; \ + int32x4_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \ + int32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \ + int32x4_t __ret_89; \ + __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \ + __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \ + __ret_89; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \ + int16x8_t __s0_90 = __p0_90; \ + int16x4_t __s1_90 = __p1_90; \ + int16x8_t __ret_90; \ + __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \ + __ret_90; \ +}) +#else +#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \ + int16x8_t __s0_91 = __p0_91; \ + int16x4_t __s1_91 = __p1_91; \ + int16x8_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \ + int16x8_t __ret_91; \ + __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \ + __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_91; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ + uint32x2_t __s0_92 = __p0_92; \ + uint32x2_t __s1_92 = __p1_92; \ + uint32x2_t __ret_92; \ + __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \ + __ret_92; \ +}) +#else +#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ + uint32x2_t __s0_93 = __p0_93; \ + uint32x2_t __s1_93 = __p1_93; \ + uint32x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \ + uint32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ + uint32x2_t __ret_93; \ + __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \ + __ret_93; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ + uint16x4_t __s0_94 = __p0_94; \ + uint16x4_t __s1_94 = __p1_94; \ + uint16x4_t __ret_94; \ + __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \ + __ret_94; \ +}) +#else +#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ + uint16x4_t __s0_95 = __p0_95; \ + uint16x4_t __s1_95 = __p1_95; \ + uint16x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ + uint16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ + uint16x4_t __ret_95; \ + __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ + __ret_95; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ + float32x2_t __s0_96 = __p0_96; \ + float32x2_t __s1_96 = __p1_96; \ + float32x2_t __ret_96; \ + __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \ + __ret_96; \ +}) +#else +#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ + float32x2_t __s0_97 = __p0_97; \ + float32x2_t __s1_97 = __p1_97; \ + float32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ + float32x2_t __rev1_97; __rev1_97 = 
__builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ + float32x2_t __ret_97; \ + __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \ + __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ + __ret_97; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \ + int32x2_t __s0_98 = __p0_98; \ + int32x2_t __s1_98 = __p1_98; \ + int32x2_t __ret_98; \ + __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \ + __ret_98; \ +}) +#else +#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \ + int32x2_t __s0_99 = __p0_99; \ + int32x2_t __s1_99 = __p1_99; \ + int32x2_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \ + int32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ + int32x2_t __ret_99; \ + __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \ + __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \ + __ret_99; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \ + int16x4_t __s0_100 = __p0_100; \ + int16x4_t __s1_100 = __p1_100; \ + int16x4_t __ret_100; \ + __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \ + __ret_100; \ +}) +#else +#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \ + int16x4_t __s0_101 = __p0_101; \ + int16x4_t __s1_101 = __p1_101; \ + int16x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \ + int16x4_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \ + int16x4_t __ret_101; \ + __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \ + __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \ + __ret_101; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __ret; + __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = __p0 * (int32x4_t) 
{__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __ret; + __ret = __p0 * (uint32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = __rev0 * (uint32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __ret; + __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + __ret = __p0 * (float32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = __rev0 * (float32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = __p0 * (int32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = __rev0 * (int32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8_t vmull_p8(poly8x8_t __p0, 
poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t 
__ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ + uint32x2_t __s0_102 = __p0_102; \ + uint32x2_t __s1_102 = __p1_102; \ + uint64x2_t __ret_102; \ + __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \ + __ret_102; \ +}) +#else +#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ + uint32x2_t __s0_103 = __p0_103; \ + uint32x2_t __s1_103 = __p1_103; \ + uint32x2_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ + uint32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ + uint64x2_t __ret_103; \ + __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \ + __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ + __ret_103; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ + uint16x4_t __s0_104 = __p0_104; \ + uint16x4_t __s1_104 = __p1_104; \ + uint32x4_t __ret_104; \ + __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \ + __ret_104; \ +}) +#else +#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ + uint16x4_t __s0_105 = __p0_105; \ + uint16x4_t __s1_105 = __p1_105; \ + uint16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ + uint16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ + uint32x4_t __ret_105; \ + __ret_105 = __noswap_vmull_u16(__rev0_105, 
__noswap_splat_lane_u16(__rev1_105, __p2_105)); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ + __ret_105; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ + int32x2_t __s0_106 = __p0_106; \ + int32x2_t __s1_106 = __p1_106; \ + int64x2_t __ret_106; \ + __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \ + __ret_106; \ +}) +#else +#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ + int32x2_t __s0_107 = __p0_107; \ + int32x2_t __s1_107 = __p1_107; \ + int32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ + int32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ + int64x2_t __ret_107; \ + __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \ + __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \ + __ret_107; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ + int16x4_t __s0_108 = __p0_108; \ + int16x4_t __s1_108 = __p1_108; \ + int32x4_t __ret_108; \ + __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \ + __ret_108; \ +}) +#else +#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ + int16x4_t __s0_109 = __p0_109; \ + int16x4_t __s1_109 = __p1_109; \ + int16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ + int16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ + int32x4_t __ret_109; \ + __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \ + __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \ + __ret_109; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t 
__ret; + __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = 
__p0 | ~__p1; + return __ret; +} +#else +__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | __p1; + 
return __ret; +} +#else +__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vorr_s16(int16x4_t 
__p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vpadal_s32(int64x1_t __p0, 
int32x2_t __p1) { + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) 
__builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vpaddl_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vpaddl_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 
3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vpaddl_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) 
__builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + 
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + 
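+  // (editor's note, annotation only) The trailing integer on every
+  // __builtin_neon_* call is an internal type code for the result/element
+  // type; within this header 51 consistently denotes uint64x2_t, as in the
+  // vpadalq_u32 and vpaddlq_u32 wrappers above.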
__ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = 
(int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \ + int64x2_t __s0_110 = __p0_110; \ + int32x2_t __s1_110 = __p1_110; \ + int32x2_t __s2_110 = __p2_110; \ + int64x2_t 
__ret_110; \ + __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \ + __ret_110; \ +}) +#else +#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \ + int64x2_t __s0_111 = __p0_111; \ + int32x2_t __s1_111 = __p1_111; \ + int32x2_t __s2_111 = __p2_111; \ + int64x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + int32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \ + int64x2_t __ret_111; \ + __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \ + int32x4_t __s0_112 = __p0_112; \ + int16x4_t __s1_112 = __p1_112; \ + int16x4_t __s2_112 = __p2_112; \ + int32x4_t __ret_112; \ + __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \ + __ret_112; \ +}) +#else +#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \ + int32x4_t __s0_113 = __p0_113; \ + int16x4_t __s1_113 = __p1_113; \ + int16x4_t __s2_113 = __p2_113; \ + int32x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ + int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ + int16x4_t __rev2_113; __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \ + int32x4_t __ret_113; \ + __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \ + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ + __ret_113; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ + int64x2_t __s0_114 = __p0_114; \ + int32x2_t __s1_114 = __p1_114; \ + int32x2_t __s2_114 = __p2_114; \ + int64x2_t __ret_114; \ + __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ + __ret_114; \ +}) +#else +#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ + int64x2_t __s0_115 = __p0_115; \ + int32x2_t __s1_115 = __p1_115; \ + int32x2_t __s2_115 = __p2_115; \ + int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ + int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ + int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ + int64x2_t __ret_115; \ + __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ + __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ + __ret_115; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ + int32x4_t __s0_116 = __p0_116; \ + int16x4_t __s1_116 = __p1_116; \ + int16x4_t __s2_116 = __p2_116; \ + int32x4_t __ret_116; \ + __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ + __ret_116; \ +}) +#else +#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ + int32x4_t __s0_117 = __p0_117; \ + int16x4_t __s1_117 = __p1_117; \ + int16x4_t __s2_117 = 
__p2_117; \ + int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ + int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ + int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ + int32x4_t __ret_117; \ + __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ + __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ + __ret_117; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \ + int32x2_t __s0_118 = __p0_118; \ + int32x2_t __s1_118 = __p1_118; \ + int64x2_t __ret_118; \ + __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \ + __ret_118; \ +}) +#else +#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \ + int32x2_t __s0_119 = __p0_119; \ + int32x2_t __s1_119 = __p1_119; \ + int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ + int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ + int64x2_t __ret_119; \ + __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \ + __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ + __ret_119; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \ + int16x4_t __s0_120 = __p0_120; \ + int16x4_t __s1_120 = __p1_120; \ + int32x4_t __ret_120; \ + __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \ + __ret_120; \ +}) +#else +#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \ + int16x4_t __s0_121 = __p0_121; \ + int16x4_t __s1_121 = __p1_121; \ + int16x4_t __rev0_121; __rev0_121 = 
__builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ + int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ + int32x4_t __ret_121; \ + __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \ + __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ + __ret_121; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + 
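+  // (editor's note, annotation only) The shuffle above restores the narrowed
+  // lanes to memory order, mirroring the operand reversal at function entry.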
return __ret; +} +__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqmovn_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqmovn_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqmovn_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) 
__builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} 
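+// The vqneg family is a saturating negate: the single value that plain
+// negation cannot represent is clamped instead of wrapping around.
+// Illustrative sketch (hypothetical values, not part of the generated header):
+//   int8x8_t v = vdup_n_s8(-128);   // INT8_MIN
+//   int8x8_t r = vqneg_s8(v);       // every lane becomes 127, not -128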
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + 
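+  // The _n_ ("by scalar") variants splat the scalar operand into a vector and
+  // defer to the vector-vector intrinsic; for 32-bit lanes vqrdmulh computes
+  // saturate((2*a*b + (1 << 31)) >> 32), i.e. the rounded high half of the
+  // doubled product.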
return __ret; +} +#else +__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 
1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) 
__builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + 
__ret; \ +}) +#else +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s64(__p0, __p1) 
__extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) 
__builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t 
vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + 
uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + 
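+// vqshl(q)_n_* is a saturating shift left by an immediate: lanes that would
+// overflow the element type are clamped to its limits rather than wrapped.
+// Illustrative sketch (hypothetical values, not part of the generated header):
+//   uint8x8_t v = vdup_n_u8(200);
+//   uint8x8_t r = vqshl_n_u8(v, 1);   // 200 << 1 == 400 saturates to 255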
+#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) 
__builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshluq_n_s16(__p0, 
__p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 
18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + 
uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) 
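+// The trailing integer constant selects the operand type for the polymorphic
+// builtin: the low bits encode the element type (0 = int8, 2 = int32,
+// 9 = float32), +16 marks an unsigned type, +32 a 128-bit q-register vector.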
__builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) 
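+// __noswap_ helpers skip the lane reversal; other big-endian bodies call them
+// on operands that have already been reversed.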
__builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; 
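+// vrecpe* returns a reciprocal estimate and vrecps* computes the
+// Newton-Raphson step 2.0 - a*b, so one refinement looks like:
+//   float32x4_t r = vrecpeq_f32(d);
+//   r = vmulq_f32(r, vrecpsq_f32(d, r));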
+ __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, 
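+// The vrev16/vrev32/vrev64 family below is pure __builtin_shufflevector, with
+// no NEON builtin call; the extra big-endian reversals are expected to fold
+// away during instruction selection.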
__rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 
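+// vrev32 reverses element order within each 32-bit group: 8-bit lanes go
+// 3,2,1,0 / 7,6,5,4 / ... and 16-bit lanes go 1,0 / 3,2 / ...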
6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t 
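+// (__ai, defined near the top of this header, expands to
+// static __inline__ __attribute__((__always_inline__, __nodebug__)).)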
vrev32q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = 
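+// vrev64 reverses elements within each 64-bit half, hence 7..0 followed by
+// 15..8 for the 8-bit q-register form here.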
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai 
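+// Lane reversal is element-type-agnostic, so float vectors reuse the same
+// shuffle patterns as the integer forms.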
float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
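+// For two-lane vectors the reverse/swap/reverse sequence below is a 1,0
+// shuffle applied three times, which composes to the same single lane swap
+// the little-endian path performs.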
float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 
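+// vrhadd* is a rounding halving add: (a + b + 1) >> 1 per lane, computed
+// without intermediate overflow.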
7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = 
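+// Names with a q suffix operate on 128-bit q-register vectors; the plain
+// forms operate on 64-bit d-register vectors.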
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, 
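+// vrshl* shifts each lane left by the signed per-lane count in the second
+// operand; a negative count performs a rounding shift right.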
(int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = 
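+// The shift-count vector is always a signed type, even when the data vector
+// is unsigned (e.g. vrshlq_u16 takes an int16x8_t count).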
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = 
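+// One-lane 64-bit vectors have no lane order to fix, so vrshl_u64/vrshl_s64
+// are defined once, outside the endianness guard. The immediate-count shifts
+// that follow (vrshrq_n_*, vrshr_n_*, vrshrn_n_*) are macros so the count
+// reaches the builtin as the compile-time constant it must be.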
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vrshrq_n_s8(__p0, 
__p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u16(__p0, __p1) 
__extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = 
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+  __ret; \
+})
+#else
+#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+  __ret; \
+})
+#else
+#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+  __ret; \
+})
+#endif
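
[Editorial note, not part of the patch.] The vrshrn_n_* macros above shift each wide lane right by the immediate with rounding and then narrow it to half width; the __noswap_* variants are internal helpers that other big-endian wrappers in this header call, not public API. A short sketch under the same assumptions as above, a typical fixed-point scale-down:

    #include <arm_neon.h>

    /* Drop Q15 products in 32-bit accumulators back to 16 bits,
       rounding to nearest instead of truncating. */
    int16x4_t q15_round_narrow(int32x4_t acc) {
      return vrshrn_n_s32(acc, 15);
    }
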
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
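
[Editorial note, not part of the patch.] vrsqrte gives only a coarse reciprocal square root estimate; each vrsqrts(a, b) step computes (3 - a*b)/2, the Newton-Raphson correction factor, so the two are meant to be used together. A sketch of the usual refinement pattern, assuming a NEON-enabled target:

    #include <arm_neon.h>

    /* Refine the initial rsqrt estimate with two Newton-Raphson
       steps: e' = e * (3 - x*e*e)/2, giving roughly
       single-precision accuracy for 1/sqrt(x). */
    float32x4_t rsqrt_refined(float32x4_t x) {
      float32x4_t e = vrsqrteq_f32(x);
      e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));
      e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));
      return e;
    }
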
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+  __ret; \
+})
+#else
+#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+  __ret; \
+})
+#else
+#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+  __ret; \
+})
+#else
+#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+  __ret; \
+})
+#else
+#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+  __ret; \
+})
+#else
+#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+  __ret; \
+})
+#else
+#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+  __ret; \
+})
+#else
+#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+  __ret; \
+})
+#else
+#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+  __ret; \
+})
+#else
+#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+  __ret; \
+})
+#else
+#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x1_t __s1 = __p1; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+  __ret; \
+})
+#else
+#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+  __ret; \
+})
+#else
+#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+  __ret; \
+})
+#else
+#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x1_t __s1 = __p1; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+  __ret; \
+})
+#else
+#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+  return __ret;
+}
+#endif
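
[Editorial note, not part of the patch.] The vrsubhn_* intrinsics above (and their signed counterparts just below) subtract lanewise, add the rounding constant for the half width, and keep only the most significant half of each difference, narrowing the element type. A short sketch under the same assumptions as the earlier notes:

    #include <arm_neon.h>

    /* Rounded high halves of 32-bit differences: each lane yields
       (uint16_t)(((a - b) + (1u << 15)) >> 16), narrowing
       uint32x4_t to uint16x4_t. */
    uint16x4_t diff_high_halves(uint32x4_t a, uint32x4_t b) {
      return vrsubhn_u32(a, b);
    }
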
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8_t __s0 = __p0; \
+  poly8x8_t __s1 = __p1; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8_t __s0 = __p0; \
+  poly8x8_t __s1 = __p1; \
+  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8_t __s0 = __p0; \
+  poly8x8_t __s1 = __p1; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16_t __s0 = __p0; \
+  poly16x4_t __s1 = __p1; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16_t __s0 = __p0; \
+  poly16x4_t __s1 = __p1; \
+  poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16_t __s0 = __p0; \
+  poly16x4_t __s1 = __p1; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8_t __s0 = __p0; \
+  poly8x16_t __s1 = __p1; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8_t __s0 = __p0; \
+  poly8x16_t __s1 = __p1; \
+  poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8_t __s0 = __p0; \
+  poly8x16_t __s1 = __p1; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16_t __s0 = __p0; \
+  poly16x8_t __s1 = __p1; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16_t __s0 = __p0; \
+  poly16x8_t __s1 = __p1; \
+  poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16_t __s0 = __p0; \
+  poly16x8_t __s1 = __p1; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8_t __s0 = __p0; \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8_t __s0 = __p0; \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8_t __s0 = __p0; \
+  uint8x16_t __s1 = __p1; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret; \
+})
+#endif
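
[Editorial note, not part of the patch.] The vset_lane_*/vsetq_lane_* macros return a copy of the vector with one lane replaced; the lane index must be a constant expression, which is one reason these are macros rather than inline functions. A minimal sketch, same assumptions as before:

    #include <arm_neon.h>

    /* Replace lane 0 of a 16-byte vector; the other 15 lanes
       pass through unchanged. */
    uint8x16_t replace_first_byte(uint8x16_t v, uint8_t b) {
      return vsetq_lane_u8(b, v, 0);
    }
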
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  uint32x4_t __s1 = __p1; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  uint64x2_t __s1 = __p1; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  uint16x8_t __s1 = __p1; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8_t __s0 = __p0; \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8_t __s0 = __p0; \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8_t __s0 = __p0; \
+  int8x16_t __s1 = __p1; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  float32x4_t __s1 = __p1; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  int32x4_t __s1 = __p1; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __s0 = __p0; \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __s0 = __p0; \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __s0 = __p0; \
+  int64x2_t __s1 = __p1; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16_t __s0 = __p0; \
+  int16x8_t __s1 = __p1; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8_t __s0 = __p0; \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8_t __s0 = __p0; \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8_t __s0 = __p0; \
+  uint8x8_t __s1 = __p1; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  uint32x2_t __s1 = __p1; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  uint64x1_t __s1 = __p1; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  uint16x4_t __s1 = __p1; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8_t __s0 = __p0; \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8_t __s0 = __p0; \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8_t __s0 = __p0; \
+  int8x8_t __s1 = __p1; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  float32x2_t __s1 = __p1; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  int32x2_t __s1 = __p1; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+  int64_t __s0 = __p0; \
+  int64x1_t __s1 = __p1; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16_t __s0 = __p0; \
+  int16x4_t __s1 = __p1; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
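
[Editorial note, not part of the patch.] The vshlq_*/vshl_* functions defined next shift each lane of the first operand left by the signed per-lane count in the second operand; a negative count shifts right instead. A short sketch, same assumptions as the earlier notes:

    #include <arm_neon.h>

    /* Per-lane variable shifts: lanes 0 and 1 shift left by 1 and 2,
       lane 2 shifts right by 1 (negative count), lane 3 is unchanged. */
    int32x4_t shift_each_lane(int32x4_t v) {
      static const int32_t counts[4] = { 1, 2, -1, 0 };
      return vshlq_s32(v, vld1q_s32(counts));
    }
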
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+  return __ret;
+}
+#else
+__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+  return __ret;
+}
+#else
+__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+  return __ret;
+}
+#else
+__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
+  int64x1_t __ret;
+  __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#else
+#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#else
+#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#else
+#define vshl_n_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define vshl_n_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vshl_n_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define vshl_n_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#else
+#define vshl_n_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define vshl_n_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vshl_n_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define vshl_n_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
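
[Editorial note, not part of the patch.] Unlike the plain vshl_n_* immediate shifts above, the vshll_n_* macros defined next widen each lane to double width before shifting, so no high bits are lost. A short sketch, same assumptions as the earlier notes:

    #include <arm_neon.h>

    /* Zero-extend each uint8_t lane to 16 bits, then shift left
       by 4, i.e. multiply by 16 without overflow. */
    uint16x8_t widen_times_16(uint8x8_t v) {
      return vshll_n_u8(v, 4);
    }
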
__builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define 
__noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) 
__builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 
19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t 
__s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) 
__builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ 
+ __ret; \ +}) +#else +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, 
(int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsli_n_s16(__p0, __p1, __p2) 
__extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsra_n_u8(__p0, __p1, __p2) 
__extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) 
__builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + 
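/*
 * Illustrative sketch, not part of the generated header: how the constant-
 * shift and shift-insert intrinsics defined above compose. Assumes an ARM
 * target with NEON enabled; the helper name pack_nibbles_u8 is hypothetical.
 * Note the pattern shared by every macro in this family: the __LITTLE_ENDIAN__
 * branch calls the builtin directly, while the big-endian branch reverses
 * lanes with __builtin_shufflevector before and after the call so element
 * indices behave identically on both endiannesses; the __noswap_* variants
 * skip that reversal for use inside other already-reversed macros. Each shift
 * amount must be an integer constant expression within the lane-width limits,
 * or the underlying builtin is rejected at compile time.
 */
#include <arm_neon.h>
#include <stdint.h>

static inline void pack_nibbles_u8(const uint8_t *lo8, const uint8_t *hi8,
                                   uint8_t out[8])
{
    uint8x8_t lo = vshr_n_u8(vld1_u8(lo8), 4);   /* lo >> 4: high nibble down */
    uint8x8_t hi = vld1_u8(hi8);
    /* vsli_n_u8 shifts hi left by 4 and inserts it over lo, preserving only
       the low 4 bits of lo: packed = ((hi & 0xF) << 4) | (lo >> 4) per lane,
       a single VSLI instruction instead of a shift/mask/or sequence. */
    uint8x8_t packed = vsli_n_u8(lo, hi, 4);
    vst1_u8(out, packed);                        /* store 8 packed bytes */
}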
+#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = 
__p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 
5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, 
(int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ +}) +#else +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ +}) +#else +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ +}) +#else +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ +}) +#else +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ +}) +#else +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); 
\ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ +}) +#else +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ +}) +#else +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ +}) +#else +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ +}) +#else +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ +}) +#else +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ +}) +#else +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ +}) +#else +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ +}) +#else +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ +}) +#else +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ +}) +#else +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ +}) +#endif + +#define vst1_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ +}) +#else +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ +}) +#else +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ +}) +#else +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ +}) +#else +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ +}) +#endif + +#define vst1_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ +}) +#else +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ +}) +#else +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ +}) +#else +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + 
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ +}) +#else +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ +}) +#else +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ +}) +#else +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ +}) +#else +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ +}) +#else +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ +}) +#else +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ +}) +#else +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ +}) +#else +#define vst1q_lane_f32(__p0, __p1, __p2) 
__extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ +}) +#else +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ +}) +#else +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ +}) +#else +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ +}) +#else +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ +}) +#else +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ +}) +#endif + +#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ +}) +#else +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ +}) +#else +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t 
__s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ +}) +#else +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ +}) +#else +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ +}) +#endif + +#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ +}) +#else +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) +#else +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +}) +#else +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +}) +#else +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) +#else +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +}) +#else +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 
19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +}) +#else +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +}) +#else +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +}) +#endif + +#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +}) +#else +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + 
__builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +}) +#else +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +}) +#else +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) +#else +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) +#else +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +}) +#else +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +}) +#else +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +}) +#endif + +#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
+})
+#else
+#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
+  int16x4x3_t __s1 = __p1; \
+  int16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
+})
+#endif
+
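+// vst1_<type>_x4: store four vectors of one element type from a *x4_t struct
+// to consecutive memory. The same pattern as above applies: on big-endian
+// targets every .val[] member is lane-reversed before the builtin is called.
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
+  poly8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
+})
+#else
+#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
+  poly8x8x4_t __s1 = __p1; \
+  poly8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
+  poly16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
+})
+#else
+#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
+  poly16x4x4_t __s1 = __p1; \
+  poly16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
+  poly8x16x4_t __s1 = __p1; \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
+})
+#else
+#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
+  poly8x16x4_t __s1 = __p1; \
+  poly8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
+  poly16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2],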
(int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +}) +#else +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +}) +#else +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x4(__p0, __p1) 
__extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +}) +#else +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 
(int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +}) +#else +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +}) +#else +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +}) +#endif + +#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
+  int16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
+})
+#else
+#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
+  int16x4x4_t __s1 = __p1; \
+  int16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p8(__p0, __p1) __extension__ ({ \
+  poly8x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
+})
+#else
+#define vst2_p8(__p0, __p1) __extension__ ({ \
+  poly8x8x2_t __s1 = __p1; \
+  poly8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p16(__p0, __p1) __extension__ ({ \
+  poly16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
+})
+#else
+#define vst2_p16(__p0, __p1) __extension__ ({ \
+  poly16x4x2_t __s1 = __p1; \
+  poly16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p8(__p0, __p1) __extension__ ({ \
+  poly8x16x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
+})
+#else
+#define vst2q_p8(__p0, __p1) __extension__ ({ \
+  poly8x16x2_t __s1 = __p1; \
+  poly8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p16(__p0, __p1) __extension__ ({ \
+  poly16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
+})
+#else
+#define vst2q_p16(__p0, __p1) __extension__ ({ \
+  poly16x8x2_t __s1 = __p1; \
+  poly16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u8(__p0, __p1) __extension__ ({ \
+  uint8x16x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
+})
+#else
+#define vst2q_u8(__p0, __p1) __extension__ ({ \
+  uint8x16x2_t __s1 = __p1; \
+  uint8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u32(__p0, __p1) __extension__ ({ \
+  uint32x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
+})
+#else
+#define vst2q_u32(__p0, __p1) __extension__ ({ \
+  uint32x4x2_t __s1 = __p1; \
+  uint32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u16(__p0, __p1) __extension__ ({ \
+  uint16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
+})
+#else
+#define vst2q_u16(__p0, __p1) __extension__ ({ \
+  uint16x8x2_t __s1 = __p1; \
+  uint16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s8(__p0, __p1) __extension__ ({ \
+  int8x16x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
+})
+#else
+#define vst2q_s8(__p0, __p1) __extension__ ({ \
+  int8x16x2_t __s1 = __p1; \
+  int8x16x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_f32(__p0, __p1) __extension__ ({ \
+  float32x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
+})
+#else
+#define vst2q_f32(__p0, __p1) __extension__ ({ \
+  float32x4x2_t __s1 = __p1; \
+  float32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s32(__p0, __p1) __extension__ ({ \
+  int32x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
+})
+#else
+#define vst2q_s32(__p0, __p1) __extension__ ({ \
+  int32x4x2_t __s1 = __p1; \
+  int32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s16(__p0, __p1) __extension__ ({ \
+  int16x8x2_t __s1 =
__p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
+})
+#else
+#define vst2q_s16(__p0, __p1) __extension__ ({ \
+  int16x8x2_t __s1 = __p1; \
+  int16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u8(__p0, __p1) __extension__ ({ \
+  uint8x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
+})
+#else
+#define vst2_u8(__p0, __p1) __extension__ ({ \
+  uint8x8x2_t __s1 = __p1; \
+  uint8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u32(__p0, __p1) __extension__ ({ \
+  uint32x2x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
+})
+#else
+#define vst2_u32(__p0, __p1) __extension__ ({ \
+  uint32x2x2_t __s1 = __p1; \
+  uint32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
+})
+#endif
+
+#define vst2_u64(__p0, __p1) __extension__ ({ \
+  uint64x1x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u16(__p0, __p1) __extension__ ({ \
+  uint16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
+})
+#else
+#define vst2_u16(__p0, __p1) __extension__ ({ \
+  uint16x4x2_t __s1 = __p1; \
+  uint16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s8(__p0, __p1) __extension__ ({ \
+  int8x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
+})
+#else
+#define vst2_s8(__p0, __p1) __extension__ ({ \
+  int8x8x2_t __s1 = __p1; \
+  int8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_f32(__p0, __p1) __extension__ ({ \
+  float32x2x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
+})
+#else
+#define vst2_f32(__p0, __p1) __extension__ ({ \
+  float32x2x2_t __s1 = __p1; \
+  float32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s32(__p0, __p1) __extension__ ({ \
+  int32x2x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
+})
+#else
+#define vst2_s32(__p0, __p1) __extension__ ({ \
+  int32x2x2_t __s1 = __p1; \
+  int32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
+})
+#endif
+
+#define vst2_s64(__p0, __p1) __extension__ ({ \
+  int64x1x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s16(__p0, __p1) __extension__ ({ \
+  int16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
+})
+#else
+#define vst2_s16(__p0, __p1) __extension__ ({ \
+  int16x4x2_t __s1 = __p1; \
+  int16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
+})
+#else
+#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x2_t __s1 = __p1; \
+  poly8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
+})
+#else
+#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x2_t __s1 = __p1; \
+  poly16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
+})
+#else
+#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x2_t __s1 = __p1; \
+  poly16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
+})
+#else
+#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x2_t __s1 = __p1; \
+  uint32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0],
3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
+})
+#else
+#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x2_t __s1 = __p1; \
+  uint16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
+})
+#else
+#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x2_t __s1 = __p1; \
+  float32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
+})
+#else
+#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x2_t __s1 = __p1; \
+  int32x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
+})
+#else
+#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x2_t __s1 = __p1; \
+  int16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
+})
+#else
+#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x2_t __s1 = __p1; \
+  uint8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0],
(int8x8_t)__s1.val[1], __p2, 18); \
+})
+#else
+#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x2_t __s1 = __p1; \
+  uint32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
+})
+#else
+#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x2_t __s1 = __p1; \
+  uint16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
+})
+#else
+#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x2_t __s1 = __p1; \
+  int8x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
+})
+#else
+#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x2_t __s1 = __p1; \
+  float32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
+})
+#else
+#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x2_t __s1 = __p1; \
+  int32x2x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
+})
+#else
+#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x2_t __s1 = __p1; \
+  int16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p8(__p0, __p1) __extension__ ({ \
+  poly8x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
+})
+#else
+#define vst3_p8(__p0, __p1) __extension__ ({ \
+  poly8x8x3_t __s1 = __p1; \
+  poly8x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p16(__p0, __p1) __extension__ ({ \
+  poly16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
+})
+#else
+#define vst3_p16(__p0, __p1) __extension__ ({ \
+  poly16x4x3_t __s1 = __p1; \
+  poly16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p8(__p0, __p1) __extension__ ({ \
+  poly8x16x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
+})
+#else
+#define vst3q_p8(__p0, __p1) __extension__ ({ \
+  poly8x16x3_t __s1 = __p1; \
+  poly8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p16(__p0, __p1) __extension__ ({ \
+  poly16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
+})
+#else
+#define vst3q_p16(__p0, __p1) __extension__ ({ \
+  poly16x8x3_t __s1 = __p1; \
+  poly16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u8(__p0, __p1) __extension__ ({ \
+  uint8x16x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
+})
+#else
+#define vst3q_u8(__p0, __p1) __extension__ ({ \
+  uint8x16x3_t __s1 = __p1; \
+  uint8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] =
__builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u32(__p0, __p1) __extension__ ({ \
+  uint32x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
+})
+#else
+#define vst3q_u32(__p0, __p1) __extension__ ({ \
+  uint32x4x3_t __s1 = __p1; \
+  uint32x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u16(__p0, __p1) __extension__ ({ \
+  uint16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
+})
+#else
+#define vst3q_u16(__p0, __p1) __extension__ ({ \
+  uint16x8x3_t __s1 = __p1; \
+  uint16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s8(__p0, __p1) __extension__ ({ \
+  int8x16x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
+})
+#else
+#define vst3q_s8(__p0, __p1) __extension__ ({ \
+  int8x16x3_t __s1 = __p1; \
+  int8x16x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_f32(__p0, __p1) __extension__ ({ \
+  float32x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
+})
+#else
+#define vst3q_f32(__p0, __p1) __extension__ ({ \
+  float32x4x3_t __s1 = __p1; \
+  float32x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s32(__p0, __p1) __extension__ ({ \
+  int32x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
+})
+#else
+#define vst3q_s32(__p0, __p1) __extension__ ({ \
+  int32x4x3_t __s1 = __p1; \
+  int32x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s16(__p0, __p1) __extension__ ({ \
+  int16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
+})
+#else
+#define vst3q_s16(__p0, __p1) __extension__ ({ \
+  int16x8x3_t __s1 = __p1; \
+  int16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u8(__p0, __p1) __extension__ ({ \
+  uint8x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
+})
+#else
+#define vst3_u8(__p0, __p1) __extension__ ({ \
+  uint8x8x3_t __s1 = __p1; \
+  uint8x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u32(__p0, __p1) __extension__ ({ \
+  uint32x2x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
+})
+#else
+#define vst3_u32(__p0, __p1) __extension__ ({ \
+  uint32x2x3_t __s1 = __p1; \
+  uint32x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
+})
+#endif
+
+#define vst3_u64(__p0, __p1) __extension__ ({ \
+  uint64x1x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u16(__p0, __p1) __extension__ ({ \
+  uint16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
+})
+#else
+#define vst3_u16(__p0, __p1) __extension__ ({ \
+  uint16x4x3_t __s1 = __p1; \
+  uint16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s8(__p0, __p1) __extension__ ({ \
+  int8x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
+})
+#else
+#define vst3_s8(__p0, __p1) __extension__ ({ \
+  int8x8x3_t __s1 = __p1; \
+  int8x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_f32(__p0, __p1) __extension__ ({ \
+  float32x2x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
+})
+#else
+#define vst3_f32(__p0, __p1) __extension__ ({ \
+  float32x2x3_t __s1 = __p1; \
+  float32x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s32(__p0, __p1) __extension__ ({ \
+  int32x2x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
+})
+#else
+#define vst3_s32(__p0, __p1) __extension__ ({ \
+  int32x2x3_t __s1 = __p1; \
+  int32x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
+})
+#endif
+
+#define vst3_s64(__p0, __p1) __extension__ ({ \
+  int64x1x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s16(__p0, __p1) __extension__ ({ \
+  int16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
+})
+#else
+#define vst3_s16(__p0, __p1) __extension__ ({ \
+  int16x4x3_t __s1 = __p1; \
+  int16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
+})
+#else
+#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x3_t __s1 = __p1; \
+  poly8x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0,
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
+})
+#else
+#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x3_t __s1 = __p1; \
+  poly16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
+})
+#else
+#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x3_t __s1 = __p1; \
+  poly16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
+})
+#else
+#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x3_t __s1 = __p1; \
+  uint32x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
+})
+#else
+#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x3_t __s1 = __p1; \
+  uint16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
+})
+#else
+#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x3_t __s1 = __p1; \
+  float32x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
+})
+#else
+#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x3_t __s1 = __p1; \
+  int32x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
+})
+#else
+#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x3_t __s1 = __p1; \
+  int16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
+})
+#else
+#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x3_t __s1 = __p1; \
+  uint8x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
+})
+#else
+#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x3_t __s1 = __p1; \
+  uint32x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
+})
+#else
+#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x3_t __s1 = __p1; \
+  uint16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
+})
+#else
+#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x3_t __s1 = __p1; \
+  int8x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
+})
+#else
+#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x3_t __s1 = __p1; \
+  float32x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
+})
+#else
+#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x3_t __s1 = __p1; \
+  int32x2x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
+})
+#else
+#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x3_t __s1 = __p1; \
+  int16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1],
(int8x8_t)__rev1.val[2], __p2, 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p8(__p0, __p1) __extension__ ({ \
+  poly8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
+})
+#else
+#define vst4_p8(__p0, __p1) __extension__ ({ \
+  poly8x8x4_t __s1 = __p1; \
+  poly8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p16(__p0, __p1) __extension__ ({ \
+  poly16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
+})
+#else
+#define vst4_p16(__p0, __p1) __extension__ ({ \
+  poly16x4x4_t __s1 = __p1; \
+  poly16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p8(__p0, __p1) __extension__ ({ \
+  poly8x16x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
+})
+#else
+#define vst4q_p8(__p0, __p1) __extension__ ({ \
+  poly8x16x4_t __s1 = __p1; \
+  poly8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p16(__p0, __p1) __extension__ ({ \
+  poly16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
+})
+#else
+#define vst4q_p16(__p0, __p1) __extension__ ({ \
+  poly16x8x4_t __s1 = __p1; \
+  poly16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0,
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u8(__p0, __p1) __extension__ ({ \
+  uint8x16x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
+})
+#else
+#define vst4q_u8(__p0, __p1) __extension__ ({ \
+  uint8x16x4_t __s1 = __p1; \
+  uint8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u32(__p0, __p1) __extension__ ({ \
+  uint32x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
+})
+#else
+#define vst4q_u32(__p0, __p1) __extension__ ({ \
+  uint32x4x4_t __s1 = __p1; \
+  uint32x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u16(__p0, __p1) __extension__ ({ \
+  uint16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
+})
+#else
+#define vst4q_u16(__p0, __p1) __extension__ ({ \
+  uint16x8x4_t __s1 = __p1; \
+  uint16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s8(__p0, __p1) __extension__ ({ \
+  int8x16x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
+})
+#else
+#define vst4q_s8(__p0, __p1) __extension__ ({ \
+  int8x16x4_t __s1 = __p1; \
+  int8x16x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13,
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_f32(__p0, __p1) __extension__ ({ \
+  float32x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
+})
+#else
+#define vst4q_f32(__p0, __p1) __extension__ ({ \
+  float32x4x4_t __s1 = __p1; \
+  float32x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s32(__p0, __p1) __extension__ ({ \
+  int32x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
+})
+#else
+#define vst4q_s32(__p0, __p1) __extension__ ({ \
+  int32x4x4_t __s1 = __p1; \
+  int32x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s16(__p0, __p1) __extension__ ({ \
+  int16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
+})
+#else
+#define vst4q_s16(__p0, __p1) __extension__ ({ \
+  int16x8x4_t __s1 = __p1; \
+  int16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u8(__p0, __p1) __extension__ ({ \
+  uint8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
+})
+#else
+#define vst4_u8(__p0, __p1) __extension__ ({ \
+  uint8x8x4_t __s1 = __p1; \
+  uint8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] =
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u32(__p0, __p1) __extension__ ({ \
+  uint32x2x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
+})
+#else
+#define vst4_u32(__p0, __p1) __extension__ ({ \
+  uint32x2x4_t __s1 = __p1; \
+  uint32x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
+})
+#endif
+
+#define vst4_u64(__p0, __p1) __extension__ ({ \
+  uint64x1x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u16(__p0, __p1) __extension__ ({ \
+  uint16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
+})
+#else
+#define vst4_u16(__p0, __p1) __extension__ ({ \
+  uint16x4x4_t __s1 = __p1; \
+  uint16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s8(__p0, __p1) __extension__ ({ \
+  int8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
+})
+#else
+#define vst4_s8(__p0, __p1) __extension__ ({ \
+  int8x8x4_t __s1 = __p1; \
+  int8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_f32(__p0, __p1) __extension__ ({ \
+  float32x2x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
+})
+#else
+#define vst4_f32(__p0, __p1) __extension__ ({ \
+  float32x2x4_t __s1 = __p1; \
+  float32x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] =
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s32(__p0, __p1) __extension__ ({ \
+  int32x2x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
+})
+#else
+#define vst4_s32(__p0, __p1) __extension__ ({ \
+  int32x2x4_t __s1 = __p1; \
+  int32x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
+})
+#endif
+
+#define vst4_s64(__p0, __p1) __extension__ ({ \
+  int64x1x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s16(__p0, __p1) __extension__ ({ \
+  int16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
+})
+#else
+#define vst4_s16(__p0, __p1) __extension__ ({ \
+  int16x4x4_t __s1 = __p1; \
+  int16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
+})
+#else
+#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+  poly8x8x4_t __s1 = __p1; \
+  poly8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
+})
+#else
+#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x4x4_t __s1 = __p1; \
+  poly16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
+})
+#else
+#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+  poly16x8x4_t __s1 = __p1; \
+  poly16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
+})
+#else
+#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x4x4_t __s1 = __p1; \
+  uint32x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
+})
+#else
+#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x8x4_t __s1 = __p1; \
+  uint16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
+})
+#else
+#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x4x4_t __s1 = __p1; \
+  float32x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
+})
+#else
+#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x4x4_t __s1 = __p1; \
+  int32x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
+})
+#else
+#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x8x4_t __s1 = __p1; \
+  int16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
+})
+#else
+#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+  uint8x8x4_t __s1 = __p1; \
+  uint8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
+})
+#else
+#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+  uint32x2x4_t __s1 = __p1; \
+  uint32x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
+})
+#else
+#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+  uint16x4x4_t __s1 = __p1; \
+  uint16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
+})
+#else
+#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+  int8x8x4_t __s1 = __p1; \
+  int8x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
+})
+#else
+#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+  float32x2x4_t __s1 = __p1; \
+  float32x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
+})
+#else
+#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+  int32x2x4_t __s1 = __p1; \
+  int32x2x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
+})
+#else
+#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+  int16x4x4_t __s1 = __p1; \
+  int16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
+  int64x1_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __ret;
+  __ret = __p0 - __p1;
+  return __ret;
+}
+#else
+__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = __rev0 - __rev1;
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
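+/* vsubhn_*: subtract and narrow, keeping the high half of each wide
+ * element; for example, each vsubhn_u32 result lane is
+ * (uint16_t)((__p0[i] - __p1[i]) >> 16).  As throughout this header, the
+ * #else branches reverse lane order on big-endian targets so that the
+ * __builtin_neon_* calls always see little-endian lane numbering.  The
+ * __noswap_* variants skip that reversal; they are called from other
+ * intrinsics whose operands are already reversed. */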
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+  return __ret;
+}
+#else
+__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+  int16x4_t __ret;
+  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+  return __ret;
+}
+#endif
+
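+/* vsubl_*: widening subtract; both operands are widened with vmovl before
+ * subtracting, e.g. vsubl_u8(a, b)[i] == (uint16_t)a[i] - (uint16_t)b[i].
+ * vsubw_*: mixed-width subtract; only the second, narrower operand is
+ * widened. */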
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
+  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
+  return __ret;
+}
+#else
+__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
+  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
+  return __ret;
+}
+#else
+__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
+  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
+  return __ret;
+}
+#else
+__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __ret;
+  __ret = __p0 - vmovl_u8(__p1);
+  return __ret;
+}
+#else
+__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = __rev0 - __noswap_vmovl_u8(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = __p0 - vmovl_u32(__p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = __rev0 - __noswap_vmovl_u32(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __ret;
+  __ret = __p0 - vmovl_u16(__p1);
+  return __ret;
+}
+#else
+__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = __rev0 - __noswap_vmovl_u16(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+  int16x8_t __ret;
+  __ret = __p0 - vmovl_s8(__p1);
+  return __ret;
+}
+#else
+__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret;
+  __ret = __rev0 - __noswap_vmovl_s8(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+  int64x2_t __ret;
+  __ret = __p0 - vmovl_s32(__p1);
+  return __ret;
+}
+#else
+__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  int64x2_t __ret;
+  __ret = __rev0 - __noswap_vmovl_s32(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+  int32x4_t __ret;
+  __ret = __p0 - vmovl_s16(__p1);
+  return __ret;
+}
+#else
+__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = __rev0 - __noswap_vmovl_s16(__rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
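+/* vtbl1..vtbl4: byte-wise table lookup.  The final operand is a vector of
+ * byte indices into a table of one to four 64-bit vectors; an index past
+ * the end of the table produces 0.  For example, on little-endian,
+ *   vtbl1_u8(v, vcreate_u8(0x0001020304050607ULL))
+ * reverses the bytes of v, since lane i of the index vector holds 7 - i. */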
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+  poly8x8x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+  uint8x8x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+  int8x8x2_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+  poly8x8x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
+  uint8x8x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
+  int8x8x3_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
+  poly8x8x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
+  uint8x8x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
+  int8x8x4_t __rev0;
+  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
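+/* vtbx1..vtbx4: table lookup with extend.  Same lookup as vtbl, except
+ * that an out-of-range index leaves the corresponding byte of the first
+ * operand unchanged instead of writing 0:
+ *   vtbx1_u8(d, t, idx)[i] == idx[i] < 8 ? t[idx[i]] : d[i] */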
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8x2_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8x3_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
+  return __ret;
+}
+#else
+__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
+  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  poly8x8_t __ret;
+  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
+  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
+  return __ret;
+}
+#else
+__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8x4_t __rev1;
+  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
+  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __ret;
+  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
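+/* vtrn and vtrnq: 2x2 transpose of adjacent element pairs from the two
+ * operands.  Both interleaved results are written by the builtin into a
+ * x2 struct, e.g. vtrn_u8(a, b).val[0] = {a0,b0,a2,b2,a4,b4,a6,b6} and
+ * .val[1] = {a1,b1,a3,b3,a5,b5,a7,b7}. */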
__p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, 
int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + 
__builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { 
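+  // Annotation (not in the upstream header): vuzp/vuzpq de-interleave a
+  // pair of vectors. val[0] gathers the even-indexed lanes of __p0 followed
+  // by those of __p1, val[1] the odd-indexed lanes, e.g.
+  //   vuzp_u8({a0..a7}, {b0..b7})
+  //     -> val[0] = {a0,a2,a4,a6,b0,b2,b4,b6}
+  //        val[1] = {a1,a3,a5,a7,b1,b3,b5,b7}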
+ poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 
2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); 
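+  // Annotation (not in the upstream header): in these big-endian variants
+  // the inputs were lane-reversed above so the builtin always sees
+  // little-endian lane order; both halves of the result are reversed back
+  // before returning. This is the same swap/compute/swap pattern used
+  // throughout this header.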
+ return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + 
__builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, 
(int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#if !defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \ + float16x4_t __s0_122 = __p0_122; \ + float16x8_t __ret_122; \ + __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \ + __ret_122; \ +}) +#else +#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \ + float16x4_t __s0_123 = __p0_123; \ + float16x4_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \ + float16x8_t __ret_123; \ + __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \ + __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_123; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \ + float16x4_t __s0_124 = __p0_124; \ + float16x4_t __ret_124; \ + __ret_124 = splat_lane_f16(__s0_124, __p1_124); \ + __ret_124; \ +}) +#else +#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \ + float16x4_t __s0_125 = __p0_125; \ + float16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ + float16x4_t __ret_125; \ + __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \ + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ + __ret_125; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdup_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmov_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmov_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \ + int32x4_t __s0_126 = __p0_126; \ + int32x2_t __s1_126 = __p1_126; \ + int32x4_t __ret_126; \ + __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \ + __ret_126; \ +}) +#else +#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \ + int32x4_t __s0_127 = __p0_127; \ + int32x2_t __s1_127 = __p1_127; \ + int32x4_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ + int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ + int32x4_t __ret_127; \ + __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \ + __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \ + __ret_127; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \ + int16x8_t __s0_128 = __p0_128; \ + int16x4_t __s1_128 = __p1_128; \ + int16x8_t __ret_128; \ + __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \ + __ret_128; \ +}) +#else +#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \ + int16x8_t __s0_129 = __p0_129; \ + int16x4_t __s1_129 = __p1_129; \ + int16x8_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ + int16x8_t __ret_129; \ + __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \ + __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_129; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \ + int32x2_t __s0_130 = __p0_130; \ + int32x2_t __s1_130 = __p1_130; \ + int32x2_t __ret_130; \ + __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \ + __ret_130; \ +}) +#else +#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \ + int32x2_t __s0_131 = __p0_131; \ + int32x2_t __s1_131 = __p1_131; \ + int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \ + int32x2_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \ + int32x2_t __ret_131; \ + __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \ + __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \ + __ret_131; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \ + int16x4_t __s0_132 = __p0_132; \ + int16x4_t __s1_132 = __p1_132; \ + int16x4_t __ret_132; \ + __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \ + __ret_132; \ +}) +#else +#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \ + int16x4_t __s0_133 = __p0_133; \ + int16x4_t __s1_133 = __p1_133; \ + int16x4_t __rev0_133; 
__rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \ + int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ + int16x4_t __ret_133; \ + __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \ + __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \ + __ret_133; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \ + int32x4_t __s0_134 = __p0_134; \ + int32x2_t __s1_134 = __p1_134; \ + int32x4_t __ret_134; \ + __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \ + __ret_134; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \ + int32x4_t __s0_135 = __p0_135; \ + int32x2_t __s1_135 = __p1_135; \ + int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \ + int32x2_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \ + int32x4_t __ret_135; \ + __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \ + __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \ + __ret_135; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \ + int16x8_t __s0_136 = __p0_136; \ + int16x4_t __s1_136 = __p1_136; \ + int16x8_t __ret_136; \ + __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \ + __ret_136; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \ + int16x8_t __s0_137 = __p0_137; \ + int16x4_t __s1_137 = __p1_137; \ + int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \ + int16x8_t __ret_137; \ + __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \ + __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_137; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \ + int32x2_t __s0_138 = __p0_138; \ + int32x2_t __s1_138 = __p1_138; \ + int32x2_t __ret_138; \ + __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \ + __ret_138; \ +}) +#else +#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \ + int32x2_t __s0_139 = __p0_139; \ + int32x2_t __s1_139 = __p1_139; \ + int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \ + int32x2_t __rev1_139; __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \ + int32x2_t __ret_139; \ + __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \ + __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \ + __ret_139; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \ + int16x4_t __s0_140 = __p0_140; \ + int16x4_t __s1_140 = __p1_140; \ + int16x4_t __ret_140; \ + __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \ + __ret_140; \ +}) +#else +#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \ + int16x4_t __s0_141 = __p0_141; \ + int16x4_t __s1_141 = __p1_141; \ + int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \ + int16x4_t 
__rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \ + int16x4_t __ret_141; \ + __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \ + __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \ + __ret_141; \ +}) +#endif + +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t 
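/* Annotation (editorial sketch, not part of the upstream clang header): the
 * big-endian (#else) branches of the *_lane_* macros above all follow one
 * pattern -- reverse each input into little-endian lane order with
 * __builtin_shufflevector, do the arithmetic with a __noswap_* helper that
 * assumes LE order, then reverse the result back. Conceptually, for
 * vqrdmulh_lane_s16(a, b, lane) on big endian:
 *
 *   int16x4_t r0 = __builtin_shufflevector(a, a, 3, 2, 1, 0);   // a -> LE lane order
 *   int16x4_t r1 = __builtin_shufflevector(b, b, 3, 2, 1, 0);   // b -> LE lane order
 *   int16x4_t r  = __noswap_vqrdmulh_s16(r0, __noswap_splat_lane_s16(r1, lane));
 *   return __builtin_shufflevector(r, r, 3, 2, 1, 0);           // back to BE order
 */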
vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t 
vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t 
vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float16x8_t 
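/* Annotation (editorial sketch, not part of the upstream clang header): every
 * vreinterpret[q]_X_Y in this region is a pure bit-pattern cast -- the 64- or
 * 128-bit register contents are unchanged and no instruction is emitted; only
 * the static element type changes. A minimal usage sketch (vnegq_via_bits is
 * a hypothetical name; veorq_u32 and vdupq_n_u32 are defined elsewhere in
 * this header):
 *
 *   float32x4_t vnegq_via_bits(float32x4_t v) {
 *     uint32x4_t bits = vreinterpretq_u32_f32(v);        // same 128 bits, u32 view
 *     bits = veorq_u32(bits, vdupq_n_u32(0x80000000u));  // flip each sign bit
 *     return vreinterpretq_f32_u32(bits);                // back to float lanes
 *   }
 */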
vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t 
__p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t 
vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + 
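/* Annotation (editorial sketch, not part of the upstream clang header): the
 * (__ARM_FP & 2) block further below adds half-precision support -- vcvt
 * conversions between float16x4_t and float32x4_t, plus the vld1/vst1 and
 * interleaving vld2..vld4/vst2..vst4 f16 load/store macros. A minimal usage
 * sketch under those definitions (widen4 is a hypothetical name; vst1q_f32 is
 * defined elsewhere in this header):
 *
 *   void widen4(const float16_t *src, float32_t *dst) {
 *     float16x4_t h = vld1_f16(src);    // load 4 x f16
 *     float32x4_t f = vcvt_f32_f16(h);  // widen to 4 x f32
 *     vst1q_f32(dst, f);                // store 4 x f32
 *   }
 */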
uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai 
float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = 
(int64x1_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif +#if (__ARM_FP & 2) +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ + __ret; \ +}) +#else +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ + __ret; \ +}) +#else +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + 
__builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ + __ret; \ +}) +#else +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ + __ret; \ +}) +#else +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_f16(__p0) __extension__ ({ \ + 
float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ + __ret; \ +}) +#else +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ + __ret; \ +}) +#else +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \ +}) +#else +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \ +}) +#else +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ +}) +#else +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ +}) +#else +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ 
+}) +#else +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ +}) +#else +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ +}) +#else +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ +}) +#else +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ +}) +#else +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ +}) +#else +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ +}) +#else +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ +}) +#else +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ +}) +#else +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ +}) +#else +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ +}) +#else +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ +}) +#else +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ +}) +#else +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ +}) +#else +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ +}) +#else +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = 
__p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ +}) +#else +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ +}) +#else +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ +}) +#else +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ +}) +#endif + +#endif +#if __ARM_ARCH >= 8 +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
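+// Note on the vcvt{a,m,n,p} families above and below: the suffix selects the
+// float-to-integer rounding mode -- 'a' rounds to nearest with ties away from
+// zero, 'm' rounds toward minus infinity, 'n' rounds to nearest with ties to
+// even, and 'p' rounds toward plus infinity. In each big-endian variant the
+// lanes are reversed with __builtin_shufflevector before and after the call
+// because the NEON builtins assume little-endian lane numbering.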
+#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && 
defined(__ARM_FEATURE_AES) +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = 
(float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float32_t vrndns_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrndns_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 
8); + return __ret; +} +#else +__ai float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) 
__builtin_neon_vrndpq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t 
vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2) +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint32_t vsha1h_u32(uint32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + 
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__) +#ifdef 
__LITTLE_ENDIAN__ +__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) 
+#else +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif 
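+/*
+ * vsha512hq_u64, vsha512h2q_u64, vsha512su0q_u64 and vsha512su1q_u64 above
+ * expose the ARMv8.2-SHA512 hash and message-schedule instructions, each
+ * processing two 64-bit lanes per call. A minimal sketch of one schedule
+ * update, assuming hypothetical message-word vectors w0, w1 and w2; the
+ * exact operand roles follow the SHA512SU0/SHA512SU1 instruction
+ * definitions:
+ *
+ *   uint64x2_t s;
+ *   s = vsha512su0q_u64(w0, w1);      // sigma0 contribution
+ *   s = vsha512su1q_u64(s, w2, w1);   // fold in the sigma1 contribution
+ */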
+ +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) 
__extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
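+/*
+ * vsm4eq_u32 above and vsm4ekeyq_u32 below wrap the ARMv8.2-SM4 SM4E and
+ * SM4EKEY instructions: each call performs four rounds of SM4 encryption or
+ * derives four 32-bit round keys. A minimal sketch, assuming hypothetical
+ * vectors state (block words), key (previous round keys) and ck (the CK
+ * constants for this step):
+ *
+ *   uint32x4_t rk   = vsm4ekeyq_u32(key, ck);  // next four round keys
+ *   uint32x4_t next = vsm4eq_u32(state, rk);   // four encryption rounds
+ */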
+__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtnq_s64_f64(float64x2_t 
__p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + 
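+  /*
+   * This and the long run of vreinterpret/vreinterpretq definitions around
+   * it are pure bit-pattern casts: typically no instruction is emitted, the
+   * 64- or 128-bit register is simply retyped. A small sketch, assuming an
+   * AArch64 target:
+   *
+   *   float32x2_t f = vdup_n_f32(1.0f);
+   *   uint32x2_t  u = vreinterpret_u32_f32(f);  // each lane = 0x3f800000
+   */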
poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return 
__ret; +} +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t 
vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t 
vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t 
vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t 
vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t 
vreinterpretq_f64_s16(int16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t 
__ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t 
vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai 
uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t 
__p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t 
vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = 
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
+#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrnd_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrnda_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrndi_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrndm_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrndn_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrndp_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vrndx_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
+ return __ret;
+}
+#endif
+#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
+#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+ return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+ poly64x1_t __ret;
+ __ret = (poly64x1_t)(__p0);
+ return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t)(__p0);
+ return __ret;
+}
+__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
+ poly128_t __ret;
+ __ret = (poly128_t)(__p0);
+ return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+ poly64x2_t __ret;
+ __ret = (poly64x2_t)(__p0);
+ return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t)(__p0);
+ return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0);
+ return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0);
+ return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t)(__p0);
+ return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0);
+ return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t)(__p0);
+ return __ret;
+}
+__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
+ float64x2_t __ret;
+ __ret = (float64x2_t)(__p0);
+ return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t)(__p0);
+ return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t)(__p0);
+ return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t)(__p0);
+ return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t)(__p0);
+ return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t)(__p0);
+ return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
+ float64x1_t __ret;
+ __ret = (float64x1_t)(__p0);
+ return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t)(__p0);
+ return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+ __ret; \
+})
+#else
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+ __ret; \
+})
+#else
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+ __ret; \
+})
+#else
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+ __ret; \
+})
+#else
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
+ float32x4_t __s0_142 = __p0_142; \
+ bfloat16x8_t __s1_142 = __p1_142; \
+ bfloat16x4_t __s2_142 = __p2_142; \
+ float32x4_t __ret_142; \
+bfloat16x4_t __reint_142 = __s2_142; \
+float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
+ __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
+ __ret_142; \
+})
+#else
+#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
+ float32x4_t __s0_143 = __p0_143; \
+ bfloat16x8_t __s1_143 = __p1_143; \
+ bfloat16x4_t __s2_143 = __p2_143; \
+ float32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
+ bfloat16x8_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x4_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
+ float32x4_t __ret_143; \
+bfloat16x4_t __reint_143 = __rev2_143; \
+float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
+ __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
+ __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
+ __ret_143; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
+ float32x2_t __s0_144 = __p0_144; \
+ bfloat16x4_t __s1_144 = __p1_144; \
+ bfloat16x4_t __s2_144 = __p2_144; \
+ float32x2_t __ret_144; \
+bfloat16x4_t __reint_144 = __s2_144; \
+float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
+ __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
+ __ret_144; \
+})
+#else
+#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
+ float32x2_t __s0_145 = __p0_145; \
+ bfloat16x4_t __s1_145 = __p1_145; \
+ bfloat16x4_t __s2_145 = __p2_145; \
+ float32x2_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
+ bfloat16x4_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
+ bfloat16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
+ float32x2_t __ret_145; \
+bfloat16x4_t __reint_145 = __rev2_145; \
+float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
+ __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
+ __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
+ __ret_145; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
+ float32x4_t __s0_146 = __p0_146; \
+ bfloat16x8_t __s1_146 = __p1_146; \
+ bfloat16x8_t __s2_146 = __p2_146; \
+ float32x4_t __ret_146; \
+bfloat16x8_t __reint_146 = __s2_146; \
+float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
+ __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
+ __ret_146; \
+})
+#else
+#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
+ float32x4_t __s0_147 = __p0_147; \
+ bfloat16x8_t __s1_147 = __p1_147; \
+ bfloat16x8_t __s2_147 = __p2_147; \
+ float32x4_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
+ bfloat16x8_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x8_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float32x4_t __ret_147; \
+bfloat16x8_t __reint_147 = __rev2_147; \
+float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
+ __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
+ __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
+ __ret_147; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
+ float32x2_t __s0_148 = __p0_148; \
+ bfloat16x4_t __s1_148 = __p1_148; \
+ bfloat16x8_t __s2_148 = __p2_148; \
+ float32x2_t __ret_148; \
+bfloat16x8_t __reint_148 = __s2_148; \
+float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
+ __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
+ __ret_148; \
+})
+#else
+#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
+ float32x2_t __s0_149 = __p0_149; \
+ bfloat16x4_t __s1_149 = __p1_149; \
+ bfloat16x8_t __s2_149 = __p2_149; \
+ float32x2_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
+ bfloat16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
+ bfloat16x8_t __rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float32x2_t __ret_149; \
+bfloat16x8_t __reint_149 = __rev2_149; \
+float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
+ __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
+ __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
+ __ret_149; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+ bfloat16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ bfloat16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+ bfloat16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#define vcreate_bf16(__p0) __extension__ ({ \
+ bfloat16x4_t __ret; \
+ uint64_t __promote = __p0; \
+ __ret = (bfloat16x4_t)(__promote); \
+ __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
+ float32x4_t __ret_150;
+bfloat16x4_t __reint_150 = __p0_150;
+int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
+ __ret_150 = *(float32x4_t *) &__reint1_150;
+ return __ret_150;
+}
+#else
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
+ bfloat16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
+ float32x4_t __ret_151;
+bfloat16x4_t __reint_151 = __rev0_151;
+int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
+ __ret_151 = *(float32x4_t *) &__reint1_151;
+ __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
+ return __ret_151;
+}
+__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
+ float32x4_t __ret_152;
+bfloat16x4_t __reint_152 = __p0_152;
+int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
+ __ret_152 = *(float32x4_t *) &__reint1_152;
+ return __ret_152;
+}
+#endif
+
+__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
+ float32_t __ret;
+bfloat16_t __reint = __p0;
+int32_t __reint1 = *(int32_t *) &__reint << 16;
+ __ret = *(float32_t *) &__reint1;
+ return __ret;
+}
+__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
+ bfloat16_t __ret;
+ __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
+ bfloat16x4_t __s0_153 = __p0_153; \
+ bfloat16x8_t __ret_153; \
+ __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
+ __ret_153; \
+})
+#else
+#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
+ bfloat16x4_t __s0_154 = __p0_154; \
+ bfloat16x4_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
+ bfloat16x8_t __ret_154; \
+ __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
+ __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_154; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
+ bfloat16x4_t __s0_155 = __p0_155; \
+ bfloat16x4_t __ret_155; \
+ __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
+ __ret_155; \
+})
+#else
+#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
+ bfloat16x4_t __s0_156 = __p0_156; \
+ bfloat16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
+ bfloat16x4_t __ret_156; \
+ __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
+ __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
+ __ret_156; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
+ bfloat16x8_t __s0_157 = __p0_157; \
+ bfloat16x8_t __ret_157; \
+ __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
+ __ret_157; \
+})
+#else
+#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
+ bfloat16x8_t __s0_158 = __p0_158; \
+ bfloat16x8_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x8_t __ret_158; \
+ __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
+ __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_158; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
+ bfloat16x8_t __s0_159 = __p0_159; \
+ bfloat16x4_t __ret_159; \
+ __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
+ __ret_159; \
+})
+#else
+#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
+ bfloat16x8_t __s0_160 = __p0_160; \
+ bfloat16x8_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x4_t __ret_160; \
+ __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
+ __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
+ __ret_160; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+ bfloat16x8_t __ret;
+ __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ bfloat16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x8_t __s0 = __p0; \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
+ bfloat16x4_t __s0 = __p0; \
+ bfloat16_t __ret; \
+ __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+ bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ bfloat16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
+ bfloat16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16(__p0) __extension__ ({ \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+ __ret; \
+})
+#else
+#define vld1q_bf16(__p0) __extension__ ({ \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16(__p0) __extension__ ({ \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+ __ret; \
+})
+#else
+#define vld1_bf16(__p0) __extension__ ({ \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+ __ret; \
+})
+#else
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+ __ret; \
+})
+#else
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+ bfloat16x8_t __s1 = __p1; \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+ __ret; \
+})
+#else
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+ bfloat16x8_t __s1 = __p1; \
+ bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ bfloat16x8_t __ret; \
+ __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+ bfloat16x4_t __s1 = __p1; \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+ __ret; \
+})
+#else
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+ bfloat16x4_t __s1 = __p1; \
+ bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ bfloat16x4_t __ret; \
+ __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+ bfloat16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+ __ret; \
+})
+#else
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+ bfloat16x8x2_t __ret; \
+ __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+ bfloat16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+ __ret; \
+})
+#else
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+ bfloat16x4x2_t __ret; \
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+ bfloat16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+ __ret; \
+})
+#else
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+ bfloat16x8x3_t __ret; \
+ __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+ bfloat16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+ __ret; \
+})
+#else
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+ bfloat16x4x3_t __ret; \
+ __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+ bfloat16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+ __ret; \
+})
+#else
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+ bfloat16x8x4_t __ret; \
+ __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+ bfloat16x4x4_t __ret; \
+ __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+ __ret; \
+})
+#else
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+ bfloat16x4x4_t __ret; \
\
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+  __ret; \
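+  /* vld2/vld3/vld4 are de-interleaving structure loads: element groups of \
+     2/3/4 are split across the val[] vectors. The _dup forms load one group \
+     and replicate it to every lane; the _lane forms reload only lane __p2, \
+     keeping the other lanes of __p1. Usage sketch (hypothetical buf of \
+     bfloat16_t): \
+       bfloat16x4x2_t planes = vld2_bf16(buf);  // even lanes -> val[0], odd -> val[1] \
+       vst2_bf16(buf, planes);                  // re-interleave on store \
+  */ \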
+}) +#else +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ + __ret; \ +}) +#else +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ + __ret; \ +}) +#else +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = 
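+/* The trailing integer passed to every generic __builtin_neon_* call encodes \
+   the element type: 11 is bfloat16, and adding 32 marks the 128-bit "q" form, \
+   hence 43 (the float32 sections further down use 9 and 41 the same way). */ \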
__builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ + __ret; \ +}) +#else +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ + __ret; \ +}) +#else +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 
3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \ +}) +#else +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \ +}) +#else +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \ +}) +#else +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \ +}) +#else +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; 
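+  /* The __noswap_vset(q)_lane_bf16 helpers above insert the scalar without any \
+     lane reversal, so other big-endian wrappers can apply them to operands \
+     that are already in reversed order without swapping twice. */ \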
__rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], 
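+/* The vst1*_bf16_x2/_x3/_x4 stores write 2-4 contiguous vectors; on big-endian \
+   targets each source vector is shuffled back into memory lane order first. */ \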
__s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ +}) +#else +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ +}) +#else +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ +}) +#else +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ +}) +#else +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = 
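+/* vst2/vst3/vst4 interleave their val[0..n-1] operands element by element into \
+   memory; the _lane forms store only the element group at lane __p2. */ \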
__builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ +}) +#else +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ +}) +#else +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ +}) +#endif + +#endif +#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11); + return __ret; +} +#else +__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11); + __ret = 
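+/* AArch32-only section: __a32_vcvt_bf16_f32 narrows four float32 lanes to
+   bfloat16; vcvtq_low_bf16_f32 builds its 128-bit result by vcombine against a
+   zero half, and vcvtq_high_bf16_f32 combines the freshly converted lanes with
+   half of the passthrough operand. */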
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = __a32_vcvt_bf16_f32(__p0); + return __ret; +} +#else +__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __ret; + __ret = __noswap___a32_vcvt_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); + return __ret; +} +#else +__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + bfloat16x8_t __ret; + __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); + return __ret; +} +#else +__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __ret; + __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43); + return __ret; +} +#else +__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ + bfloat16x8_t __s0_161 = __p0_161; \ + bfloat16x4_t __s2_161 = __p2_161; \ + bfloat16x8_t __ret_161; \ + __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \ + __ret_161; \ +}) +#else +#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ + bfloat16x8_t __s0_162 = __p0_162; \ + bfloat16x4_t __s2_162 = __p2_162; \ + bfloat16x8_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \ + 
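+  /* vcopy(q)_lane(q)_bf16 copies lane __p3 of the second vector into lane __p1 \
+     of the first; it is composed from the get/set-lane helpers (their __noswap_ \
+     forms here in the big-endian path), so no new builtin is needed. */ \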
bfloat16x8_t __ret_162; \ + __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \ + __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_162; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ + bfloat16x4_t __s0_163 = __p0_163; \ + bfloat16x4_t __s2_163 = __p2_163; \ + bfloat16x4_t __ret_163; \ + __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \ + __ret_163; \ +}) +#else +#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ + bfloat16x4_t __s0_164 = __p0_164; \ + bfloat16x4_t __s2_164 = __p2_164; \ + bfloat16x4_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ + bfloat16x4_t __ret_164; \ + __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \ + __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \ + __ret_164; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ + bfloat16x8_t __s0_165 = __p0_165; \ + bfloat16x8_t __s2_165 = __p2_165; \ + bfloat16x8_t __ret_165; \ + __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \ + __ret_165; \ +}) +#else +#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ + bfloat16x8_t __s0_166 = __p0_166; \ + bfloat16x8_t __s2_166 = __p2_166; \ + bfloat16x8_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __ret_166; \ + __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \ + __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_166; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ + bfloat16x4_t __s0_167 = __p0_167; \ + bfloat16x8_t __s2_167 = __p2_167; \ + bfloat16x4_t __ret_167; \ + __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \ + __ret_167; \ +}) +#else +#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ + bfloat16x4_t __s0_168 = __p0_168; \ + bfloat16x8_t __s2_168 = __p2_168; \ + bfloat16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __ret_168; \ + __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \ + __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ + __ret_168; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); + return __ret; +} +#else +__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __ret; + __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43); + return __ret; +} +#else +__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = __a64_vcvtq_low_bf16_f32(__p0); + return __ret; +} +#else +__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __ret; + __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_COMPLEX) +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai 
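+/* __ARM_FEATURE_COMPLEX section: vcadd_rot90/_rot270 treat each even/odd lane
+   pair as one complex number and add the second operand rotated by 90 or 270
+   degrees in the complex plane. */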
float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ + float32x2_t __s0_169 = __p0_169; \ + float32x2_t __s1_169 = __p1_169; \ + float32x2_t __s2_169 = __p2_169; \ + float32x2_t __ret_169; \ +float32x2_t __reint_169 = __s2_169; \ +uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \ + __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \ + __ret_169; \ +}) +#else +#define vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ + float32x2_t __s0_170 = __p0_170; \ + float32x2_t __s1_170 = __p1_170; \ + float32x2_t __s2_170 = __p2_170; \ + float32x2_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \ + float32x2_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \ + float32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ + float32x2_t __ret_170; \ +float32x2_t __reint_170 = __rev2_170; \ +uint64x1_t __reint1_170 = (uint64x1_t) 
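+/* vcmla* accumulate a complex product into the first operand. The _lane forms \
+   reinterpret the selected real/imaginary pair as one uint64 lane so that \
+   vget_lane_u64 can broadcast the pair as a single unit. */ \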
{vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \ + __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \ + __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \ + __ret_170; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ + float32x4_t __s0_171 = __p0_171; \ + float32x4_t __s1_171 = __p1_171; \ + float32x2_t __s2_171 = __p2_171; \ + float32x4_t __ret_171; \ +float32x2_t __reint_171 = __s2_171; \ +uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \ + __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \ + __ret_171; \ +}) +#else +#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ + float32x4_t __s0_172 = __p0_172; \ + float32x4_t __s1_172 = __p1_172; \ + float32x2_t __s2_172 = __p2_172; \ + float32x4_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \ + float32x4_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \ + float32x2_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \ + float32x4_t __ret_172; \ +float32x2_t __reint_172 = __rev2_172; \ +uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \ + __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \ + __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \ + __ret_172; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ + float32x2_t __s0_173 = __p0_173; \ + float32x2_t __s1_173 = __p1_173; \ + float32x4_t __s2_173 = __p2_173; \ + float32x2_t __ret_173; \ +float32x4_t __reint_173 = __s2_173; \ +uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \ + __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \ + __ret_173; \ +}) +#else +#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ + float32x2_t __s0_174 = __p0_174; \ + float32x2_t __s1_174 = __p1_174; \ + float32x4_t __s2_174 = __p2_174; \ + float32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ + float32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ + float32x4_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \ + float32x2_t __ret_174; \ +float32x4_t __reint_174 = __rev2_174; \ +uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \ + __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \ + __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \ + __ret_174; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ + float32x4_t __s0_175 = __p0_175; \ + float32x4_t __s1_175 = __p1_175; \ + float32x4_t __s2_175 = __p2_175; \ + float32x4_t __ret_175; \ +float32x4_t __reint_175 = __s2_175; \ +uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \ + __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) 
&__reint1_175); \ + __ret_175; \ +}) +#else +#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ + float32x4_t __s0_176 = __p0_176; \ + float32x4_t __s1_176 = __p1_176; \ + float32x4_t __s2_176 = __p2_176; \ + float32x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ + float32x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ + float32x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ + float32x4_t __ret_176; \ +float32x4_t __reint_176 = __rev2_176; \ +uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \ + __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \ + __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ + __ret_176; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ + float32x2_t __s0_177 = __p0_177; \ + float32x2_t __s1_177 = __p1_177; \ + float32x2_t __s2_177 = __p2_177; \ + float32x2_t __ret_177; \ +float32x2_t __reint_177 = __s2_177; \ +uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ + __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, 
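+/* The _rot90/_rot180/_rot270 variants rotate the second factor in the complex \
+   plane before the fused multiply-add. */ \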
*(float32x2_t *) &__reint1_177); \ + __ret_177; \ +}) +#else +#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ + float32x2_t __s0_178 = __p0_178; \ + float32x2_t __s1_178 = __p1_178; \ + float32x2_t __s2_178 = __p2_178; \ + float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ + float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ + float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ + float32x2_t __ret_178; \ +float32x2_t __reint_178 = __rev2_178; \ +uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ + __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ + __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ + __ret_178; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ + float32x4_t __s0_179 = __p0_179; \ + float32x4_t __s1_179 = __p1_179; \ + float32x2_t __s2_179 = __p2_179; \ + float32x4_t __ret_179; \ +float32x2_t __reint_179 = __s2_179; \ +uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ + __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ + __ret_179; \ +}) +#else +#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ + float32x4_t __s0_180 = __p0_180; \ + float32x4_t __s1_180 = __p1_180; \ + float32x2_t __s2_180 = __p2_180; \ + float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ + float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ + float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ + float32x4_t __ret_180; \ +float32x2_t __reint_180 = __rev2_180; \ +uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ + __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ + __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ + __ret_180; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ + float32x2_t __s0_181 = __p0_181; \ + float32x2_t __s1_181 = __p1_181; \ + float32x4_t __s2_181 = __p2_181; \ + float32x2_t __ret_181; \ +float32x4_t __reint_181 = __s2_181; \ +uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ + __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ + __ret_181; \ +}) +#else +#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ + float32x2_t __s0_182 = __p0_182; \ + float32x2_t __s1_182 = __p1_182; \ + float32x4_t __s2_182 = __p2_182; \ + float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ + float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ + float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ + float32x2_t __ret_182; \ +float32x4_t __reint_182 = __rev2_182; \ +uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ + 
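+  /* Usage sketch (hypothetical operands): a full complex multiply-accumulate \
+     acc += a * b is conventionally composed from the 0- and 90-degree halves: \
+       acc = vcmlaq_f32(acc, a, b); \
+       acc = vcmlaq_rot90_f32(acc, a, b); \
+  */ \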
__ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ + __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ + __ret_182; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ + float32x4_t __s0_183 = __p0_183; \ + float32x4_t __s1_183 = __p1_183; \ + float32x4_t __s2_183 = __p2_183; \ + float32x4_t __ret_183; \ +float32x4_t __reint_183 = __s2_183; \ +uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ + __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ + __ret_183; \ +}) +#else +#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ + float32x4_t __s0_184 = __p0_184; \ + float32x4_t __s1_184 = __p1_184; \ + float32x4_t __s2_184 = __p2_184; \ + float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ + float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ + float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ + float32x4_t __ret_184; \ +float32x4_t __reint_184 = __rev2_184; \ +uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ + __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ + __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ + __ret_184; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 
1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ + float32x2_t __s0_185 = __p0_185; \ + float32x2_t __s1_185 = __p1_185; \ + float32x2_t __s2_185 = __p2_185; \ + float32x2_t __ret_185; \ +float32x2_t __reint_185 = __s2_185; \ +uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ + __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ + __ret_185; \ +}) +#else +#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ + float32x2_t __s0_186 = __p0_186; \ + float32x2_t __s1_186 = __p1_186; \ + float32x2_t __s2_186 = __p2_186; \ + float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \ + float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ + float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ + float32x2_t __ret_186; \ +float32x2_t __reint_186 = __rev2_186; \ +uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ + __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \ + __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ + __ret_186; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ + float32x4_t __s0_187 = __p0_187; \ + float32x4_t __s1_187 = __p1_187; \ + float32x2_t __s2_187 = __p2_187; \ + float32x4_t __ret_187; \ +float32x2_t __reint_187 = __s2_187; \ +uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ + __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ + __ret_187; \ +}) +#else +#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ + float32x4_t __s0_188 = __p0_188; \ + float32x4_t __s1_188 = __p1_188; \ + float32x2_t __s2_188 = __p2_188; \ + float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ + float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ + float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ + float32x4_t __ret_188; \ +float32x2_t __reint_188 = __rev2_188; \ +uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ + __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \ + __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ + __ret_188; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ + float32x2_t __s0_189 = __p0_189; \ + float32x2_t __s1_189 = __p1_189; \ + float32x4_t __s2_189 = __p2_189; \ + float32x2_t __ret_189; \ +float32x4_t __reint_189 = __s2_189; \ +uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ + __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, 
*(float32x2_t *) &__reint1_189); \ + __ret_189; \ +}) +#else +#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ + float32x2_t __s0_190 = __p0_190; \ + float32x2_t __s1_190 = __p1_190; \ + float32x4_t __s2_190 = __p2_190; \ + float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ + float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ + float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ + float32x2_t __ret_190; \ +float32x4_t __reint_190 = __rev2_190; \ +uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ + __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ + __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ + __ret_190; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ + float32x4_t __s0_191 = __p0_191; \ + float32x4_t __s1_191 = __p1_191; \ + float32x4_t __s2_191 = __p2_191; \ + float32x4_t __ret_191; \ +float32x4_t __reint_191 = __s2_191; \ +uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ + __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ + __ret_191; \ +}) +#else +#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ + float32x4_t __s0_192 = __p0_192; \ + float32x4_t __s1_192 = __p1_192; \ + float32x4_t __s2_192 = __p2_192; \ + float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ + float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ + float32x4_t __rev2_192; __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ + float32x4_t __ret_192; \ +float32x4_t __reint_192 = __rev2_192; \ +uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ + __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ + __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ + __ret_192; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t 
vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ + float32x2_t __s0_193 = __p0_193; \ + float32x2_t __s1_193 = __p1_193; \ + float32x2_t __s2_193 = __p2_193; \ + float32x2_t __ret_193; \ +float32x2_t __reint_193 = __s2_193; \ +uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ + __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ + __ret_193; \ +}) +#else +#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ + float32x2_t __s0_194 = __p0_194; \ + float32x2_t __s1_194 = __p1_194; \ + float32x2_t __s2_194 = __p2_194; \ + float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ + float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ + float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ + float32x2_t __ret_194; \ +float32x2_t __reint_194 = __rev2_194; \ +uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ + __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ + __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ + __ret_194; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ + float32x4_t __s0_195 = __p0_195; \ + float32x4_t __s1_195 = __p1_195; \ + float32x2_t __s2_195 = __p2_195; \ + float32x4_t __ret_195; \ +float32x2_t __reint_195 = __s2_195; \ +uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ + __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ + __ret_195; \ +}) +#else +#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ + float32x4_t __s0_196 = __p0_196; \ + float32x4_t __s1_196 = __p1_196; \ + float32x2_t __s2_196 = __p2_196; \ + float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ + float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ + float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ + float32x4_t __ret_196; \ +float32x2_t __reint_196 = __rev2_196; \ +uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), 
vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \
+ __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \
+ __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \
+ __ret_196; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
+ float32x2_t __s0_197 = __p0_197; \
+ float32x2_t __s1_197 = __p1_197; \
+ float32x4_t __s2_197 = __p2_197; \
+ float32x2_t __ret_197; \
+float32x4_t __reint_197 = __s2_197; \
+uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \
+ __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \
+ __ret_197; \
+})
+#else
+#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
+ float32x2_t __s0_198 = __p0_198; \
+ float32x2_t __s1_198 = __p1_198; \
+ float32x4_t __s2_198 = __p2_198; \
+ float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \
+ float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \
+ float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \
+ float32x2_t __ret_198; \
+float32x4_t __reint_198 = __rev2_198; \
+uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \
+ __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \
+ __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \
+ __ret_198; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
+ float32x4_t __s0_199 = __p0_199; \
+ float32x4_t __s1_199 = __p1_199; \
+ float32x4_t __s2_199 = __p2_199; \
+ float32x4_t __ret_199; \
+float32x4_t __reint_199 = __s2_199; \
+uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \
+ __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \
+ __ret_199; \
+})
+#else
+#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
+ float32x4_t __s0_200 = __p0_200; \
+ float32x4_t __s1_200 = __p1_200; \
+ float32x4_t __s2_200 = __p2_200; \
+ float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
+ float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
+ float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \
+ float32x4_t __ret_200; \
+float32x4_t __reint_200 = __rev2_200; \
+uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \
+ __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \
+ __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
+ __ret_200; \
+})
+#endif
+
+#endif
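The rotate variants above are the two halves of a complex multiply: the rotate-0 form accumulates the partial products taken from the real component of the first multiplicand, and the rotate-90 form adds the partials taken from its imaginary component (rotate-180/270 are the subtracting counterparts). A minimal usage sketch, illustrative and not part of the patch, assuming an ARMv8.3-A target with FEAT_FCMA and complex numbers stored interleaved as [re, im, re, im]:

#include <arm_neon.h>

/* acc += a * b over complex lanes: rotate 0 contributes a.re * b,
   rotate 90 contributes i * a.im * b. */
static inline float32x4_t cmla_f32(float32x4_t acc, float32x4_t a,
                                   float32x4_t b) {
  acc = vcmlaq_f32(acc, a, b);       /* acc.re += a.re*b.re; acc.im += a.re*b.im */
  acc = vcmlaq_rot90_f32(acc, a, b); /* acc.re -= a.im*b.im; acc.im += a.im*b.re */
  return acc;
}

Pairing rotate-0 with rotate-270 instead yields acc += conj(a) * b, the usual building block for correlation kernels.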
+#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+ return __ret;
+}
+#else
+__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+ return __ret;
+}
+#else
+__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
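vcadd performs a complex addition with the second operand pre-rotated: rotate-90 yields a + i*b per complex lane and rotate-270 yields a - i*b. A small sketch, illustrative and not part of the patch, assuming the same FEAT_FCMA target with FP16 vector arithmetic (the helper name is hypothetical):

#include <arm_neon.h>

/* Per complex lane: sum = a + i*b, diff = a - i*b. */
static inline void rot_add_f16(float16x8_t a, float16x8_t b,
                               float16x8_t *sum, float16x8_t *diff) {
  *sum  = vcaddq_rot90_f16(a, b);  /* re = a.re - b.im; im = a.im + b.re */
  *diff = vcaddq_rot270_f16(a, b); /* re = a.re + b.im; im = a.im - b.re */
}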
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+ return __ret;
+}
+#else
+__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
+ float16x4_t __s0_201 = __p0_201; \
+ float16x4_t __s1_201 = __p1_201; \
+ float16x4_t __s2_201 = __p2_201; \
+ float16x4_t __ret_201; \
+float16x4_t __reint_201 = __s2_201; \
+uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \
+ __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \
+ __ret_201; \
+})
+#else
+#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
+ float16x4_t __s0_202 = __p0_202; \
+ float16x4_t __s1_202 = __p1_202; \
+ float16x4_t __s2_202 = __p2_202; \
+ float16x4_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \
+ float16x4_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \
+ float16x4_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \
+ float16x4_t __ret_202; \
+float16x4_t __reint_202 = __rev2_202; \
+uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \
+ __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \
+ __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \
+ __ret_202; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
+ float16x8_t __s0_203 = __p0_203; \
+ float16x8_t __s1_203 = __p1_203; \
+ float16x4_t __s2_203 = __p2_203; \
+ float16x8_t __ret_203; \
+float16x4_t __reint_203 = __s2_203; \
+uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \
+ __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \
+ __ret_203; \
+})
+#else
+#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
+ float16x8_t __s0_204 = __p0_204; \
+ float16x8_t __s1_204 = __p1_204; \
+ float16x4_t __s2_204 = __p2_204; \
+ float16x8_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 
0); \ + float16x4_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \ + float16x8_t __ret_204; \ +float16x4_t __reint_204 = __rev2_204; \ +uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \ + __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \ + __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_204; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ + float16x4_t __s0_205 = __p0_205; \ + float16x4_t __s1_205 = __p1_205; \ + float16x8_t __s2_205 = __p2_205; \ + float16x4_t __ret_205; \ +float16x8_t __reint_205 = __s2_205; \ +uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \ + __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \ + __ret_205; \ +}) +#else +#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ + float16x4_t __s0_206 = __p0_206; \ + float16x4_t __s1_206 = __p1_206; \ + float16x8_t __s2_206 = __p2_206; \ + float16x4_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \ + float16x4_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \ + float16x8_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_206; \ +float16x8_t __reint_206 = __rev2_206; \ +uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \ + __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \ + __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \ + __ret_206; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ + float16x8_t __s0_207 = __p0_207; \ + float16x8_t __s1_207 = __p1_207; \ + float16x8_t __s2_207 = __p2_207; \ + float16x8_t __ret_207; \ +float16x8_t __reint_207 = __s2_207; \ +uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \ + __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \ + __ret_207; \ +}) +#else +#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ + float16x8_t __s0_208 = __p0_208; \ + float16x8_t __s1_208 = __p1_208; \ + float16x8_t __s2_208 = __p2_208; \ + float16x8_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_208; \ +float16x8_t __reint_208 = __rev2_208; \ +uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), 
__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \ + __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \ + __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_208; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ + float16x4_t __s0_209 = __p0_209; \ + float16x4_t __s1_209 = __p1_209; \ + float16x4_t __s2_209 = __p2_209; \ + float16x4_t __ret_209; \ +float16x4_t __reint_209 = __s2_209; \ +uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ + __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ + __ret_209; \ +}) +#else +#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ + float16x4_t __s0_210 = __p0_210; \ + float16x4_t __s1_210 = __p1_210; \ + float16x4_t __s2_210 = __p2_210; \ + float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ + float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 
3, 2, 1, 0); \ + float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ + float16x4_t __ret_210; \ +float16x4_t __reint_210 = __rev2_210; \ +uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ + __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ + __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ + __ret_210; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ + float16x8_t __s0_211 = __p0_211; \ + float16x8_t __s1_211 = __p1_211; \ + float16x4_t __s2_211 = __p2_211; \ + float16x8_t __ret_211; \ +float16x4_t __reint_211 = __s2_211; \ +uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ + __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ + __ret_211; \ +}) +#else +#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ + float16x8_t __s0_212 = __p0_212; \ + float16x8_t __s1_212 = __p1_212; \ + float16x4_t __s2_212 = __p2_212; \ + float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ + float16x8_t __ret_212; \ +float16x4_t __reint_212 = __rev2_212; \ +uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ + __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ + __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_212; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ + float16x4_t __s0_213 = __p0_213; \ + float16x4_t __s1_213 = __p1_213; \ + float16x8_t __s2_213 = __p2_213; \ + float16x4_t __ret_213; \ +float16x8_t __reint_213 = __s2_213; \ +uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ + __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ + __ret_213; \ +}) +#else +#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ + float16x4_t __s0_214 = __p0_214; \ + float16x4_t __s1_214 = __p1_214; \ + float16x8_t __s2_214 = __p2_214; \ + float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ + float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ + float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_214; \ +float16x8_t __reint_214 = __rev2_214; \ +uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) 
&__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ + __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ + __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ + __ret_214; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ + float16x8_t __s0_215 = __p0_215; \ + float16x8_t __s1_215 = __p1_215; \ + float16x8_t __s2_215 = __p2_215; \ + float16x8_t __ret_215; \ +float16x8_t __reint_215 = __s2_215; \ +uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ + __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ + __ret_215; \ +}) +#else +#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ + float16x8_t __s0_216 = __p0_216; \ + float16x8_t __s1_216 = __p1_216; \ + float16x8_t __s2_216 = __p2_216; \ + float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_216; \ +float16x8_t __reint_216 = __rev2_216; \ +uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ + __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ + __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_216; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, 
float16x4_t __p1, float16x4_t __p2) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \ + float16x4_t __s0_217 = __p0_217; \ + float16x4_t __s1_217 = __p1_217; \ + float16x4_t __s2_217 = __p2_217; \ + float16x4_t __ret_217; \ +float16x4_t __reint_217 = __s2_217; \ +uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ + __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \ + __ret_217; \ +}) +#else +#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ + float16x4_t __s0_218 = __p0_218; \ + float16x4_t __s1_218 = __p1_218; \ + float16x4_t __s2_218 = __p2_218; \ + float16x4_t __rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ + float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ + float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ + float16x4_t __ret_218; \ +float16x4_t __reint_218 = __rev2_218; \ +uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ + __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ + __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ + __ret_218; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ + float16x8_t __s0_219 = __p0_219; \ + float16x8_t __s1_219 = __p1_219; \ + float16x4_t __s2_219 = __p2_219; \ + float16x8_t __ret_219; \ +float16x4_t __reint_219 = __s2_219; \ +uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ + __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ + __ret_219; \ +}) +#else +#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \ + float16x8_t __s0_220 = __p0_220; \ + float16x8_t __s1_220 = __p1_220; \ + float16x4_t __s2_220 = __p2_220; \ + float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ + float16x8_t __ret_220; \ +float16x4_t __reint_220 = __rev2_220; \ +uint32x4_t __reint1_220 = (uint32x4_t) 
{__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ + __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ + __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_220; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ + float16x4_t __s0_221 = __p0_221; \ + float16x4_t __s1_221 = __p1_221; \ + float16x8_t __s2_221 = __p2_221; \ + float16x4_t __ret_221; \ +float16x8_t __reint_221 = __s2_221; \ +uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ + __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ + __ret_221; \ +}) +#else +#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ + float16x4_t __s0_222 = __p0_222; \ + float16x4_t __s1_222 = __p1_222; \ + float16x8_t __s2_222 = __p2_222; \ + float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ + float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ + float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_222; \ +float16x8_t __reint_222 = __rev2_222; \ +uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ + __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ + __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ + __ret_222; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ + float16x8_t __s0_223 = __p0_223; \ + float16x8_t __s1_223 = __p1_223; \ + float16x8_t __s2_223 = __p2_223; \ + float16x8_t __ret_223; \ +float16x8_t __reint_223 = __s2_223; \ +uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ + __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ + __ret_223; \ +}) +#else +#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ + float16x8_t __s0_224 = __p0_224; \ + float16x8_t __s1_224 = __p1_224; \ + float16x8_t __s2_224 = __p2_224; \ + float16x8_t __rev0_224; __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_224; \ +float16x8_t __reint_224 = __rev2_224; \ +uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) 
&__reint_224, __p3_224)}; \ + __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ + __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_224; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ + float16x4_t __s0_225 = __p0_225; \ + float16x4_t __s1_225 = __p1_225; \ + float16x4_t __s2_225 = __p2_225; \ + float16x4_t __ret_225; \ +float16x4_t __reint_225 = __s2_225; \ +uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ + __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ + __ret_225; \ +}) +#else +#define vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \ + float16x4_t __s0_226 = __p0_226; \ + float16x4_t __s1_226 = __p1_226; \ + float16x4_t __s2_226 = __p2_226; \ + float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ + float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ + float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ + float16x4_t __ret_226; \ +float16x4_t __reint_226 = __rev2_226; 
\ +uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ + __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ + __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ + __ret_226; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ + float16x8_t __s0_227 = __p0_227; \ + float16x8_t __s1_227 = __p1_227; \ + float16x4_t __s2_227 = __p2_227; \ + float16x8_t __ret_227; \ +float16x4_t __reint_227 = __s2_227; \ +uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ + __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \ + __ret_227; \ +}) +#else +#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ + float16x8_t __s0_228 = __p0_228; \ + float16x8_t __s1_228 = __p1_228; \ + float16x4_t __s2_228 = __p2_228; \ + float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ + float16x8_t __ret_228; \ +float16x4_t __reint_228 = __rev2_228; \ +uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ + __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ + __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_228; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ + float16x4_t __s0_229 = __p0_229; \ + float16x4_t __s1_229 = __p1_229; \ + float16x8_t __s2_229 = __p2_229; \ + float16x4_t __ret_229; \ +float16x8_t __reint_229 = __s2_229; \ +uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ + __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ + __ret_229; \ +}) +#else +#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ + float16x4_t __s0_230 = __p0_230; \ + float16x4_t __s1_230 = __p1_230; \ + float16x8_t __s2_230 = __p2_230; \ + float16x4_t __rev0_230; __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ + float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ + float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_230; \ +float16x8_t __reint_230 = __rev2_230; \ +uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \ + __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ 
+ __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \
+ __ret_230; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
+ float16x8_t __s0_231 = __p0_231; \
+ float16x8_t __s1_231 = __p1_231; \
+ float16x8_t __s2_231 = __p2_231; \
+ float16x8_t __ret_231; \
+float16x8_t __reint_231 = __s2_231; \
+uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \
+ __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \
+ __ret_231; \
+})
+#else
+#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
+ float16x8_t __s0_232 = __p0_232; \
+ float16x8_t __s1_232 = __p1_232; \
+ float16x8_t __s2_232 = __p2_232; \
+ float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __rev2_232; __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __ret_232; \
+float16x8_t __reint_232 = __rev2_232; \
+uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \
+ __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \
+ __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret_232; \
+})
+#endif
+
+#endif
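Composed the other way around, rotate-180 plus rotate-270 subtracts a full complex product from the accumulator. A minimal sketch, illustrative and not part of the patch, under the same FEAT_FCMA + FP16 assumptions:

#include <arm_neon.h>

/* acc -= a * b over complex lanes. */
static inline float16x8_t cmls_f16(float16x8_t acc, float16x8_t a,
                                   float16x8_t b) {
  acc = vcmlaq_rot180_f16(acc, a, b); /* acc.re -= a.re*b.re; acc.im -= a.re*b.im */
  acc = vcmlaq_rot270_f16(acc, a, b); /* acc.re += a.im*b.im; acc.im -= a.im*b.re */
  return acc;
}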
+#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
+ float64x1_t __s0_233 = __p0_233; \
+ float64x1_t __s1_233 = __p1_233; \
+ float64x1_t __s2_233 = __p2_233; \
+ float64x1_t __ret_233; \
+float64x1_t __reint_233 = __s2_233; \
+uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \
+ __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \
+ __ret_233; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
+ float64x2_t __s0_234 = __p0_234; \
+ float64x2_t __s1_234 = __p1_234; \
+ float64x1_t __s2_234 = __p2_234; \
+ float64x2_t __ret_234; \
+float64x1_t __reint_234 = __s2_234; \
+uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \
+ __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \
+ __ret_234; \
+})
+#else
+#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
+ float64x2_t __s0_235 = __p0_235; \
+ float64x2_t __s1_235 = __p1_235; \
+ float64x1_t __s2_235 = __p2_235; \
+ float64x2_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \
+ float64x2_t __rev1_235; __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \
+ float64x2_t __ret_235; \
+float64x1_t __reint_235 = __s2_235; \
+uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \
+ __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \
+ __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \
+ __ret_235; \
+})
+#endif
+
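For f64 each 128-bit vector holds exactly one complex number, so the q-form operations process a single (re, im) pair per call. A sketch of a full complex multiply-accumulate on doubles, illustrative and not part of the patch, assuming FEAT_FCMA on AArch64 (the rotate-90 f64 form is defined further down in the header):

#include <arm_neon.h>

/* acc += a * b for the one complex double held in each vector. */
static inline float64x2_t cmla_f64(float64x2_t acc, float64x2_t a,
                                   float64x2_t b) {
  acc = vcmlaq_f64(acc, a, b);       /* acc.re += a.re*b.re; acc.im += a.re*b.im */
  acc = vcmlaq_rot90_f64(acc, a, b); /* acc.re -= a.im*b.im; acc.im += a.im*b.re */
  return acc;
}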
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
+ float64x1_t __s0_236 = __p0_236; \
+ float64x1_t __s1_236 = __p1_236; \
+ float64x2_t __s2_236 = __p2_236; \
+ float64x1_t __ret_236; \
+float64x2_t __reint_236 = __s2_236; \
+uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \
+ __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \
+ __ret_236; \
+})
+#else
+#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
+ float64x1_t __s0_237 = __p0_237; \
+ float64x1_t __s1_237 = __p1_237; \
+ float64x2_t __s2_237 = __p2_237; \
+ float64x2_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \
+ float64x1_t __ret_237; \
+float64x2_t __reint_237 = __rev2_237; \
+uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \
+ __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \
+ __ret_237; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
+ float64x2_t __s0_238 = __p0_238; \
+ float64x2_t __s1_238 = __p1_238; \
+ float64x2_t __s2_238 = __p2_238; \
+ float64x2_t __ret_238; \
+float64x2_t __reint_238 = __s2_238; \
+uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \
+ __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \
+ __ret_238; \
+})
+#else
+#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
+ float64x2_t __s0_239 = __p0_239; \
+ float64x2_t __s1_239 = __p1_239; \
+ float64x2_t __s2_239 = __p2_239; \
+ float64x2_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
+ float64x2_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
+ float64x2_t __rev2_239; __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \
+ float64x2_t __ret_239; \
+float64x2_t __reint_239 = __rev2_239; \
+uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \
+ __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \
+ __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
+ __ret_239; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
+ float64x1_t __s0_240 = __p0_240; \
+ float64x1_t __s1_240 = __p1_240; \
+ float64x1_t __s2_240 = __p2_240; \
+ float64x1_t __ret_240; \
+float64x1_t __reint_240 = __s2_240; \
+uint64x2_t __reint1_240 = (uint64x2_t) 
{vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \ + __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \ + __ret_240; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \ + float64x2_t __s0_241 = __p0_241; \ + float64x2_t __s1_241 = __p1_241; \ + float64x1_t __s2_241 = __p2_241; \ + float64x2_t __ret_241; \ +float64x1_t __reint_241 = __s2_241; \ +uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \ + __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \ + __ret_241; \ +}) +#else +#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \ + float64x2_t __s0_242 = __p0_242; \ + float64x2_t __s1_242 = __p1_242; \ + float64x1_t __s2_242 = __p2_242; \ + float64x2_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \ + float64x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ + float64x2_t __ret_242; \ +float64x1_t __reint_242 = __s2_242; \ +uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \ + __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \ + __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \ + __ret_242; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \ + float64x1_t __s0_243 = __p0_243; \ + float64x1_t __s1_243 = __p1_243; \ + float64x2_t __s2_243 = __p2_243; \ + float64x1_t __ret_243; \ +float64x2_t __reint_243 = __s2_243; \ +uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \ + __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \ + __ret_243; \ +}) +#else +#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \ + float64x1_t __s0_244 = __p0_244; \ + float64x1_t __s1_244 = __p1_244; \ + float64x2_t __s2_244 = __p2_244; \ + float64x2_t __rev2_244; __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \ + float64x1_t __ret_244; \ +float64x2_t __reint_244 = __rev2_244; \ +uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \ + __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \ + __ret_244; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \ + float64x2_t __s0_245 = __p0_245; \ + float64x2_t __s1_245 = __p1_245; \ + float64x2_t __s2_245 = __p2_245; \ + float64x2_t __ret_245; \ +float64x2_t __reint_245 = __s2_245; \ +uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \ + __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \ + __ret_245; \ +}) +#else +#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \ + float64x2_t __s0_246 = __p0_246; \ + float64x2_t __s1_246 = __p1_246; \ + 
float64x2_t __s2_246 = __p2_246; \ + float64x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ + float64x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ + float64x2_t __rev2_246; __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \ + float64x2_t __ret_246; \ +float64x2_t __reint_246 = __rev2_246; \ +uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \ + __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \ + __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ + __ret_246; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \ + float64x1_t __s0_247 = __p0_247; \ + float64x1_t __s1_247 = __p1_247; \ + float64x1_t __s2_247 = __p2_247; \ + float64x1_t __ret_247; \ +float64x1_t __reint_247 = __s2_247; \ +uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \ + __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \ + __ret_247; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \ + float64x2_t __s0_248 = __p0_248; \ + float64x2_t __s1_248 = __p1_248; \ + float64x1_t __s2_248 = __p2_248; \ + float64x2_t __ret_248; \ +float64x1_t __reint_248 = __s2_248; \ +uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \ + __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \ + __ret_248; \ +}) +#else +#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \ + float64x2_t __s0_249 = __p0_249; \ + float64x2_t __s1_249 = __p1_249; \ + float64x1_t __s2_249 = __p2_249; \ + float64x2_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \ + float64x2_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \ + float64x2_t __ret_249; 
\ +float64x1_t __reint_249 = __s2_249; \ +uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \ + __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \ + __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \ + __ret_249; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ + float64x1_t __s0_250 = __p0_250; \ + float64x1_t __s1_250 = __p1_250; \ + float64x2_t __s2_250 = __p2_250; \ + float64x1_t __ret_250; \ +float64x2_t __reint_250 = __s2_250; \ +uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \ + __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \ + __ret_250; \ +}) +#else +#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ + float64x1_t __s0_251 = __p0_251; \ + float64x1_t __s1_251 = __p1_251; \ + float64x2_t __s2_251 = __p2_251; \ + float64x2_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \ + float64x1_t __ret_251; \ +float64x2_t __reint_251 = __rev2_251; \ +uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \ + __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \ + __ret_251; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ + float64x2_t __s0_252 = __p0_252; \ + float64x2_t __s1_252 = __p1_252; \ + float64x2_t __s2_252 = __p2_252; \ + float64x2_t __ret_252; \ +float64x2_t __reint_252 = __s2_252; \ +uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \ + __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \ + __ret_252; \ +}) +#else +#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ + float64x2_t __s0_253 = __p0_253; \ + float64x2_t __s1_253 = __p1_253; \ + float64x2_t __s2_253 = __p2_253; \ + float64x2_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \ + float64x2_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \ + float64x2_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \ + float64x2_t __ret_253; \ +float64x2_t __reint_253 = __rev2_253; \ +uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \ + __ret_253 = __noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \ + __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \ + __ret_253; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
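+// Note: each intrinsic in this generated header has two definitions. The
+// __LITTLE_ENDIAN__ branch calls the underlying __builtin_neon_* builtin
+// directly; the big-endian branch first reverses lane order with
+// __builtin_shufflevector, operates on the reversed copies, and reverses the
+// result back. The __noswap_* variants are the unreversed cores used by other
+// big-endian wrappers that have already reversed their operands.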
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
+ float64x1_t __ret;
+ __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
+ return __ret;
+}
+#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
+ float64x1_t __s0_254 = __p0_254; \
+ float64x1_t __s1_254 = __p1_254; \
+ float64x1_t __s2_254 = __p2_254; \
+ float64x1_t __ret_254; \
+float64x1_t __reint_254 = __s2_254; \
+uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \
+ __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \
+ __ret_254; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
+ float64x2_t __s0_255 = __p0_255; \
+ float64x2_t __s1_255 = __p1_255; \
+ float64x1_t __s2_255 = __p2_255; \
+ float64x2_t __ret_255; \
+float64x1_t __reint_255 = __s2_255; \
+uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \
+ __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \
+ __ret_255; \
+})
+#else
+#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
+ float64x2_t __s0_256 = __p0_256; \
+ float64x2_t __s1_256 = __p1_256; \
+ float64x1_t __s2_256 = __p2_256; \
+ float64x2_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \
+ float64x2_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \
+ float64x2_t __ret_256; \
+float64x1_t __reint_256 = __s2_256; \
+uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \
+ __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \
+ __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \
+ __ret_256; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
+ float64x1_t __s0_257 = __p0_257; \
+ float64x1_t __s1_257 = __p1_257; \
+ float64x2_t __s2_257 = __p2_257; \
+ float64x1_t __ret_257; \
+float64x2_t __reint_257 = __s2_257; \
+uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \
+ __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \
+ __ret_257; \
+})
+#else
+#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
+ float64x1_t __s0_258 = __p0_258; \
+ float64x1_t __s1_258 = __p1_258; \
+ float64x2_t __s2_258 = __p2_258; \
+ float64x2_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \
+ float64x1_t __ret_258; \
+float64x2_t __reint_258 = __rev2_258; \
+uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \
+ __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \
+ __ret_258; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
+ float64x2_t __s0_259 = __p0_259; \
+ float64x2_t __s1_259 = __p1_259; \
+ float64x2_t __s2_259 = __p2_259; \
+ float64x2_t __ret_259; \
+float64x2_t __reint_259 = __s2_259; \
+uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \
+ __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \
+ __ret_259; \
+})
+#else
+#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
+ float64x2_t __s0_260 = __p0_260; \
+ float64x2_t __s1_260 = __p1_260; \
+ float64x2_t __s2_260 = __p2_260; \
+ float64x2_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \
+ float64x2_t __rev1_260; __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \
+ float64x2_t __rev2_260; __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \
+ float64x2_t __ret_260; \
+float64x2_t __reint_260 = __rev2_260; \
+uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \
+ __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \
+ __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \
+ __ret_260; \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_DOTPROD)
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
+ uint32x4_t __s0_261 = __p0_261; \
+ uint8x16_t __s1_261 = __p1_261; \
+ uint8x8_t __s2_261 = __p2_261; \
+ uint32x4_t __ret_261; \
+uint8x8_t __reint_261 = __s2_261; \
+uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, __p3_261); \
+ __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \
+ __ret_261; \
+})
+#else
+#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
+ uint32x4_t __s0_262 = __p0_262; \
+ uint8x16_t __s1_262 = __p1_262; \
+ uint8x8_t __s2_262 = __p2_262; \
+ uint32x4_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \
+ uint8x16_t __rev1_262; __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret_262; \
+uint8x8_t __reint_262 = __rev2_262; \
+uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \
+ __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \
+ __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \
+ __ret_262; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
+ int32x4_t __s0_263 = __p0_263; \
+ int8x16_t __s1_263 = __p1_263; \
+ int8x8_t __s2_263 = __p2_263; \
+ int32x4_t __ret_263; \
+int8x8_t __reint_263 = __s2_263; \
+int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \
+ __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \
+ __ret_263; \
+})
+#else
+#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
+ int32x4_t __s0_264 = __p0_264; \
+ int8x16_t __s1_264 = __p1_264; \
+ int8x8_t __s2_264 = __p2_264; \
+ int32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
+ int8x16_t __rev1_264; __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret_264; \
+int8x8_t __reint_264 = __rev2_264; \
+int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \
+ __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \
+ __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
+ __ret_264; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
+ uint32x2_t __s0_265 = __p0_265; \
+ uint8x8_t __s1_265 = __p1_265; \
+ uint8x8_t __s2_265 = __p2_265; \
+ uint32x2_t __ret_265; \
+uint8x8_t __reint_265 = __s2_265; \
+uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \
+ __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \
+ __ret_265; \
+})
+#else
+#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
+ uint32x2_t __s0_266 = __p0_266; \
+ uint8x8_t __s1_266 = __p1_266; \
+ uint8x8_t __s2_266 = __p2_266; \
+ uint32x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \
+ uint8x8_t __rev1_266; __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev2_266; __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x2_t __ret_266; \
+uint8x8_t __reint_266 = __rev2_266; \
+uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \
+ __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \
+ __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \
+ __ret_266; \
+})
+#endif
+
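+// Note: the vdot(q)_lane_* macros broadcast one 32-bit group (four bytes) of
+// the last operand: the byte vector is reinterpreted as 32-bit lanes, the
+// selected lane is splatted with splat(q)_lane_u32/s32, and the result is
+// reinterpreted back to bytes before calling the plain vdot(q) intrinsic.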
+#ifdef __LITTLE_ENDIAN__
+#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
+ int32x2_t __s0_267 = __p0_267; \
+ int8x8_t __s1_267 = __p1_267; \
+ int8x8_t __s2_267 = __p2_267; \
+ int32x2_t __ret_267; \
+int8x8_t __reint_267 = __s2_267; \
+int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \
+ __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \
+ __ret_267; \
+})
+#else
+#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
+ int32x2_t __s0_268 = __p0_268; \
+ int8x8_t __s1_268 = __p1_268; \
+ int8x8_t __s2_268 = __p2_268; \
+ int32x2_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
+ int8x8_t __rev1_268; __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x2_t __ret_268; \
+int8x8_t __reint_268 = __rev2_268; \
+int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \
+ __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \
+ __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
+ __ret_268; \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
+ uint32x4_t __s0_269 = __p0_269; \
+ uint8x16_t __s1_269 = __p1_269; \
+ uint8x16_t __s2_269 = __p2_269; \
+ uint32x4_t __ret_269; \
+uint8x16_t __reint_269 = __s2_269; \
+uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \
+ __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \
+ __ret_269; \
+})
+#else
+#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
+ uint32x4_t __s0_270 = __p0_270; \
+ uint8x16_t __s1_270 = __p1_270; \
+ uint8x16_t __s2_270 = __p2_270; \
+ uint32x4_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \
+ uint8x16_t __rev1_270; __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x4_t __ret_270; \
+uint8x16_t __reint_270 = __rev2_270; \
+uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \
+ __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \
+ __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \
+ __ret_270; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
+ int32x4_t __s0_271 = __p0_271; \
+ int8x16_t __s1_271 = __p1_271; \
+ int8x16_t __s2_271 = __p2_271; \
+ int32x4_t __ret_271; \
+int8x16_t __reint_271 = __s2_271; \
+int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \
+ __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \
+ __ret_271; \
+})
+#else
+#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
+ int32x4_t __s0_272 = __p0_272; \
+ int8x16_t __s1_272 = __p1_272; \
+ int8x16_t __s2_272 = __p2_272; \
+ int32x4_t __rev0_272; __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \
+ int8x16_t __rev1_272; __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x4_t __ret_272; \
+int8x16_t __reint_272 = __rev2_272; \
+int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \
+ __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \
+ __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \
+ __ret_272; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
+ uint32x2_t __s0_273 = __p0_273; \
+ uint8x8_t __s1_273 = __p1_273; \
+ uint8x16_t __s2_273 = __p2_273; \
+ uint32x2_t __ret_273; \
+uint8x16_t __reint_273 = __s2_273; \
+uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \
+ __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \
+ __ret_273; \
+})
+#else
+#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
+ uint32x2_t __s0_274 = __p0_274; \
+ uint8x8_t __s1_274 = __p1_274; \
+ uint8x16_t __s2_274 = __p2_274; \
+ uint32x2_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \
+ uint8x8_t __rev1_274; __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint32x2_t __ret_274; \
+uint8x16_t __reint_274 = __rev2_274; \
+uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \
+ __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \
+ __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \
+ __ret_274; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
+ int32x2_t __s0_275 = __p0_275; \
+ int8x8_t __s1_275 = __p1_275; \
+ int8x16_t __s2_275 = __p2_275; \
+ int32x2_t __ret_275; \
+int8x16_t __reint_275 = __s2_275; \
+int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \
+ __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \
+ __ret_275; \
+})
+#else
+#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
+ int32x2_t __s0_276 = __p0_276; \
+ int8x8_t __s1_276 = __p1_276; \
+ int8x16_t __s2_276 = __p2_276; \
+ int32x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \
+ int8x8_t __rev1_276; __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev2_276; __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int32x2_t __ret_276; \
+int8x16_t __reint_276 = __rev2_276; \
+int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \
+ __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \
+ __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \
+ __ret_276; \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_FMA)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
+ return __ret;
+}
+#else
+__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
+ return __ret;
+}
+#else
+__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = vfmaq_f32(__p0, -__p1, __p2);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = vfma_f32(__p0, -__p1, __p2);
+ return __ret;
+}
+#else
+__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
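+// Note: vfmlal/vfmlsl are the FP16 multiply-accumulate (long) intrinsics:
+// float16 elements taken from the low or high half of each source vector are
+// multiplied and the widened products are added to (vfmlal) or subtracted
+// from (vfmlsl) the float32 accumulator lanes.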
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
+ return __ret;
+}
+#else
+__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vabsq_f16(float16x8_t __p0) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
+ return __ret;
+}
+#else
+__ai float16x8_t vabsq_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vabs_f16(float16x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vabs_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
+ return __ret;
+}
+#else
+__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
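+// Note: vcage/vcagt/vcale/vcalt compare absolute values (|a| >= |b|, >, <=,
+// <); like the other f16 comparisons below they return lanes of all ones or
+// all zeros in a uint16 vector rather than a C boolean.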
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_f16(float16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclez_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
+ float16x8_t __rev0; __rev0
= __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcltzq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcltz_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; 
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = 
__p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t 
vcvtmq_u16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = 
(int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; 
__rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = vfmaq_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = vfma_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \ + float16x8_t __s0_277 = __p0_277; \ + float16x4_t __s1_277 = __p1_277; \ + float16x8_t __ret_277; \ + __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \ + __ret_277; \ +}) +#else +#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \ + 
float16x8_t __s0_278 = __p0_278; \ + float16x4_t __s1_278 = __p1_278; \ + float16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_278; __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \ + float16x8_t __ret_278; \ + __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \ + __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_278; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \ + float16x4_t __s0_279 = __p0_279; \ + float16x4_t __s1_279 = __p1_279; \ + float16x4_t __ret_279; \ + __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \ + __ret_279; \ +}) +#else +#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \ + float16x4_t __s0_280 = __p0_280; \ + float16x4_t __s1_280 = __p1_280; \ + float16x4_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \ + float16x4_t __rev1_280; __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \ + float16x4_t __ret_280; \ + __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \ + __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \ + __ret_280; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __ret; \ + __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __ret; \ + __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = 
(float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) 
__builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
7, 6, 5, 4, 3, 2, 1, 0); + float16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) 
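+/* Editor's note -- a hedged aside, not part of upstream clang's generated
+ * arm_neon.h. Every intrinsic above is emitted in two flavours: on
+ * little-endian targets the builtin is called directly; on big-endian
+ * targets the in-register lane order is the reverse of the architectural
+ * lane numbering the NEON builtins assume, so each wrapper reverses its
+ * operands with __builtin_shufflevector, calls the builtin, and reverses
+ * the result back. The __noswap_* variants skip that reversal so that other
+ * big-endian wrappers (e.g. vfmsq_lane_f16 below) can compose them on
+ * operands that are already reversed. The trailing integer literal selects
+ * the element type for the polymorphic builtin; from the constants used
+ * here, 1 = int16x4, 17 = uint16x4, 33 = int16x8, 49 = uint16x8,
+ * 8 = float16x4, 40 = float16x8 (+16 appears to mark unsigned, +32 a
+ * 128-bit "q" vector).
+ *
+ * Minimal caller-side sketch, assuming an AArch64 toolchain with FP16
+ * arithmetic enabled (e.g. -march=armv8.2-a+fp16); the variable names are
+ * illustrative only:
+ *
+ *   float16x8_t acc = vdupq_n_f16(0.0);          // zeroed accumulator
+ *   acc = vfmaq_lane_f16(acc, a, coeffs, 0);     // acc += a * coeffs[0]
+ *
+ * The endianness handling is invisible to the caller: both branches of the
+ * #ifdef compute the same architectural result.
+ */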
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, 
(int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __ret; \ + __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __ret; \ + __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + float16_t __s0_281 = __p0_281; \ + float16_t __s1_281 = __p1_281; \ + float16x4_t __s2_281 = __p2_281; \ + float16_t __ret_281; \ + __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \ + __ret_281; \ +}) +#else +#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ + float16_t __s0_282 = __p0_282; \ + float16_t __s1_282 = __p1_282; \ + float16x4_t __s2_282 = 
__p2_282; \ + float16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ + float16_t __ret_282; \ + __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \ + __ret_282; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ + float16x8_t __s0_283 = __p0_283; \ + float16x8_t __s1_283 = __p1_283; \ + float16x4_t __s2_283 = __p2_283; \ + float16x8_t __ret_283; \ + __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \ + __ret_283; \ +}) +#else +#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ + float16x8_t __s0_284 = __p0_284; \ + float16x8_t __s1_284 = __p1_284; \ + float16x4_t __s2_284 = __p2_284; \ + float16x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_284; __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \ + float16x8_t __ret_284; \ + __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \ + __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_284; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ + float16x4_t __s0_285 = __p0_285; \ + float16x4_t __s1_285 = __p1_285; \ + float16x4_t __s2_285 = __p2_285; \ + float16x4_t __ret_285; \ + __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \ + __ret_285; \ +}) +#else +#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ + float16x4_t __s0_286 = __p0_286; \ + float16x4_t __s1_286 = __p1_286; \ + float16x4_t __s2_286 = __p2_286; \ + float16x4_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \ + float16x4_t __rev1_286; __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \ + float16x4_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \ + float16x4_t __ret_286; \ + __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \ + __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \ + __ret_286; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ + float16_t __s0_287 = __p0_287; \ + float16_t __s1_287 = __p1_287; \ + float16x8_t __s2_287 = __p2_287; \ + float16_t __ret_287; \ + __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \ + __ret_287; \ +}) +#else +#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ + float16_t __s0_288 = __p0_288; \ + float16_t __s1_288 = __p1_288; \ + float16x8_t __s2_288 = __p2_288; \ + float16x8_t __rev2_288; __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret_288; \ + __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \ + __ret_288; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ + float16x8_t __s0_289 = __p0_289; \ + float16x8_t __s1_289 = __p1_289; \ + float16x8_t __s2_289 = __p2_289; \ + float16x8_t __ret_289; \ + __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \ + __ret_289; \ +}) +#else +#define vfmsq_laneq_f16(__p0_290, 
__p1_290, __p2_290, __p3_290) __extension__ ({ \ + float16x8_t __s0_290 = __p0_290; \ + float16x8_t __s1_290 = __p1_290; \ + float16x8_t __s2_290 = __p2_290; \ + float16x8_t __rev0_290; __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_290; __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_290; __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_290; \ + __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \ + __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_290; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ + float16x4_t __s0_291 = __p0_291; \ + float16x4_t __s1_291 = __p1_291; \ + float16x8_t __s2_291 = __p2_291; \ + float16x4_t __ret_291; \ + __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \ + __ret_291; \ +}) +#else +#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ + float16x4_t __s0_292 = __p0_292; \ + float16x4_t __s1_292 = __p1_292; \ + float16x8_t __s2_292 = __p2_292; \ + float16x4_t __rev0_292; __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \ + float16x4_t __rev1_292; __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \ + float16x8_t __rev2_292; __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_292; \ + __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \ + __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \ + __ret_292; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __ret; \ + __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __ret; \ + __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = 
__p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminnmv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminvq_f16(__p0) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ + __ret; \ +}) 
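+/* Note on the pattern used throughout this file: under big endian (the
+ * #else branches), every vector argument is lane-reversed with
+ * __builtin_shufflevector before the underlying __builtin_neon_* call,
+ * and vector results are reversed back afterwards. Horizontal reductions
+ * such as vminv_f16 and vmaxv_f16 return a scalar, so only the input
+ * needs reversing. */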
+#else +#define vminv_f16(__p0) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \ + float16x8_t __s0_293 = __p0_293; \ + float16x8_t __s1_293 = __p1_293; \ + float16x8_t __ret_293; \ + __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \ + __ret_293; \ +}) +#else +#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \ + float16x8_t __s0_294 = __p0_294; \ + float16x8_t __s1_294 = __p1_294; \ + float16x8_t __rev0_294; __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_294; __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_294; \ + __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \ + __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_294; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \ + float16x4_t __s0_295 = __p0_295; \ + float16x8_t __s1_295 = __p1_295; \ + float16x4_t __ret_295; \ + __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \ + __ret_295; \ +}) +#else +#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \ + float16x4_t __s0_296 = __p0_296; \ + float16x8_t __s1_296 = __p1_296; \ + float16x4_t __rev0_296; __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \ + float16x8_t __rev1_296; __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_296; \ + __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \ + __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \ + __ret_296; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai 
float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \ + float16x8_t __s0_297 = __p0_297; \ + float16x4_t __s1_297 = __p1_297; \ + float16x8_t __ret_297; \ + __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \ + __ret_297; \ +}) +#else +#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \ + float16x8_t __s0_298 = __p0_298; \ + float16x4_t __s1_298 = __p1_298; \ + float16x8_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_298; __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \ + float16x8_t __ret_298; \ + __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \ + __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_298; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \ + float16x4_t __s0_299 = __p0_299; \ + float16x4_t __s1_299 = __p1_299; \ + float16x4_t __ret_299; \ + __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \ + __ret_299; \ +}) +#else +#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \ + float16x4_t __s0_300 = __p0_300; \ + float16x4_t __s1_300 = __p1_300; \ + float16x4_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \ + float16x4_t __rev1_300; __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \ + float16x4_t __ret_300; \ + __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \ + __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \ + __ret_300; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \ + float16x8_t __s0_301 = __p0_301; \ + float16x8_t __s1_301 = __p1_301; \ + float16x8_t __ret_301; \ + __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \ + __ret_301; \ +}) +#else +#define vmulxq_laneq_f16(__p0_302, __p1_302, 
__p2_302) __extension__ ({ \ + float16x8_t __s0_302 = __p0_302; \ + float16x8_t __s1_302 = __p1_302; \ + float16x8_t __rev0_302; __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_302; __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_302; \ + __ret_302 = __noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \ + __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_302; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \ + float16x4_t __s0_303 = __p0_303; \ + float16x8_t __s1_303 = __p1_303; \ + float16x4_t __ret_303; \ + __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \ + __ret_303; \ +}) +#else +#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \ + float16x4_t __s0_304 = __p0_304; \ + float16x8_t __s1_304 = __p1_304; \ + float16x4_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \ + float16x8_t __rev1_304; __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_304; \ + __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \ + __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \ + __ret_304; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __ret; \ + __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __ret; \ + __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vpmaxq_f16(float16x8_t __p0, 
float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, 
(int8x8_t)__p1, 8); + return __ret; +} +#else +__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; 
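+  /* big-endian path: both inputs are lane-reversed, the transpose
+   * shuffle runs on the reversed lanes, and the result is reversed
+   * back to memory order */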
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_MATMUL_INT8) +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ + int32x4_t __s0_305 = __p0_305; \ + uint8x16_t __s1_305 = __p1_305; \ + int8x8_t __s2_305 = __p2_305; \ + 
int32x4_t __ret_305; \ +int8x8_t __reint_305 = __s2_305; \ + __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \ + __ret_305; \ +}) +#else +#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ + int32x4_t __s0_306 = __p0_306; \ + uint8x16_t __s1_306 = __p1_306; \ + int8x8_t __s2_306 = __p2_306; \ + int32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ + uint8x16_t __rev1_306; __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_306; \ +int8x8_t __reint_306 = __rev2_306; \ + __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \ + __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ + __ret_306; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ + int32x2_t __s0_307 = __p0_307; \ + uint8x8_t __s1_307 = __p1_307; \ + int8x8_t __s2_307 = __p2_307; \ + int32x2_t __ret_307; \ +int8x8_t __reint_307 = __s2_307; \ + __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \ + __ret_307; \ +}) +#else +#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ + int32x2_t __s0_308 = __p0_308; \ + uint8x8_t __s1_308 = __p1_308; \ + int8x8_t __s2_308 = __p2_308; \ + int32x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ + uint8x8_t __rev1_308; __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x2_t __ret_308; \ +int8x8_t __reint_308 = __rev2_308; \ + __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \ + __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ + __ret_308; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_QRDMX) +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ + int32x4_t __s0_309 = __p0_309; \ + int32x4_t __s1_309 = __p1_309; \ + int32x2_t __s2_309 = __p2_309; \ + int32x4_t __ret_309; \ + __ret_309 = vqaddq_s32(__s0_309, vqrdmulhq_s32(__s1_309, splatq_lane_s32(__s2_309, __p3_309))); \ + __ret_309; \ +}) +#else +#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ + int32x4_t __s0_310 = __p0_310; \ + int32x4_t __s1_310 = __p1_310; \ + int32x2_t __s2_310 = __p2_310; \ + int32x4_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \ + int32x4_t __rev1_310; __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \ + int32x2_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \ + int32x4_t __ret_310; \ + __ret_310 = __noswap_vqaddq_s32(__rev0_310, __noswap_vqrdmulhq_s32(__rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310))); \ + __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \ + __ret_310; \ +}) 
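+/* The vqrdmlah lane forms above (and the vqrdmlsh forms that follow) are
+ * built from existing intrinsics: a saturating add (vqadd) or subtract
+ * (vqsub) of the accumulator with the saturating rounding doubling
+ * multiply-high (vqrdmulh) of a lane splatted across the vector; the
+ * big-endian variants route through the __noswap_* helpers on
+ * pre-reversed operands. */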
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ + int16x8_t __s0_311 = __p0_311; \ + int16x8_t __s1_311 = __p1_311; \ + int16x4_t __s2_311 = __p2_311; \ + int16x8_t __ret_311; \ + __ret_311 = vqaddq_s16(__s0_311, vqrdmulhq_s16(__s1_311, splatq_lane_s16(__s2_311, __p3_311))); \ + __ret_311; \ +}) +#else +#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ + int16x8_t __s0_312 = __p0_312; \ + int16x8_t __s1_312 = __p1_312; \ + int16x4_t __s2_312 = __p2_312; \ + int16x8_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_312; __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \ + int16x8_t __ret_312; \ + __ret_312 = __noswap_vqaddq_s16(__rev0_312, __noswap_vqrdmulhq_s16(__rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312))); \ + __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ + int32x2_t __s0_313 = __p0_313; \ + int32x2_t __s1_313 = __p1_313; \ + int32x2_t __s2_313 = __p2_313; \ + int32x2_t __ret_313; \ + __ret_313 = vqadd_s32(__s0_313, vqrdmulh_s32(__s1_313, splat_lane_s32(__s2_313, __p3_313))); \ + __ret_313; \ +}) +#else +#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ + int32x2_t __s0_314 = __p0_314; \ + int32x2_t __s1_314 = __p1_314; \ + int32x2_t __s2_314 = __p2_314; \ + int32x2_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \ + int32x2_t __rev1_314; __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \ + int32x2_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \ + int32x2_t __ret_314; \ + __ret_314 = __noswap_vqadd_s32(__rev0_314, __noswap_vqrdmulh_s32(__rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314))); \ + __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \ + __ret_314; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ + int16x4_t __s0_315 = __p0_315; \ + int16x4_t __s1_315 = __p1_315; \ + int16x4_t __s2_315 = __p2_315; \ + int16x4_t __ret_315; \ + __ret_315 = vqadd_s16(__s0_315, vqrdmulh_s16(__s1_315, splat_lane_s16(__s2_315, __p3_315))); \ + __ret_315; \ +}) +#else +#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ + int16x4_t __s0_316 = __p0_316; \ + int16x4_t __s1_316 = __p1_316; \ + int16x4_t __s2_316 = __p2_316; \ + int16x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ + int16x4_t __rev1_316; __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \ + int16x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ + int16x4_t __ret_316; \ + __ret_316 = __noswap_vqadd_s16(__rev0_316, __noswap_vqrdmulh_s16(__rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316))); \ + __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \ + __ret_316; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2)); + return __ret; +} +#else +__ai 
int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ + int32x4_t __s0_317 = __p0_317; \ + int32x4_t __s1_317 = __p1_317; \ + int32x2_t __s2_317 = __p2_317; \ + int32x4_t __ret_317; \ + __ret_317 = vqsubq_s32(__s0_317, vqrdmulhq_s32(__s1_317, splatq_lane_s32(__s2_317, __p3_317))); \ + __ret_317; \ +}) +#else +#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ + int32x4_t __s0_318 = __p0_318; \ + int32x4_t __s1_318 = __p1_318; \ + int32x2_t __s2_318 = __p2_318; \ + int32x4_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \ + int32x4_t __rev1_318; __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \ + int32x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ + int32x4_t __ret_318; \ + __ret_318 = __noswap_vqsubq_s32(__rev0_318, 
__noswap_vqrdmulhq_s32(__rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318))); \ + __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \ + __ret_318; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ + int16x8_t __s0_319 = __p0_319; \ + int16x8_t __s1_319 = __p1_319; \ + int16x4_t __s2_319 = __p2_319; \ + int16x8_t __ret_319; \ + __ret_319 = vqsubq_s16(__s0_319, vqrdmulhq_s16(__s1_319, splatq_lane_s16(__s2_319, __p3_319))); \ + __ret_319; \ +}) +#else +#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ + int16x8_t __s0_320 = __p0_320; \ + int16x8_t __s1_320 = __p1_320; \ + int16x4_t __s2_320 = __p2_320; \ + int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_320; __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \ + int16x8_t __ret_320; \ + __ret_320 = __noswap_vqsubq_s16(__rev0_320, __noswap_vqrdmulhq_s16(__rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320))); \ + __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_320; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ + int32x2_t __s0_321 = __p0_321; \ + int32x2_t __s1_321 = __p1_321; \ + int32x2_t __s2_321 = __p2_321; \ + int32x2_t __ret_321; \ + __ret_321 = vqsub_s32(__s0_321, vqrdmulh_s32(__s1_321, splat_lane_s32(__s2_321, __p3_321))); \ + __ret_321; \ +}) +#else +#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ + int32x2_t __s0_322 = __p0_322; \ + int32x2_t __s1_322 = __p1_322; \ + int32x2_t __s2_322 = __p2_322; \ + int32x2_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \ + int32x2_t __rev1_322; __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \ + int32x2_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \ + int32x2_t __ret_322; \ + __ret_322 = __noswap_vqsub_s32(__rev0_322, __noswap_vqrdmulh_s32(__rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322))); \ + __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \ + __ret_322; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ + int16x4_t __s0_323 = __p0_323; \ + int16x4_t __s1_323 = __p1_323; \ + int16x4_t __s2_323 = __p2_323; \ + int16x4_t __ret_323; \ + __ret_323 = vqsub_s16(__s0_323, vqrdmulh_s16(__s1_323, splat_lane_s16(__s2_323, __p3_323))); \ + __ret_323; \ +}) +#else +#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ + int16x4_t __s0_324 = __p0_324; \ + int16x4_t __s1_324 = __p1_324; \ + int16x4_t __s2_324 = __p2_324; \ + int16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \ + int16x4_t __rev1_324; __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \ + int16x4_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \ + int16x4_t __ret_324; \ + __ret_324 = __noswap_vqsub_s16(__rev0_324, __noswap_vqrdmulh_s16(__rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324))); \ + __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ + __ret_324; \ +}) +#endif + +#endif +#if 
defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ + int32x4_t __s0_325 = __p0_325; \ + int32x4_t __s1_325 = __p1_325; \ + int32x4_t __s2_325 = __p2_325; \ + int32x4_t __ret_325; \ + __ret_325 = vqaddq_s32(__s0_325, vqrdmulhq_s32(__s1_325, splatq_laneq_s32(__s2_325, __p3_325))); \ + __ret_325; \ +}) +#else +#define vqrdmlahq_laneq_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ + int32x4_t __s0_326 = __p0_326; \ + int32x4_t __s1_326 = __p1_326; \ + int32x4_t __s2_326 = __p2_326; \ + int32x4_t __rev0_326; __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 3, 2, 1, 0); \ + int32x4_t __rev1_326; __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 3, 2, 1, 0); \ + int32x4_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 3, 2, 1, 0); \ + int32x4_t __ret_326; \ + __ret_326 = __noswap_vqaddq_s32(__rev0_326, __noswap_vqrdmulhq_s32(__rev1_326, __noswap_splatq_laneq_s32(__rev2_326, __p3_326))); \ + __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 3, 2, 1, 0); \ + __ret_326; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ + int16x8_t __s0_327 = __p0_327; \ + int16x8_t __s1_327 = __p1_327; \ + int16x8_t __s2_327 = __p2_327; \ + int16x8_t __ret_327; \ + __ret_327 = vqaddq_s16(__s0_327, vqrdmulhq_s16(__s1_327, splatq_laneq_s16(__s2_327, __p3_327))); \ + __ret_327; \ +}) +#else +#define vqrdmlahq_laneq_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ + int16x8_t __s0_328 = __p0_328; \ + int16x8_t __s1_328 = __p1_328; \ + int16x8_t __s2_328 = __p2_328; \ + int16x8_t __rev0_328; __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_328; __rev1_328 = __builtin_shufflevector(__s1_328, __s1_328, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_328; __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_328; \ + __ret_328 = __noswap_vqaddq_s16(__rev0_328, __noswap_vqrdmulhq_s16(__rev1_328, __noswap_splatq_laneq_s16(__rev2_328, __p3_328))); \ + __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_328; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ + int32x2_t __s0_329 = __p0_329; \ + int32x2_t __s1_329 = __p1_329; \ + int32x4_t __s2_329 = __p2_329; \ + int32x2_t __ret_329; \ + __ret_329 = vqadd_s32(__s0_329, vqrdmulh_s32(__s1_329, splat_laneq_s32(__s2_329, __p3_329))); \ + __ret_329; \ +}) +#else +#define vqrdmlah_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ + int32x2_t __s0_330 = __p0_330; \ + int32x2_t __s1_330 = __p1_330; \ + int32x4_t __s2_330 = __p2_330; \ + int32x2_t __rev0_330; __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \ + int32x2_t __rev1_330; __rev1_330 = __builtin_shufflevector(__s1_330, __s1_330, 1, 0); \ + int32x4_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \ + int32x2_t __ret_330; \ + __ret_330 = __noswap_vqadd_s32(__rev0_330, __noswap_vqrdmulh_s32(__rev1_330, __noswap_splat_laneq_s32(__rev2_330, __p3_330))); \ + __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \ + __ret_330; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ + int16x4_t __s0_331 
= __p0_331; \ + int16x4_t __s1_331 = __p1_331; \ + int16x8_t __s2_331 = __p2_331; \ + int16x4_t __ret_331; \ + __ret_331 = vqadd_s16(__s0_331, vqrdmulh_s16(__s1_331, splat_laneq_s16(__s2_331, __p3_331))); \ + __ret_331; \ +}) +#else +#define vqrdmlah_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ + int16x4_t __s0_332 = __p0_332; \ + int16x4_t __s1_332 = __p1_332; \ + int16x8_t __s2_332 = __p2_332; \ + int16x4_t __rev0_332; __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \ + int16x4_t __rev1_332; __rev1_332 = __builtin_shufflevector(__s1_332, __s1_332, 3, 2, 1, 0); \ + int16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_332; \ + __ret_332 = __noswap_vqadd_s16(__rev0_332, __noswap_vqrdmulh_s16(__rev1_332, __noswap_splat_laneq_s16(__rev2_332, __p3_332))); \ + __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \ + __ret_332; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ + int32x4_t __s0_333 = __p0_333; \ + int32x4_t __s1_333 = __p1_333; \ + int32x4_t __s2_333 = __p2_333; \ + int32x4_t __ret_333; \ + __ret_333 = vqsubq_s32(__s0_333, vqrdmulhq_s32(__s1_333, splatq_laneq_s32(__s2_333, __p3_333))); \ + __ret_333; \ +}) +#else +#define vqrdmlshq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ + int32x4_t __s0_334 = __p0_334; \ + int32x4_t __s1_334 = __p1_334; \ + int32x4_t __s2_334 = __p2_334; \ + int32x4_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \ + int32x4_t __rev1_334; __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \ + int32x4_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \ + int32x4_t __ret_334; \ + __ret_334 = __noswap_vqsubq_s32(__rev0_334, __noswap_vqrdmulhq_s32(__rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334))); \ + __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \ + __ret_334; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ + int16x8_t __s0_335 = __p0_335; \ + int16x8_t __s1_335 = __p1_335; \ + int16x8_t __s2_335 = __p2_335; \ + int16x8_t __ret_335; \ + __ret_335 = vqsubq_s16(__s0_335, vqrdmulhq_s16(__s1_335, splatq_laneq_s16(__s2_335, __p3_335))); \ + __ret_335; \ +}) +#else +#define vqrdmlshq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ + int16x8_t __s0_336 = __p0_336; \ + int16x8_t __s1_336 = __p1_336; \ + int16x8_t __s2_336 = __p2_336; \ + int16x8_t __rev0_336; __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_336; __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_336; \ + __ret_336 = __noswap_vqsubq_s16(__rev0_336, __noswap_vqrdmulhq_s16(__rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336))); \ + __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_336; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ + int32x2_t __s0_337 = __p0_337; \ + int32x2_t __s1_337 = __p1_337; \ + int32x4_t __s2_337 = __p2_337; \ + int32x2_t __ret_337; \ + __ret_337 = vqsub_s32(__s0_337, 
vqrdmulh_s32(__s1_337, splat_laneq_s32(__s2_337, __p3_337))); \ + __ret_337; \ +}) +#else +#define vqrdmlsh_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ + int32x2_t __s0_338 = __p0_338; \ + int32x2_t __s1_338 = __p1_338; \ + int32x4_t __s2_338 = __p2_338; \ + int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ + int32x2_t __rev1_338; __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \ + int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ + int32x2_t __ret_338; \ + __ret_338 = __noswap_vqsub_s32(__rev0_338, __noswap_vqrdmulh_s32(__rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338))); \ + __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ + __ret_338; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ + int16x4_t __s0_339 = __p0_339; \ + int16x4_t __s1_339 = __p1_339; \ + int16x8_t __s2_339 = __p2_339; \ + int16x4_t __ret_339; \ + __ret_339 = vqsub_s16(__s0_339, vqrdmulh_s16(__s1_339, splat_laneq_s16(__s2_339, __p3_339))); \ + __ret_339; \ +}) +#else +#define vqrdmlsh_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ + int16x4_t __s0_340 = __p0_340; \ + int16x4_t __s1_340 = __p1_340; \ + int16x8_t __s2_340 = __p2_340; \ + int16x4_t __rev0_340; __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \ + int16x4_t __rev1_340; __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \ + int16x8_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_340; \ + __ret_340 = __noswap_vqsub_s16(__rev0_340, __noswap_vqrdmulh_s16(__rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340))); \ + __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \ + __ret_340; \ +}) +#endif + +#endif +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 
1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vabs_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); + return __ret; +} +__ai int64x1_t vabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int64_t vabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); + return __ret; +} +__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); + return __ret; +} +__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, 
uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); + return __ret; +} +#else +__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); + return __ret; +} +#else +__ai uint64_t 
vaddlvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); + return __ret; +} +#else +__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); + return __ret; +} +#else +__ai int16_t vaddlvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); + return __ret; +} +#else +__ai int64_t vaddlvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); + return __ret; +} +#else +__ai int32_t vaddlvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); + return __ret; +} +#else +__ai uint16_t vaddlv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); + return __ret; +} +#else +__ai uint64_t vaddlv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); + return __ret; +} +#else +__ai uint32_t vaddlv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); + return __ret; +} +#else +__ai int16_t vaddlv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + __ret = (int64_t) 
__builtin_neon_vaddlv_s32(__p0); + return __ret; +} +#else +__ai int64_t vaddlv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0); + return __ret; +} +#else +__ai int32_t vaddlv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); + return __ret; +} +#else +__ai uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); + return __ret; +} +#else +__ai uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); + return __ret; +} +#else +__ai uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); + return __ret; +} +#else +__ai uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0); + return __ret; +} +#else +__ai int8_t vaddvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vaddvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vaddvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t 
vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); + return __ret; +} +#else +__ai int32_t vaddvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); + return __ret; +} +#else +__ai int64_t vaddvq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); + return __ret; +} +#else +__ai int16_t vaddvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); + return __ret; +} +#else +__ai uint8_t vaddv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); + return __ret; +} +#else +__ai uint32_t vaddv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); + return __ret; +} +#else +__ai uint16_t vaddv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); + return __ret; +} +#else +__ai int8_t vaddv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); + return __ret; +} +#else +__ai float32_t vaddv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); + return __ret; +} +#else +__ai int32_t vaddv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) 
__builtin_neon_vaddv_s16(__p0); + return __ret; +} +#else +__ai int16_t vaddv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); + return __ret; +} +#endif + +__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); + return __ret; +} +#else +__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); + return __ret; +} +__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + 
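+  // Editor's note (not in the upstream header): for the vceq* family each
+  // result lane is all-ones (0xFF..FF) where the comparison holds and zero
+  // where it does not, so the mask feeds directly into vbsl*. A minimal
+  // usage sketch, assuming a little-endian AArch64 target; the values are
+  // illustrative only:
+  //   poly64x2_t a = vreinterpretq_p64_u64(vdupq_n_u64(7));
+  //   poly64x2_t b = vreinterpretq_p64_u64(vdupq_n_u64(7));
+  //   uint64x2_t m = vceqq_p64(a, b);  // both lanes 0xFFFFFFFFFFFFFFFF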
return __ret; +} +#else +__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { + uint64x1_t 
__ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret 
= (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vceqz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vceqzd_u64(uint64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); + return __ret; +} +__ai uint64_t vceqzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); + return __ret; +} +__ai uint64_t vceqzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); + return __ret; +} +__ai uint32_t vceqzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); + 
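+  // Editor's note (not in the upstream header): the scalar vceqz*/vceqzd*/
+  // vceqzs* helpers return an all-ones integer (0xFFFFFFFF or
+  // 0xFFFFFFFFFFFFFFFF) when the operand equals zero, and 0 otherwise.
+  // A hedged usage sketch with illustrative values:
+  //   uint32_t all  = vceqzs_f32(0.0f);  // 0xFFFFFFFF
+  //   uint64_t none = vceqzd_s64(42);    // 0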
return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); + return __ret; +} +__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = 
(uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcgez_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgez_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgez_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcgez_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64_t vcgezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); + return __ret; +} +__ai uint64_t vcgezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); + return __ret; +} +__ai uint32_t vcgezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); + return __ret; +} +__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t 
vcgtd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcgtzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcgtzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcgtzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 <= __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__rev0 <= __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 <= __p1);
+  return __ret;
+}
+__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vclez_s8(int8x8_t __p0) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclez_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vclez_f32(float32x2_t __p0) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vclez_s32(int32x2_t __p0) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclez_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vclez_s16(int16x4_t __p0) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vclezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vclezd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vclezs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0 < __p1);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__rev0 < __rev1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0 < __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+  return __ret;
+}
+__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
+  return __ret;
+}
+__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
+  return __ret;
+}
+#else
+__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
+  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __ret;
+  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint64x2_t __ret;
+  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
+  return __ret;
+}
+#else
+__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
+  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret;
+  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
+  return __ret;
+}
+#else
+__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
+  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x8_t __ret;
+  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint32x2_t __ret;
+  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
+  return __ret;
+}
+#else
+__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
+  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint16x4_t __ret;
+  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai uint64_t vcltzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
+  return __ret;
+}
+__ai uint64_t vcltzd_f64(float64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
+  return __ret;
+}
+__ai uint32_t vcltzs_f32(float32_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  return __ret;
+}
+#else
+__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
+  poly64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  return __ret;
+}
+#else
+__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x2_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
+  poly8x16_t __s0_341 = __p0_341; \
+  poly8x8_t __s2_341 = __p2_341; \
+  poly8x16_t __ret_341; \
+  __ret_341 = vsetq_lane_p8(vget_lane_p8(__s2_341, __p3_341), __s0_341, __p1_341); \
+  __ret_341; \
+})
+#else
+#define vcopyq_lane_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
+  poly8x16_t __s0_342 = __p0_342; \
+  poly8x8_t __s2_342 = __p2_342; \
+  poly8x16_t __rev0_342; __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_342; \
+  __ret_342 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_342, __p3_342), __rev0_342, __p1_342); \
+  __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_342; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_p16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
+  poly16x8_t __s0_343 = __p0_343; \
+  poly16x4_t __s2_343 = __p2_343; \
+  poly16x8_t __ret_343; \
+  __ret_343 = vsetq_lane_p16(vget_lane_p16(__s2_343, __p3_343), __s0_343, __p1_343); \
+  __ret_343; \
+})
+#else
+#define vcopyq_lane_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
+  poly16x8_t __s0_344 = __p0_344; \
+  poly16x4_t __s2_344 = __p2_344; \
+  poly16x8_t __rev0_344; __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_344; __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \
+  poly16x8_t __ret_344; \
+  __ret_344 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_344, __p3_344), __rev0_344, __p1_344); \
+  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_344; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u8(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
+  uint8x16_t __s0_345 = __p0_345; \
+  uint8x8_t __s2_345 = __p2_345; \
+  uint8x16_t __ret_345; \
+  __ret_345 = vsetq_lane_u8(vget_lane_u8(__s2_345, __p3_345), __s0_345, __p1_345); \
+  __ret_345; \
+})
+#else
+#define vcopyq_lane_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
+  uint8x16_t __s0_346 = __p0_346; \
+  uint8x8_t __s2_346 = __p2_346; \
+  uint8x16_t __rev0_346; __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_346; __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_346; \
+  __ret_346 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_346, __p3_346), __rev0_346, __p1_346); \
+  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_346; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u32(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
+  uint32x4_t __s0_347 = __p0_347; \
+  uint32x2_t __s2_347 = __p2_347; \
+  uint32x4_t __ret_347; \
+  __ret_347 = vsetq_lane_u32(vget_lane_u32(__s2_347, __p3_347), __s0_347, __p1_347); \
+  __ret_347; \
+})
+#else
+#define vcopyq_lane_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
+  uint32x4_t __s0_348 = __p0_348; \
+  uint32x2_t __s2_348 = __p2_348; \
+  uint32x4_t __rev0_348; __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 3, 2, 1, 0); \
+  uint32x2_t __rev2_348; __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 1, 0); \
+  uint32x4_t __ret_348; \
+  __ret_348 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_348, __p3_348), __rev0_348, __p1_348); \
+  __ret_348 = __builtin_shufflevector(__ret_348, __ret_348, 3, 2, 1, 0); \
+  __ret_348; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u64(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
+  uint64x2_t __s0_349 = __p0_349; \
+  uint64x1_t __s2_349 = __p2_349; \
+  uint64x2_t __ret_349; \
+  __ret_349 = vsetq_lane_u64(vget_lane_u64(__s2_349, __p3_349), __s0_349, __p1_349); \
+  __ret_349; \
+})
+#else
+#define vcopyq_lane_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
+  uint64x2_t __s0_350 = __p0_350; \
+  uint64x1_t __s2_350 = __p2_350; \
+  uint64x2_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 1, 0); \
+  uint64x2_t __ret_350; \
+  __ret_350 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_350, __p3_350), __rev0_350, __p1_350); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 1, 0); \
+  __ret_350; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_u16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
+  uint16x8_t __s0_351 = __p0_351; \
+  uint16x4_t __s2_351 = __p2_351; \
+  uint16x8_t __ret_351; \
+  __ret_351 = vsetq_lane_u16(vget_lane_u16(__s2_351, __p3_351), __s0_351, __p1_351); \
+  __ret_351; \
+})
+#else
+#define vcopyq_lane_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
+  uint16x8_t __s0_352 = __p0_352; \
+  uint16x4_t __s2_352 = __p2_352; \
+  uint16x8_t __rev0_352; __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_352; __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 3, 2, 1, 0); \
+  uint16x8_t __ret_352; \
+  __ret_352 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_352, __p3_352), __rev0_352, __p1_352); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
+  int8x16_t __s0_353 = __p0_353; \
+  int8x8_t __s2_353 = __p2_353; \
+  int8x16_t __ret_353; \
+  __ret_353 = vsetq_lane_s8(vget_lane_s8(__s2_353, __p3_353), __s0_353, __p1_353); \
+  __ret_353; \
+})
+#else
+#define vcopyq_lane_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
+  int8x16_t __s0_354 = __p0_354; \
+  int8x8_t __s2_354 = __p2_354; \
+  int8x16_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_354; __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_354; \
+  __ret_354 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_354, __p3_354), __rev0_354, __p1_354); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_354; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_f32(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
+  float32x4_t __s0_355 = __p0_355; \
+  float32x2_t __s2_355 = __p2_355; \
+  float32x4_t __ret_355; \
+  __ret_355 = vsetq_lane_f32(vget_lane_f32(__s2_355, __p3_355), __s0_355, __p1_355); \
+  __ret_355; \
+})
+#else
+#define vcopyq_lane_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
+  float32x4_t __s0_356 = __p0_356; \
+  float32x2_t __s2_356 = __p2_356; \
+  float32x4_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \
+  float32x2_t __rev2_356; __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 1, 0); \
+  float32x4_t __ret_356; \
+  __ret_356 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_356, __p3_356), __rev0_356, __p1_356); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \
+  __ret_356; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
+  int32x4_t __s0_357 = __p0_357; \
+  int32x2_t __s2_357 = __p2_357; \
+  int32x4_t __ret_357; \
+  __ret_357 = vsetq_lane_s32(vget_lane_s32(__s2_357, __p3_357), __s0_357, __p1_357); \
+  __ret_357; \
+})
+#else
+#define vcopyq_lane_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
+  int32x4_t __s0_358 = __p0_358; \
+  int32x2_t __s2_358 = __p2_358; \
+  int32x4_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 3, 2, 1, 0); \
+  int32x2_t __rev2_358; __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 1, 0); \
+  int32x4_t __ret_358; \
+  __ret_358 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_358, __p3_358), __rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 3, 2, 1, 0); \
+  __ret_358; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s64(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
+  int64x2_t __s0_359 = __p0_359; \
+  int64x1_t __s2_359 = __p2_359; \
+  int64x2_t __ret_359; \
+  __ret_359 = vsetq_lane_s64(vget_lane_s64(__s2_359, __p3_359), __s0_359, __p1_359); \
+  __ret_359; \
+})
+#else
+#define vcopyq_lane_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
+  int64x2_t __s0_360 = __p0_360; \
+  int64x1_t __s2_360 = __p2_360; \
+  int64x2_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 1, 0); \
+  int64x2_t __ret_360; \
+  __ret_360 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_360, __p3_360), __rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 1, 0); \
+  __ret_360; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_s16(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
+  int16x8_t __s0_361 = __p0_361; \
+  int16x4_t __s2_361 = __p2_361; \
+  int16x8_t __ret_361; \
+  __ret_361 = vsetq_lane_s16(vget_lane_s16(__s2_361, __p3_361), __s0_361, __p1_361); \
+  __ret_361; \
+})
+#else
+#define vcopyq_lane_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
+  int16x8_t __s0_362 = __p0_362; \
+  int16x4_t __s2_362 = __p2_362; \
+  int16x8_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_362; __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 3, 2, 1, 0); \
+  int16x8_t __ret_362; \
+  __ret_362 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_362, __p3_362), __rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_362; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p8(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
+  poly8x8_t __s0_363 = __p0_363; \
+  poly8x8_t __s2_363 = __p2_363; \
+  poly8x8_t __ret_363; \
+  __ret_363 = vset_lane_p8(vget_lane_p8(__s2_363, __p3_363), __s0_363, __p1_363); \
+  __ret_363; \
+})
+#else
+#define vcopy_lane_p8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
+  poly8x8_t __s0_364 = __p0_364; \
+  poly8x8_t __s2_364 = __p2_364; \
+  poly8x8_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_364; __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_364; \
+  __ret_364 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_364, __p3_364), __rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_364; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_p16(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
+  poly16x4_t __s0_365 = __p0_365; \
+  poly16x4_t __s2_365 = __p2_365; \
+  poly16x4_t __ret_365; \
+  __ret_365 = vset_lane_p16(vget_lane_p16(__s2_365, __p3_365), __s0_365, __p1_365); \
+  __ret_365; \
+})
+#else
+#define vcopy_lane_p16(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
+  poly16x4_t __s0_366 = __p0_366; \
+  poly16x4_t __s2_366 = __p2_366; \
+  poly16x4_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 3, 2, 1, 0); \
+  poly16x4_t __rev2_366; __rev2_366 = __builtin_shufflevector(__s2_366, __s2_366, 3, 2, 1, 0); \
+  poly16x4_t __ret_366; \
+  __ret_366 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_366, __p3_366), __rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 3, 2, 1, 0); \
+  __ret_366; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u8(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
+  uint8x8_t __s0_367 = __p0_367; \
+  uint8x8_t __s2_367 = __p2_367; \
+  uint8x8_t __ret_367; \
+  __ret_367 = vset_lane_u8(vget_lane_u8(__s2_367, __p3_367), __s0_367, __p1_367); \
+  __ret_367; \
+})
+#else
+#define vcopy_lane_u8(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
+  uint8x8_t __s0_368 = __p0_368; \
+  uint8x8_t __s2_368 = __p2_368; \
+  uint8x8_t __rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_368; __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_368; \
+  __ret_368 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_368, __p3_368), __rev0_368, __p1_368); \
+  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_368; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
+  uint32x2_t __s0_369 = __p0_369; \
+  uint32x2_t __s2_369 = __p2_369; \
+  uint32x2_t __ret_369; \
+  __ret_369 = vset_lane_u32(vget_lane_u32(__s2_369, __p3_369), __s0_369, __p1_369); \
+  __ret_369; \
+})
+#else
+#define vcopy_lane_u32(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
+  uint32x2_t __s0_370 = __p0_370; \
+  uint32x2_t __s2_370 = __p2_370; \
+  uint32x2_t __rev0_370; __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 1, 0); \
+  uint32x2_t __rev2_370; __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 1, 0); \
+  uint32x2_t __ret_370; \
+  __ret_370 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_370, __p3_370), __rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 1, 0); \
+  __ret_370; \
+})
+#endif
+
+#define vcopy_lane_u64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
+  uint64x1_t __s0_371 = __p0_371; \
+  uint64x1_t __s2_371 = __p2_371; \
+  uint64x1_t __ret_371; \
+  __ret_371 = vset_lane_u64(vget_lane_u64(__s2_371, __p3_371), __s0_371, __p1_371); \
+  __ret_371; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_u16(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
+  uint16x4_t __s0_372 = __p0_372; \
+  uint16x4_t __s2_372 = __p2_372; \
+  uint16x4_t __ret_372; \
+  __ret_372 = vset_lane_u16(vget_lane_u16(__s2_372, __p3_372), __s0_372, __p1_372); \
+  __ret_372; \
+})
+#else
+#define vcopy_lane_u16(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
+  uint16x4_t __s0_373 = __p0_373; \
+  uint16x4_t __s2_373 = __p2_373; \
+  uint16x4_t __rev0_373; __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 3, 2, 1, 0); \
+  uint16x4_t __rev2_373; __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, 3, 2, 1, 0); \
+  uint16x4_t __ret_373; \
+  __ret_373 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_373, __p3_373), __rev0_373, __p1_373); \
+  __ret_373 = __builtin_shufflevector(__ret_373, __ret_373, 3, 2, 1, 0); \
+  __ret_373; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s8(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
+  int8x8_t __s0_374 = __p0_374; \
+  int8x8_t __s2_374 = __p2_374; \
+  int8x8_t __ret_374; \
+  __ret_374 = vset_lane_s8(vget_lane_s8(__s2_374, __p3_374), __s0_374, __p1_374); \
+  __ret_374; \
+})
+#else
+#define vcopy_lane_s8(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
+  int8x8_t __s0_375 = __p0_375; \
+  int8x8_t __s2_375 = __p2_375; \
+  int8x8_t __rev0_375; __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_375; __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_375; \
+  __ret_375 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_375, __p3_375), __rev0_375, __p1_375); \
+  __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_375; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_f32(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \
+  float32x2_t __s0_376 = __p0_376; \
+  float32x2_t __s2_376 = __p2_376; \
+  float32x2_t __ret_376; \
+  __ret_376 = vset_lane_f32(vget_lane_f32(__s2_376, __p3_376), __s0_376, __p1_376); \
+  __ret_376; \
+})
+#else
+#define vcopy_lane_f32(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \
+  float32x2_t __s0_377 = __p0_377; \
+  float32x2_t __s2_377 = __p2_377; \
+  float32x2_t __rev0_377; __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 1, 0); \
+  float32x2_t __rev2_377; __rev2_377 = __builtin_shufflevector(__s2_377, __s2_377, 1, 0); \
+  float32x2_t __ret_377; \
+  __ret_377 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_377, __p3_377), __rev0_377, __p1_377); \
+  __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 1, 0); \
+  __ret_377; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s32(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \
+  int32x2_t __s0_378 = __p0_378; \
+  int32x2_t __s2_378 = __p2_378; \
+  int32x2_t __ret_378; \
+  __ret_378 = vset_lane_s32(vget_lane_s32(__s2_378, __p3_378), __s0_378, __p1_378); \
+  __ret_378; \
+})
+#else
+#define vcopy_lane_s32(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \
+  int32x2_t __s0_379 = __p0_379; \
+  int32x2_t __s2_379 = __p2_379; \
+  int32x2_t __rev0_379; __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \
+  int32x2_t __rev2_379; __rev2_379 = __builtin_shufflevector(__s2_379, __s2_379, 1, 0); \
+  int32x2_t __ret_379; \
+  __ret_379 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_379, __p3_379), __rev0_379, __p1_379); \
+  __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \
+  __ret_379; \
+})
+#endif
+
+#define vcopy_lane_s64(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \
+  int64x1_t __s0_380 = __p0_380; \
+  int64x1_t __s2_380 = __p2_380; \
+  int64x1_t __ret_380; \
+  __ret_380 = vset_lane_s64(vget_lane_s64(__s2_380, __p3_380), __s0_380, __p1_380); \
+  __ret_380; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_s16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \
+  int16x4_t __s0_381 = __p0_381; \
+  int16x4_t __s2_381 = __p2_381; \
+  int16x4_t __ret_381; \
+  __ret_381 = vset_lane_s16(vget_lane_s16(__s2_381, __p3_381), __s0_381, __p1_381); \
+  __ret_381; \
+})
+#else
+#define vcopy_lane_s16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \
+  int16x4_t __s0_382 = __p0_382; \
+  int16x4_t __s2_382 = __p2_382; \
+  int16x4_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  int16x4_t __rev2_382; __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \
+  int16x4_t __ret_382; \
+  __ret_382 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_382, __p3_382), __rev0_382, __p1_382); \
+  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \
+  __ret_382; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \
+  poly8x16_t __s0_383 = __p0_383; \
+  poly8x16_t __s2_383 = __p2_383; \
+  poly8x16_t __ret_383; \
+  __ret_383 = vsetq_lane_p8(vgetq_lane_p8(__s2_383, __p3_383), __s0_383, __p1_383); \
+  __ret_383; \
+})
+#else
+#define vcopyq_laneq_p8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \
+  poly8x16_t __s0_384 = __p0_384; \
+  poly8x16_t __s2_384 = __p2_384; \
+  poly8x16_t __rev0_384; __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_384; __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_384; \
+  __ret_384 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_384, __p3_384), __rev0_384, __p1_384); \
+  __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_384; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_p16(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \
+  poly16x8_t __s0_385 = __p0_385; \
+  poly16x8_t __s2_385 = __p2_385; \
+  poly16x8_t __ret_385; \
+  __ret_385 = vsetq_lane_p16(vgetq_lane_p16(__s2_385, __p3_385), __s0_385, __p1_385); \
+  __ret_385; \
+})
+#else
+#define vcopyq_laneq_p16(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \
+  poly16x8_t __s0_386 = __p0_386; \
+  poly16x8_t __s2_386 = __p2_386; \
+  poly16x8_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_386; __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_386; \
+  __ret_386 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_386, __p3_386), __rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_386; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u8(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \
+  uint8x16_t __s0_387 = __p0_387; \
+  uint8x16_t __s2_387 = __p2_387; \
+  uint8x16_t __ret_387; \
+  __ret_387 = vsetq_lane_u8(vgetq_lane_u8(__s2_387, __p3_387), __s0_387, __p1_387); \
+  __ret_387; \
+})
+#else
+#define vcopyq_laneq_u8(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \
+  uint8x16_t __s0_388 = __p0_388; \
+  uint8x16_t __s2_388 = __p2_388; \
+  uint8x16_t __rev0_388; __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_388; __rev2_388 = __builtin_shufflevector(__s2_388, __s2_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_388; \
+  __ret_388 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_388, __p3_388), __rev0_388, __p1_388); \
+  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_388; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u32(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \
+  uint32x4_t __s0_389 = __p0_389; \
+  uint32x4_t __s2_389 = __p2_389; \
+  uint32x4_t __ret_389; \
+  __ret_389 = vsetq_lane_u32(vgetq_lane_u32(__s2_389, __p3_389), __s0_389, __p1_389); \
+  __ret_389; \
+})
+#else
+#define vcopyq_laneq_u32(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \
+  uint32x4_t __s0_390 = __p0_390; \
+  uint32x4_t __s2_390 = __p2_390; \
+  uint32x4_t __rev0_390; __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 3, 2, 1, 0); \
+  uint32x4_t __rev2_390; __rev2_390 = __builtin_shufflevector(__s2_390, __s2_390, 3, 2, 1, 0); \
+  uint32x4_t __ret_390; \
+  __ret_390 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_390, __p3_390), __rev0_390, __p1_390); \
+  __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \
+  __ret_390; \
+})
+#endif
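All of the vcopy_lane*/vcopyq_laneq* wrappers above share one shape: read a single lane out of the second operand (vget_lane/vgetq_lane), insert it into the first (vset_lane/vsetq_lane), and, in the big-endian variant only, bracket the operation with __builtin_shufflevector lane reversals so the architectural lane numbering is preserved. A minimal caller-side sketch of how one of these is used (not part of the generated header; the function and variable names here are illustrative only):

  #include <arm_neon.h>

  /* Copy lane 3 of src into lane 1 of dst, leaving the other lanes of
   * dst untouched; on AArch64 this typically lowers to a single
   * INS (element) instruction. */
  uint32x4_t copy_one_lane(uint32x4_t dst, uint32x4_t src) {
    return vcopyq_laneq_u32(dst, 1, src, 3);
  }

The lane arguments must be integer constant expressions, which is one reason these are defined as macros here rather than as __ai functions.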
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u64(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \
+  uint64x2_t __s0_391 = __p0_391; \
+  uint64x2_t __s2_391 = __p2_391; \
+  uint64x2_t __ret_391; \
+  __ret_391 = vsetq_lane_u64(vgetq_lane_u64(__s2_391, __p3_391), __s0_391, __p1_391); \
+  __ret_391; \
+})
+#else
+#define vcopyq_laneq_u64(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \
+  uint64x2_t __s0_392 = __p0_392; \
+  uint64x2_t __s2_392 = __p2_392; \
+  uint64x2_t __rev0_392; __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 1, 0); \
+  uint64x2_t __rev2_392; __rev2_392 = __builtin_shufflevector(__s2_392, __s2_392, 1, 0); \
+  uint64x2_t __ret_392; \
+  __ret_392 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_392, __p3_392), __rev0_392, __p1_392); \
+  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
+  __ret_392; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_u16(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \
+  uint16x8_t __s0_393 = __p0_393; \
+  uint16x8_t __s2_393 = __p2_393; \
+  uint16x8_t __ret_393; \
+  __ret_393 = vsetq_lane_u16(vgetq_lane_u16(__s2_393, __p3_393), __s0_393, __p1_393); \
+  __ret_393; \
+})
+#else
+#define vcopyq_laneq_u16(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \
+  uint16x8_t __s0_394 = __p0_394; \
+  uint16x8_t __s2_394 = __p2_394; \
+  uint16x8_t __rev0_394; __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_394; __rev2_394 = __builtin_shufflevector(__s2_394, __s2_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_394; \
+  __ret_394 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_394, __p3_394), __rev0_394, __p1_394); \
+  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_394; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s8(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \
+  int8x16_t __s0_395 = __p0_395; \
+  int8x16_t __s2_395 = __p2_395; \
+  int8x16_t __ret_395; \
+  __ret_395 = vsetq_lane_s8(vgetq_lane_s8(__s2_395, __p3_395), __s0_395, __p1_395); \
+  __ret_395; \
+})
+#else
+#define vcopyq_laneq_s8(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \
+  int8x16_t __s0_396 = __p0_396; \
+  int8x16_t __s2_396 = __p2_396; \
+  int8x16_t __rev0_396; __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_396; __rev2_396 = __builtin_shufflevector(__s2_396, __s2_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_396; \
+  __ret_396 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_396, __p3_396), __rev0_396, __p1_396); \
+  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_396; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_f32(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \
+  float32x4_t __s0_397 = __p0_397; \
+  float32x4_t __s2_397 = __p2_397; \
+  float32x4_t __ret_397; \
+  __ret_397 = vsetq_lane_f32(vgetq_lane_f32(__s2_397, __p3_397), __s0_397, __p1_397); \
+  __ret_397; \
+})
+#else
+#define vcopyq_laneq_f32(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \
+  float32x4_t __s0_398 = __p0_398; \
+  float32x4_t __s2_398 = __p2_398; \
+  float32x4_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \
+  float32x4_t __rev2_398; __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \
+  float32x4_t __ret_398; \
+  __ret_398 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_398, __p3_398), __rev0_398, __p1_398); \
+  __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \
+  __ret_398; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s32(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \
+  int32x4_t __s0_399 = __p0_399; \
+  int32x4_t __s2_399 = __p2_399; \
+  int32x4_t __ret_399; \
+  __ret_399 = vsetq_lane_s32(vgetq_lane_s32(__s2_399, __p3_399), __s0_399, __p1_399); \
+  __ret_399; \
+})
+#else
+#define vcopyq_laneq_s32(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \
+  int32x4_t __s0_400 = __p0_400; \
+  int32x4_t __s2_400 = __p2_400; \
+  int32x4_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \
+  int32x4_t __rev2_400; __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 3, 2, 1, 0); \
+  int32x4_t __ret_400; \
+  __ret_400 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_400, __p3_400), __rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
+  __ret_400; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  int64x2_t __s0_401 = __p0_401; \
+  int64x2_t __s2_401 = __p2_401; \
+  int64x2_t __ret_401; \
+  __ret_401 = vsetq_lane_s64(vgetq_lane_s64(__s2_401, __p3_401), __s0_401, __p1_401); \
+  __ret_401; \
+})
+#else
+#define vcopyq_laneq_s64(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  int64x2_t __s0_402 = __p0_402; \
+  int64x2_t __s2_402 = __p2_402; \
+  int64x2_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \
+  int64x2_t __rev2_402; __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 1, 0); \
+  int64x2_t __ret_402; \
+  __ret_402 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_402, __p3_402), __rev0_402, __p1_402); \
+  __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 1, 0); \
+  __ret_402; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_s16(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  int16x8_t __s0_403 = __p0_403; \
+  int16x8_t __s2_403 = __p2_403; \
+  int16x8_t __ret_403; \
+  __ret_403 = vsetq_lane_s16(vgetq_lane_s16(__s2_403, __p3_403), __s0_403, __p1_403); \
+  __ret_403; \
+})
+#else
+#define vcopyq_laneq_s16(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  int16x8_t __s0_404 = __p0_404; \
+  int16x8_t __s2_404 = __p2_404; \
+  int16x8_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_404; __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_404; \
+  __ret_404 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_404, __p3_404), __rev0_404, __p1_404); \
+  __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_404; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p8(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  poly8x8_t __s0_405 = __p0_405; \
+  poly8x16_t __s2_405 = __p2_405; \
+  poly8x8_t __ret_405; \
+  __ret_405 = vset_lane_p8(vgetq_lane_p8(__s2_405, __p3_405), __s0_405, __p1_405); \
+  __ret_405; \
+})
+#else
+#define vcopy_laneq_p8(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  poly8x8_t __s0_406 = __p0_406; \
+  poly8x16_t __s2_406 = __p2_406; \
+  poly8x8_t __rev0_406; __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_406; __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_406; \
+  __ret_406 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_406, __p3_406), __rev0_406, __p1_406); \
+  __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_406; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_p16(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  poly16x4_t __s0_407 = __p0_407; \
+  poly16x8_t __s2_407 = __p2_407; \
+  poly16x4_t __ret_407; \
+  __ret_407 = vset_lane_p16(vgetq_lane_p16(__s2_407, __p3_407), __s0_407, __p1_407); \
+  __ret_407; \
+})
+#else
+#define vcopy_laneq_p16(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  poly16x4_t __s0_408 = __p0_408; \
+  poly16x8_t __s2_408 = __p2_408; \
+  poly16x4_t __rev0_408; __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 3, 2, 1, 0); \
+  poly16x8_t __rev2_408; __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_408; \
+  __ret_408 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_408, __p3_408), __rev0_408, __p1_408); \
+  __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 3, 2, 1, 0); \
+  __ret_408; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u8(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  uint8x8_t __s0_409 = __p0_409; \
+  uint8x16_t __s2_409 = __p2_409; \
+  uint8x8_t __ret_409; \
+  __ret_409 = vset_lane_u8(vgetq_lane_u8(__s2_409, __p3_409), __s0_409, __p1_409); \
+  __ret_409; \
+})
+#else
+#define vcopy_laneq_u8(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  uint8x8_t __s0_410 = __p0_410; \
+  uint8x16_t __s2_410 = __p2_410; \
+  uint8x8_t __rev0_410; __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_410; __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_410; \
+  __ret_410 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_410, __p3_410), __rev0_410, __p1_410); \
+  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_410; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  uint32x2_t __s0_411 = __p0_411; \
+  uint32x4_t __s2_411 = __p2_411; \
+  uint32x2_t __ret_411; \
+  __ret_411 = vset_lane_u32(vgetq_lane_u32(__s2_411, __p3_411), __s0_411, __p1_411); \
+  __ret_411; \
+})
+#else
+#define vcopy_laneq_u32(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  uint32x2_t __s0_412 = __p0_412; \
+  uint32x4_t __s2_412 = __p2_412; \
+  uint32x2_t __rev0_412; __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 1, 0); \
+  uint32x4_t __rev2_412; __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 3, 2, 1, 0); \
+  uint32x2_t __ret_412; \
+  __ret_412 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_412, __p3_412), __rev0_412, __p1_412); \
+  __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 1, 0); \
+  __ret_412; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u64(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  uint64x1_t __s0_413 = __p0_413; \
+  uint64x2_t __s2_413 = __p2_413; \
+  uint64x1_t __ret_413; \
+  __ret_413 = vset_lane_u64(vgetq_lane_u64(__s2_413, __p3_413), __s0_413, __p1_413); \
+  __ret_413; \
+})
+#else
+#define vcopy_laneq_u64(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  uint64x1_t __s0_414 = __p0_414; \
+  uint64x2_t __s2_414 = __p2_414; \
+  uint64x2_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \
+  uint64x1_t __ret_414; \
+  __ret_414 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_414, __p3_414), __s0_414, __p1_414); \
+  __ret_414; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_u16(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  uint16x4_t __s0_415 = __p0_415; \
+  uint16x8_t __s2_415 = __p2_415; \
+  uint16x4_t __ret_415; \
+  __ret_415 = vset_lane_u16(vgetq_lane_u16(__s2_415, __p3_415), __s0_415, __p1_415); \
+  __ret_415; \
+})
+#else
+#define vcopy_laneq_u16(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  uint16x4_t __s0_416 = __p0_416; \
+  uint16x8_t __s2_416 = __p2_416; \
+  uint16x4_t __rev0_416; __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \
+  uint16x8_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_416; \
+  __ret_416 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_416, __p3_416), __rev0_416, __p1_416); \
+  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \
+  __ret_416; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s8(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  int8x8_t __s0_417 = __p0_417; \
+  int8x16_t __s2_417 = __p2_417; \
+  int8x8_t __ret_417; \
+  __ret_417 = vset_lane_s8(vgetq_lane_s8(__s2_417, __p3_417), __s0_417, __p1_417); \
+  __ret_417; \
+})
+#else
+#define vcopy_laneq_s8(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  int8x8_t __s0_418 = __p0_418; \
+  int8x16_t __s2_418 = __p2_418; \
+  int8x8_t __rev0_418; __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_418; \
+  __ret_418 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_418, __p3_418), __rev0_418, __p1_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_418; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_f32(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  float32x2_t __s0_419 = __p0_419; \
+  float32x4_t __s2_419 = __p2_419; \
+  float32x2_t __ret_419; \
+  __ret_419 = vset_lane_f32(vgetq_lane_f32(__s2_419, __p3_419), __s0_419, __p1_419); \
+  __ret_419; \
+})
+#else
+#define vcopy_laneq_f32(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  float32x2_t __s0_420 = __p0_420; \
+  float32x4_t __s2_420 = __p2_420; \
+  float32x2_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \
+  float32x4_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 3, 2, 1, 0); \
+  float32x2_t __ret_420; \
+  __ret_420 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_420, __p3_420), __rev0_420, __p1_420); \
+  __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \
+  __ret_420; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  int32x2_t __s0_421 = __p0_421; \
+  int32x4_t __s2_421 = __p2_421; \
+  int32x2_t __ret_421; \
+  __ret_421 = vset_lane_s32(vgetq_lane_s32(__s2_421, __p3_421), __s0_421, __p1_421); \
+  __ret_421; \
+})
+#else
+#define vcopy_laneq_s32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  int32x2_t __s0_422 = __p0_422; \
+  int32x4_t __s2_422 = __p2_422; \
+  int32x2_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
+  int32x4_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
+  int32x2_t __ret_422; \
+  __ret_422 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_422, __p3_422), __rev0_422, __p1_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
+  __ret_422; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  int64x1_t __s0_423 = __p0_423; \
+  int64x2_t __s2_423 = __p2_423; \
+  int64x1_t __ret_423; \
+  __ret_423 = vset_lane_s64(vgetq_lane_s64(__s2_423, __p3_423), __s0_423, __p1_423); \
+  __ret_423; \
+})
+#else
+#define vcopy_laneq_s64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  int64x1_t __s0_424 = __p0_424; \
+  int64x2_t __s2_424 = __p2_424; \
+  int64x2_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \
+  int64x1_t __ret_424; \
+  __ret_424 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_424, __p3_424), __s0_424, __p1_424); \
+  __ret_424; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_s16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  int16x4_t __s0_425 = __p0_425; \
+  int16x8_t __s2_425 = __p2_425; \
+  int16x4_t __ret_425; \
+  __ret_425 = vset_lane_s16(vgetq_lane_s16(__s2_425, __p3_425), __s0_425, __p1_425); \
+  __ret_425; \
+})
+#else
+#define vcopy_laneq_s16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  int16x4_t __s0_426 = __p0_426; \
+  int16x8_t __s2_426 = __p2_426; \
+  int16x4_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 3, 2, 1, 0); \
+  int16x8_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_426; \
+  __ret_426 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_426, __p3_426), __rev0_426, __p1_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 3, 2, 1, 0); \
+  __ret_426; \
+})
+#endif
+
+#define vcreate_p64(__p0) __extension__ ({ \
+  poly64x1_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (poly64x1_t)(__promote); \
+  __ret; \
+})
+#define vcreate_f64(__p0) __extension__ ({ \
+  float64x1_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (float64x1_t)(__promote); \
+  __ret; \
+})
+__ai float32_t vcvts_f32_s32(int32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
+  return __ret;
+}
+__ai float32_t vcvts_f32_u32(uint32_t __p0) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
+  return __ret;
+}
+#endif
+
+__ai float64_t vcvtd_f64_s64(int64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
+  return __ret;
+}
+__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
+  return __ret;
+}
+__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+  float16x8_t __ret;
+  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
+  return __ret;
+}
+#else
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
+  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float16x8_t __ret;
+  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_f16(vget_high_f16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
+  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x4_t __ret;
+  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x4_t __ret;
+  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+  float64x2_t __ret;
+  __ret = vcvt_f64_f32(vget_high_f32(__p0));
+  return __ret;
+}
+#else
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float64x2_t __ret;
+  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
+  uint32_t __s0 = __p0; \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32_t __s0 = __p0; \
+  float32_t __ret; \
+  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  float64_t __ret; \
+  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
+  __ret; \
+})
+#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
+  int64_t __s0 = __p0; \
+  float64_t __ret; \
+  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
+  __ret; \
+})
+#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32_t __s0 = __p0; \
+  int32_t __ret; \
+  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 =
__p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ + float32_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ + __ret; \ +}) +__ai int32_t vcvts_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); + return __ret; +} +__ai int64_t vcvtd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai uint32_t vcvts_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); + return __ret; +} +__ai uint64_t vcvtd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvt_u64_f64(float64x1_t 
__p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai int32_t vcvtas_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); + return __ret; +} +__ai int64_t vcvtad_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtas_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); + return __ret; +} +__ai uint64_t vcvtad_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + return __ret; +} +__ai int32_t vcvtms_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); + return __ret; +} +__ai int64_t vcvtmd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtms_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); + return __ret; +} +__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); + return __ret; +} +__ai int32_t vcvtns_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); + return __ret; +} +__ai int64_t vcvtnd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtns_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); + return __ret; +} +__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + return __ret; +} +__ai int32_t vcvtps_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); + return __ret; +} +__ai int64_t vcvtpd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); + return __ret; +} +__ai uint32_t vcvtps_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); + return __ret; +} +__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); + return __ret; +} +__ai float32_t vcvtxd_f32_f64(float64_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); + return __ret; +} +#else +__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x4_t __ret; + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ 
+ uint32x2_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ + 
__ret; \ +}) +#endif + +#define vdup_lane_p64(__p0_427, __p1_427) __extension__ ({ \ + poly64x1_t __s0_427 = __p0_427; \ + poly64x1_t __ret_427; \ + __ret_427 = splat_lane_p64(__s0_427, __p1_427); \ + __ret_427; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p64(__p0_428, __p1_428) __extension__ ({ \ + poly64x1_t __s0_428 = __p0_428; \ + poly64x2_t __ret_428; \ + __ret_428 = splatq_lane_p64(__s0_428, __p1_428); \ + __ret_428; \ +}) +#else +#define vdupq_lane_p64(__p0_429, __p1_429) __extension__ ({ \ + poly64x1_t __s0_429 = __p0_429; \ + poly64x2_t __ret_429; \ + __ret_429 = __noswap_splatq_lane_p64(__s0_429, __p1_429); \ + __ret_429 = __builtin_shufflevector(__ret_429, __ret_429, 1, 0); \ + __ret_429; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f64(__p0_430, __p1_430) __extension__ ({ \ + float64x1_t __s0_430 = __p0_430; \ + float64x2_t __ret_430; \ + __ret_430 = splatq_lane_f64(__s0_430, __p1_430); \ + __ret_430; \ +}) +#else +#define vdupq_lane_f64(__p0_431, __p1_431) __extension__ ({ \ + float64x1_t __s0_431 = __p0_431; \ + float64x2_t __ret_431; \ + __ret_431 = __noswap_splatq_lane_f64(__s0_431, __p1_431); \ + __ret_431 = __builtin_shufflevector(__ret_431, __ret_431, 1, 0); \ + __ret_431; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f16(__p0_432, __p1_432) __extension__ ({ \ + float16x4_t __s0_432 = __p0_432; \ + float16x8_t __ret_432; \ + __ret_432 = splatq_lane_f16(__s0_432, __p1_432); \ + __ret_432; \ +}) +#else +#define vdupq_lane_f16(__p0_433, __p1_433) __extension__ ({ \ + float16x4_t __s0_433 = __p0_433; \ + float16x4_t __rev0_433; __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, 3, 2, 1, 0); \ + float16x8_t __ret_433; \ + __ret_433 = __noswap_splatq_lane_f16(__rev0_433, __p1_433); \ + __ret_433 = __builtin_shufflevector(__ret_433, __ret_433, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_433; \ +}) +#endif + +#define vdup_lane_f64(__p0_434, __p1_434) __extension__ ({ \ + float64x1_t __s0_434 = __p0_434; \ + float64x1_t __ret_434; \ + __ret_434 = splat_lane_f64(__s0_434, __p1_434); \ + __ret_434; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f16(__p0_435, __p1_435) __extension__ ({ \ + float16x4_t __s0_435 = __p0_435; \ + float16x4_t __ret_435; \ + __ret_435 = splat_lane_f16(__s0_435, __p1_435); \ + __ret_435; \ +}) +#else +#define vdup_lane_f16(__p0_436, __p1_436) __extension__ ({ \ + float16x4_t __s0_436 = __p0_436; \ + float16x4_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \ + float16x4_t __ret_436; \ + __ret_436 = __noswap_splat_lane_f16(__rev0_436, __p1_436); \ + __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \ + __ret_436; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + 
poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + 
float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p8(__p0_437, __p1_437) __extension__ ({ \ + poly8x16_t __s0_437 = __p0_437; \ + poly8x8_t __ret_437; \ + __ret_437 = splat_laneq_p8(__s0_437, __p1_437); \ + __ret_437; \ +}) +#else +#define vdup_laneq_p8(__p0_438, __p1_438) __extension__ ({ \ + poly8x16_t __s0_438 = __p0_438; \ + poly8x16_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret_438; \ + __ret_438 = __noswap_splat_laneq_p8(__rev0_438, __p1_438); \ + __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_438; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p64(__p0_439, __p1_439) __extension__ ({ \ + poly64x2_t __s0_439 = __p0_439; \ + poly64x1_t __ret_439; \ + __ret_439 = splat_laneq_p64(__s0_439, __p1_439); \ + __ret_439; \ +}) +#else +#define vdup_laneq_p64(__p0_440, __p1_440) __extension__ ({ \ + poly64x2_t __s0_440 = __p0_440; \ + poly64x2_t __rev0_440; __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \ + poly64x1_t __ret_440; \ + __ret_440 = __noswap_splat_laneq_p64(__rev0_440, __p1_440); \ + __ret_440; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p16(__p0_441, __p1_441) __extension__ ({ \ + poly16x8_t __s0_441 = __p0_441; \ + poly16x4_t __ret_441; \ + __ret_441 = splat_laneq_p16(__s0_441, __p1_441); \ + __ret_441; \ +}) +#else +#define vdup_laneq_p16(__p0_442, __p1_442) __extension__ ({ \ + poly16x8_t __s0_442 = __p0_442; \ + poly16x8_t __rev0_442; __rev0_442 = 
__builtin_shufflevector(__s0_442, __s0_442, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __ret_442; \ + __ret_442 = __noswap_splat_laneq_p16(__rev0_442, __p1_442); \ + __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \ + __ret_442; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p8(__p0_443, __p1_443) __extension__ ({ \ + poly8x16_t __s0_443 = __p0_443; \ + poly8x16_t __ret_443; \ + __ret_443 = splatq_laneq_p8(__s0_443, __p1_443); \ + __ret_443; \ +}) +#else +#define vdupq_laneq_p8(__p0_444, __p1_444) __extension__ ({ \ + poly8x16_t __s0_444 = __p0_444; \ + poly8x16_t __rev0_444; __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret_444; \ + __ret_444 = __noswap_splatq_laneq_p8(__rev0_444, __p1_444); \ + __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_444; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p64(__p0_445, __p1_445) __extension__ ({ \ + poly64x2_t __s0_445 = __p0_445; \ + poly64x2_t __ret_445; \ + __ret_445 = splatq_laneq_p64(__s0_445, __p1_445); \ + __ret_445; \ +}) +#else +#define vdupq_laneq_p64(__p0_446, __p1_446) __extension__ ({ \ + poly64x2_t __s0_446 = __p0_446; \ + poly64x2_t __rev0_446; __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 1, 0); \ + poly64x2_t __ret_446; \ + __ret_446 = __noswap_splatq_laneq_p64(__rev0_446, __p1_446); \ + __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 1, 0); \ + __ret_446; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p16(__p0_447, __p1_447) __extension__ ({ \ + poly16x8_t __s0_447 = __p0_447; \ + poly16x8_t __ret_447; \ + __ret_447 = splatq_laneq_p16(__s0_447, __p1_447); \ + __ret_447; \ +}) +#else +#define vdupq_laneq_p16(__p0_448, __p1_448) __extension__ ({ \ + poly16x8_t __s0_448 = __p0_448; \ + poly16x8_t __rev0_448; __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret_448; \ + __ret_448 = __noswap_splatq_laneq_p16(__rev0_448, __p1_448); \ + __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_448; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u8(__p0_449, __p1_449) __extension__ ({ \ + uint8x16_t __s0_449 = __p0_449; \ + uint8x16_t __ret_449; \ + __ret_449 = splatq_laneq_u8(__s0_449, __p1_449); \ + __ret_449; \ +}) +#else +#define vdupq_laneq_u8(__p0_450, __p1_450) __extension__ ({ \ + uint8x16_t __s0_450 = __p0_450; \ + uint8x16_t __rev0_450; __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_450; \ + __ret_450 = __noswap_splatq_laneq_u8(__rev0_450, __p1_450); \ + __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_450; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u32(__p0_451, __p1_451) __extension__ ({ \ + uint32x4_t __s0_451 = __p0_451; \ + uint32x4_t __ret_451; \ + __ret_451 = splatq_laneq_u32(__s0_451, __p1_451); \ + __ret_451; \ +}) +#else +#define vdupq_laneq_u32(__p0_452, __p1_452) __extension__ ({ \ + uint32x4_t __s0_452 = __p0_452; \ + uint32x4_t __rev0_452; __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \ + uint32x4_t __ret_452; \ + __ret_452 = __noswap_splatq_laneq_u32(__rev0_452, __p1_452); \ + __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \ + __ret_452; \ +}) +#endif 
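As an aside for reviewers, here is a minimal usage sketch tying together a few of the AArch64 intrinsics this hunk defines (vcvtq_f64_s64, vdupq_laneq_f64, vdivq_f64). The function name and scenario are hypothetical; the snippet is illustrative only and is not part of the imported header:

#include <arm_neon.h>

/* Convert two signed 64-bit counters to double and scale them,
 * element-wise, by the value held in lane 0 of `scale`. */
static inline float64x2_t scale_counts(int64x2_t counts, float64x2_t scale) {
  float64x2_t c = vcvtq_f64_s64(counts);     /* int64x2_t -> float64x2_t */
  float64x2_t s = vdupq_laneq_f64(scale, 0); /* broadcast lane 0 (constant index) */
  return vdivq_f64(c, s);                    /* per-lane divide */
}

Note that callers never see the endianness handling: the __LITTLE_ENDIAN__/#else pairs above make the big-endian variants reverse lanes on entry and exit with __builtin_shufflevector, so a sketch like this behaves identically on either byte order.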
+ +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u64(__p0_453, __p1_453) __extension__ ({ \ + uint64x2_t __s0_453 = __p0_453; \ + uint64x2_t __ret_453; \ + __ret_453 = splatq_laneq_u64(__s0_453, __p1_453); \ + __ret_453; \ +}) +#else +#define vdupq_laneq_u64(__p0_454, __p1_454) __extension__ ({ \ + uint64x2_t __s0_454 = __p0_454; \ + uint64x2_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 1, 0); \ + uint64x2_t __ret_454; \ + __ret_454 = __noswap_splatq_laneq_u64(__rev0_454, __p1_454); \ + __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 1, 0); \ + __ret_454; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u16(__p0_455, __p1_455) __extension__ ({ \ + uint16x8_t __s0_455 = __p0_455; \ + uint16x8_t __ret_455; \ + __ret_455 = splatq_laneq_u16(__s0_455, __p1_455); \ + __ret_455; \ +}) +#else +#define vdupq_laneq_u16(__p0_456, __p1_456) __extension__ ({ \ + uint16x8_t __s0_456 = __p0_456; \ + uint16x8_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_456; \ + __ret_456 = __noswap_splatq_laneq_u16(__rev0_456, __p1_456); \ + __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_456; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s8(__p0_457, __p1_457) __extension__ ({ \ + int8x16_t __s0_457 = __p0_457; \ + int8x16_t __ret_457; \ + __ret_457 = splatq_laneq_s8(__s0_457, __p1_457); \ + __ret_457; \ +}) +#else +#define vdupq_laneq_s8(__p0_458, __p1_458) __extension__ ({ \ + int8x16_t __s0_458 = __p0_458; \ + int8x16_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_458; \ + __ret_458 = __noswap_splatq_laneq_s8(__rev0_458, __p1_458); \ + __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_458; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f64(__p0_459, __p1_459) __extension__ ({ \ + float64x2_t __s0_459 = __p0_459; \ + float64x2_t __ret_459; \ + __ret_459 = splatq_laneq_f64(__s0_459, __p1_459); \ + __ret_459; \ +}) +#else +#define vdupq_laneq_f64(__p0_460, __p1_460) __extension__ ({ \ + float64x2_t __s0_460 = __p0_460; \ + float64x2_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \ + float64x2_t __ret_460; \ + __ret_460 = __noswap_splatq_laneq_f64(__rev0_460, __p1_460); \ + __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \ + __ret_460; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f32(__p0_461, __p1_461) __extension__ ({ \ + float32x4_t __s0_461 = __p0_461; \ + float32x4_t __ret_461; \ + __ret_461 = splatq_laneq_f32(__s0_461, __p1_461); \ + __ret_461; \ +}) +#else +#define vdupq_laneq_f32(__p0_462, __p1_462) __extension__ ({ \ + float32x4_t __s0_462 = __p0_462; \ + float32x4_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \ + float32x4_t __ret_462; \ + __ret_462 = __noswap_splatq_laneq_f32(__rev0_462, __p1_462); \ + __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \ + __ret_462; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f16(__p0_463, __p1_463) __extension__ ({ \ + float16x8_t __s0_463 = __p0_463; \ + float16x8_t __ret_463; \ + __ret_463 = splatq_laneq_f16(__s0_463, __p1_463); \ + __ret_463; \ +}) +#else +#define vdupq_laneq_f16(__p0_464, __p1_464) __extension__ ({ \ + float16x8_t __s0_464 = __p0_464; \ + 
float16x8_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_464; \ + __ret_464 = __noswap_splatq_laneq_f16(__rev0_464, __p1_464); \ + __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_464; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s32(__p0_465, __p1_465) __extension__ ({ \ + int32x4_t __s0_465 = __p0_465; \ + int32x4_t __ret_465; \ + __ret_465 = splatq_laneq_s32(__s0_465, __p1_465); \ + __ret_465; \ +}) +#else +#define vdupq_laneq_s32(__p0_466, __p1_466) __extension__ ({ \ + int32x4_t __s0_466 = __p0_466; \ + int32x4_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \ + int32x4_t __ret_466; \ + __ret_466 = __noswap_splatq_laneq_s32(__rev0_466, __p1_466); \ + __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \ + __ret_466; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s64(__p0_467, __p1_467) __extension__ ({ \ + int64x2_t __s0_467 = __p0_467; \ + int64x2_t __ret_467; \ + __ret_467 = splatq_laneq_s64(__s0_467, __p1_467); \ + __ret_467; \ +}) +#else +#define vdupq_laneq_s64(__p0_468, __p1_468) __extension__ ({ \ + int64x2_t __s0_468 = __p0_468; \ + int64x2_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \ + int64x2_t __ret_468; \ + __ret_468 = __noswap_splatq_laneq_s64(__rev0_468, __p1_468); \ + __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \ + __ret_468; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s16(__p0_469, __p1_469) __extension__ ({ \ + int16x8_t __s0_469 = __p0_469; \ + int16x8_t __ret_469; \ + __ret_469 = splatq_laneq_s16(__s0_469, __p1_469); \ + __ret_469; \ +}) +#else +#define vdupq_laneq_s16(__p0_470, __p1_470) __extension__ ({ \ + int16x8_t __s0_470 = __p0_470; \ + int16x8_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_470; \ + __ret_470 = __noswap_splatq_laneq_s16(__rev0_470, __p1_470); \ + __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_470; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u8(__p0_471, __p1_471) __extension__ ({ \ + uint8x16_t __s0_471 = __p0_471; \ + uint8x8_t __ret_471; \ + __ret_471 = splat_laneq_u8(__s0_471, __p1_471); \ + __ret_471; \ +}) +#else +#define vdup_laneq_u8(__p0_472, __p1_472) __extension__ ({ \ + uint8x16_t __s0_472 = __p0_472; \ + uint8x16_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret_472; \ + __ret_472 = __noswap_splat_laneq_u8(__rev0_472, __p1_472); \ + __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_472; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u32(__p0_473, __p1_473) __extension__ ({ \ + uint32x4_t __s0_473 = __p0_473; \ + uint32x2_t __ret_473; \ + __ret_473 = splat_laneq_u32(__s0_473, __p1_473); \ + __ret_473; \ +}) +#else +#define vdup_laneq_u32(__p0_474, __p1_474) __extension__ ({ \ + uint32x4_t __s0_474 = __p0_474; \ + uint32x4_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \ + uint32x2_t __ret_474; \ + __ret_474 = __noswap_splat_laneq_u32(__rev0_474, __p1_474); \ + __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 1, 0); \ + __ret_474; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u64(__p0_475, __p1_475) __extension__ ({ 
\ + uint64x2_t __s0_475 = __p0_475; \ + uint64x1_t __ret_475; \ + __ret_475 = splat_laneq_u64(__s0_475, __p1_475); \ + __ret_475; \ +}) +#else +#define vdup_laneq_u64(__p0_476, __p1_476) __extension__ ({ \ + uint64x2_t __s0_476 = __p0_476; \ + uint64x2_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \ + uint64x1_t __ret_476; \ + __ret_476 = __noswap_splat_laneq_u64(__rev0_476, __p1_476); \ + __ret_476; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u16(__p0_477, __p1_477) __extension__ ({ \ + uint16x8_t __s0_477 = __p0_477; \ + uint16x4_t __ret_477; \ + __ret_477 = splat_laneq_u16(__s0_477, __p1_477); \ + __ret_477; \ +}) +#else +#define vdup_laneq_u16(__p0_478, __p1_478) __extension__ ({ \ + uint16x8_t __s0_478 = __p0_478; \ + uint16x8_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret_478; \ + __ret_478 = __noswap_splat_laneq_u16(__rev0_478, __p1_478); \ + __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \ + __ret_478; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s8(__p0_479, __p1_479) __extension__ ({ \ + int8x16_t __s0_479 = __p0_479; \ + int8x8_t __ret_479; \ + __ret_479 = splat_laneq_s8(__s0_479, __p1_479); \ + __ret_479; \ +}) +#else +#define vdup_laneq_s8(__p0_480, __p1_480) __extension__ ({ \ + int8x16_t __s0_480 = __p0_480; \ + int8x16_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret_480; \ + __ret_480 = __noswap_splat_laneq_s8(__rev0_480, __p1_480); \ + __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_480; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f64(__p0_481, __p1_481) __extension__ ({ \ + float64x2_t __s0_481 = __p0_481; \ + float64x1_t __ret_481; \ + __ret_481 = splat_laneq_f64(__s0_481, __p1_481); \ + __ret_481; \ +}) +#else +#define vdup_laneq_f64(__p0_482, __p1_482) __extension__ ({ \ + float64x2_t __s0_482 = __p0_482; \ + float64x2_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \ + float64x1_t __ret_482; \ + __ret_482 = __noswap_splat_laneq_f64(__rev0_482, __p1_482); \ + __ret_482; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f32(__p0_483, __p1_483) __extension__ ({ \ + float32x4_t __s0_483 = __p0_483; \ + float32x2_t __ret_483; \ + __ret_483 = splat_laneq_f32(__s0_483, __p1_483); \ + __ret_483; \ +}) +#else +#define vdup_laneq_f32(__p0_484, __p1_484) __extension__ ({ \ + float32x4_t __s0_484 = __p0_484; \ + float32x4_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \ + float32x2_t __ret_484; \ + __ret_484 = __noswap_splat_laneq_f32(__rev0_484, __p1_484); \ + __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \ + __ret_484; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f16(__p0_485, __p1_485) __extension__ ({ \ + float16x8_t __s0_485 = __p0_485; \ + float16x4_t __ret_485; \ + __ret_485 = splat_laneq_f16(__s0_485, __p1_485); \ + __ret_485; \ +}) +#else +#define vdup_laneq_f16(__p0_486, __p1_486) __extension__ ({ \ + float16x8_t __s0_486 = __p0_486; \ + float16x8_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret_486; \ + __ret_486 = __noswap_splat_laneq_f16(__rev0_486, __p1_486); \ + __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \ + __ret_486; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vdup_laneq_s32(__p0_487, __p1_487) __extension__ ({ \ + int32x4_t __s0_487 = __p0_487; \ + int32x2_t __ret_487; \ + __ret_487 = splat_laneq_s32(__s0_487, __p1_487); \ + __ret_487; \ +}) +#else +#define vdup_laneq_s32(__p0_488, __p1_488) __extension__ ({ \ + int32x4_t __s0_488 = __p0_488; \ + int32x4_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 3, 2, 1, 0); \ + int32x2_t __ret_488; \ + __ret_488 = __noswap_splat_laneq_s32(__rev0_488, __p1_488); \ + __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \ + __ret_488; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s64(__p0_489, __p1_489) __extension__ ({ \ + int64x2_t __s0_489 = __p0_489; \ + int64x1_t __ret_489; \ + __ret_489 = splat_laneq_s64(__s0_489, __p1_489); \ + __ret_489; \ +}) +#else +#define vdup_laneq_s64(__p0_490, __p1_490) __extension__ ({ \ + int64x2_t __s0_490 = __p0_490; \ + int64x2_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 1, 0); \ + int64x1_t __ret_490; \ + __ret_490 = __noswap_splat_laneq_s64(__rev0_490, __p1_490); \ + __ret_490; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s16(__p0_491, __p1_491) __extension__ ({ \ + int16x8_t __s0_491 = __p0_491; \ + int16x4_t __ret_491; \ + __ret_491 = splat_laneq_s16(__s0_491, __p1_491); \ + __ret_491; \ +}) +#else +#define vdup_laneq_s16(__p0_492, __p1_492) __extension__ ({ \ + int16x8_t __s0_492 = __p0_492; \ + int16x8_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_492; \ + __ret_492 = __noswap_splat_laneq_s16(__rev0_492, __p1_492); \ + __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 3, 2, 1, 0); \ + __ret_492; \ +}) +#endif + +__ai poly64x1_t vdup_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vdup_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, 
__p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x2_t __ret; \ + __ret = 
(float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ 
+ float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#else +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ 
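+  /* The trailing integer literal passed to the polymorphic \
+     __builtin_neon_*_v builtins (9 here, for float32x2) is an internal \
+     element-type code; the 128-bit "q" forms add 32, e.g. 41 = float32x4. */ \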
+ float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, __p2); + return __ret; +} +#define vfmsd_lane_f64(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \ + float64_t __s0_493 = __p0_493; \ + float64_t __s1_493 = __p1_493; \ + float64x1_t __s2_493 = __p2_493; \ + float64_t __ret_493; \ + __ret_493 = vfmad_lane_f64(__s0_493, -__s1_493, __s2_493, __p3_493); \ + __ret_493; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmss_lane_f32(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \ + float32_t __s0_494 = __p0_494; \ + float32_t __s1_494 = __p1_494; \ + float32x2_t __s2_494 = __p2_494; \ + float32_t __ret_494; \ + __ret_494 = vfmas_lane_f32(__s0_494, -__s1_494, __s2_494, __p3_494); \ + __ret_494; \ +}) +#else +#define vfmss_lane_f32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \ + float32_t __s0_495 = __p0_495; \ + float32_t __s1_495 = __p1_495; \ + float32x2_t __s2_495 = __p2_495; \ + float32x2_t __rev2_495; __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 1, 0); \ + float32_t __ret_495; \ + __ret_495 = __noswap_vfmas_lane_f32(__s0_495, -__s1_495, __rev2_495, __p3_495); \ + __ret_495; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f64(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \ + float64x2_t __s0_496 = __p0_496; \ + float64x2_t __s1_496 = __p1_496; \ + float64x1_t __s2_496 = __p2_496; \ + float64x2_t __ret_496; \ + __ret_496 = vfmaq_lane_f64(__s0_496, -__s1_496, __s2_496, __p3_496); \ + __ret_496; \ +}) +#else +#define vfmsq_lane_f64(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \ + float64x2_t __s0_497 = __p0_497; \ + float64x2_t __s1_497 = __p1_497; \ + float64x1_t __s2_497 = __p2_497; \ + float64x2_t __rev0_497; __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 1, 0); \ + 
float64x2_t __rev1_497; __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 1, 0); \ + float64x2_t __ret_497; \ + __ret_497 = __noswap_vfmaq_lane_f64(__rev0_497, -__rev1_497, __s2_497, __p3_497); \ + __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 1, 0); \ + __ret_497; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \ + float32x4_t __s0_498 = __p0_498; \ + float32x4_t __s1_498 = __p1_498; \ + float32x2_t __s2_498 = __p2_498; \ + float32x4_t __ret_498; \ + __ret_498 = vfmaq_lane_f32(__s0_498, -__s1_498, __s2_498, __p3_498); \ + __ret_498; \ +}) +#else +#define vfmsq_lane_f32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \ + float32x4_t __s0_499 = __p0_499; \ + float32x4_t __s1_499 = __p1_499; \ + float32x2_t __s2_499 = __p2_499; \ + float32x4_t __rev0_499; __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \ + float32x4_t __rev1_499; __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \ + float32x2_t __rev2_499; __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 1, 0); \ + float32x4_t __ret_499; \ + __ret_499 = __noswap_vfmaq_lane_f32(__rev0_499, -__rev1_499, __rev2_499, __p3_499); \ + __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \ + __ret_499; \ +}) +#endif + +#define vfms_lane_f64(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \ + float64x1_t __s0_500 = __p0_500; \ + float64x1_t __s1_500 = __p1_500; \ + float64x1_t __s2_500 = __p2_500; \ + float64x1_t __ret_500; \ + __ret_500 = vfma_lane_f64(__s0_500, -__s1_500, __s2_500, __p3_500); \ + __ret_500; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \ + float32x2_t __s0_501 = __p0_501; \ + float32x2_t __s1_501 = __p1_501; \ + float32x2_t __s2_501 = __p2_501; \ + float32x2_t __ret_501; \ + __ret_501 = vfma_lane_f32(__s0_501, -__s1_501, __s2_501, __p3_501); \ + __ret_501; \ +}) +#else +#define vfms_lane_f32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \ + float32x2_t __s0_502 = __p0_502; \ + float32x2_t __s1_502 = __p1_502; \ + float32x2_t __s2_502 = __p2_502; \ + float32x2_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 1, 0); \ + float32x2_t __rev1_502; __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 1, 0); \ + float32x2_t __rev2_502; __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 1, 0); \ + float32x2_t __ret_502; \ + __ret_502 = __noswap_vfma_lane_f32(__rev0_502, -__rev1_502, __rev2_502, __p3_502); \ + __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 1, 0); \ + __ret_502; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsd_laneq_f64(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \ + float64_t __s0_503 = __p0_503; \ + float64_t __s1_503 = __p1_503; \ + float64x2_t __s2_503 = __p2_503; \ + float64_t __ret_503; \ + __ret_503 = vfmad_laneq_f64(__s0_503, -__s1_503, __s2_503, __p3_503); \ + __ret_503; \ +}) +#else +#define vfmsd_laneq_f64(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \ + float64_t __s0_504 = __p0_504; \ + float64_t __s1_504 = __p1_504; \ + float64x2_t __s2_504 = __p2_504; \ + float64x2_t __rev2_504; __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 1, 0); \ + float64_t __ret_504; \ + __ret_504 = __noswap_vfmad_laneq_f64(__s0_504, -__s1_504, __rev2_504, __p3_504); \ + __ret_504; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmss_laneq_f32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ 
({ \ + float32_t __s0_505 = __p0_505; \ + float32_t __s1_505 = __p1_505; \ + float32x4_t __s2_505 = __p2_505; \ + float32_t __ret_505; \ + __ret_505 = vfmas_laneq_f32(__s0_505, -__s1_505, __s2_505, __p3_505); \ + __ret_505; \ +}) +#else +#define vfmss_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \ + float32_t __s0_506 = __p0_506; \ + float32_t __s1_506 = __p1_506; \ + float32x4_t __s2_506 = __p2_506; \ + float32x4_t __rev2_506; __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 3, 2, 1, 0); \ + float32_t __ret_506; \ + __ret_506 = __noswap_vfmas_laneq_f32(__s0_506, -__s1_506, __rev2_506, __p3_506); \ + __ret_506; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f64(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \ + float64x2_t __s0_507 = __p0_507; \ + float64x2_t __s1_507 = __p1_507; \ + float64x2_t __s2_507 = __p2_507; \ + float64x2_t __ret_507; \ + __ret_507 = vfmaq_laneq_f64(__s0_507, -__s1_507, __s2_507, __p3_507); \ + __ret_507; \ +}) +#else +#define vfmsq_laneq_f64(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \ + float64x2_t __s0_508 = __p0_508; \ + float64x2_t __s1_508 = __p1_508; \ + float64x2_t __s2_508 = __p2_508; \ + float64x2_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \ + float64x2_t __rev1_508; __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \ + float64x2_t __rev2_508; __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 1, 0); \ + float64x2_t __ret_508; \ + __ret_508 = __noswap_vfmaq_laneq_f64(__rev0_508, -__rev1_508, __rev2_508, __p3_508); \ + __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \ + __ret_508; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ + float32x4_t __s0_509 = __p0_509; \ + float32x4_t __s1_509 = __p1_509; \ + float32x4_t __s2_509 = __p2_509; \ + float32x4_t __ret_509; \ + __ret_509 = vfmaq_laneq_f32(__s0_509, -__s1_509, __s2_509, __p3_509); \ + __ret_509; \ +}) +#else +#define vfmsq_laneq_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ + float32x4_t __s0_510 = __p0_510; \ + float32x4_t __s1_510 = __p1_510; \ + float32x4_t __s2_510 = __p2_510; \ + float32x4_t __rev0_510; __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \ + float32x4_t __rev1_510; __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \ + float32x4_t __rev2_510; __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 3, 2, 1, 0); \ + float32x4_t __ret_510; \ + __ret_510 = __noswap_vfmaq_laneq_f32(__rev0_510, -__rev1_510, __rev2_510, __p3_510); \ + __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \ + __ret_510; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f64(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ + float64x1_t __s0_511 = __p0_511; \ + float64x1_t __s1_511 = __p1_511; \ + float64x2_t __s2_511 = __p2_511; \ + float64x1_t __ret_511; \ + __ret_511 = vfma_laneq_f64(__s0_511, -__s1_511, __s2_511, __p3_511); \ + __ret_511; \ +}) +#else +#define vfms_laneq_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ + float64x1_t __s0_512 = __p0_512; \ + float64x1_t __s1_512 = __p1_512; \ + float64x2_t __s2_512 = __p2_512; \ + float64x2_t __rev2_512; __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 1, 0); \ + float64x1_t __ret_512; \ + __ret_512 = __noswap_vfma_laneq_f64(__s0_512, -__s1_512, __rev2_512, __p3_512); \ + __ret_512; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
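+/* The vfms* forms are fused multiply-subtract: each expands to the
+   matching vfma* intrinsic with the multiplicand negated. Illustrative
+   sketch (assumes an AArch64 little-endian target; a and b are
+   float32x2_t, c is float32x4_t):
+
+     float32x2_t acc = vfms_laneq_f32(a, b, c, 3); // acc = a - b * c[3]
+
+   On big-endian, the #else branches reverse the inputs, call a
+   __noswap_* helper, and reverse the result back. */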
+#define vfms_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ + float32x2_t __s0_513 = __p0_513; \ + float32x2_t __s1_513 = __p1_513; \ + float32x4_t __s2_513 = __p2_513; \ + float32x2_t __ret_513; \ + __ret_513 = vfma_laneq_f32(__s0_513, -__s1_513, __s2_513, __p3_513); \ + __ret_513; \ +}) +#else +#define vfms_laneq_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ + float32x2_t __s0_514 = __p0_514; \ + float32x2_t __s1_514 = __p1_514; \ + float32x4_t __s2_514 = __p2_514; \ + float32x2_t __rev0_514; __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 1, 0); \ + float32x2_t __rev1_514; __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 1, 0); \ + float32x4_t __rev2_514; __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 3, 2, 1, 0); \ + float32x2_t __ret_514; \ + __ret_514 = __noswap_vfma_laneq_f32(__rev0_514, -__rev1_514, __rev2_514, __p3_514); \ + __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 1, 0); \ + __ret_514; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2}); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +__ai poly64x1_t 
__noswap_vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai float64x1_t vget_high_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#define vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai float64x1_t vget_low_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#define vld1_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t 
__ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ + __ret; \ +}) +#define vld1_dup_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ + __ret; \ +}) +#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#define vld1_p64_x2(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef 
__LITTLE_ENDIAN__ +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x2(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x3(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x3(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x4(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x4(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_dup_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ + __ret; \ +}) +#else +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ + __ret; \ +}) +#else +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ + __ret; \ +}) +#else +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __ret; \ + 
__builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ + __ret; \ +}) +#else +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ + __ret; \ +}) +#else +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ + __ret; \ +}) +#else +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ + __ret; \ +}) +#else +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; 
\ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ + __ret; \ +}) +#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ + __ret; \ +}) +#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ + __ret; \ +}) +#define vld3_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld3_dup_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ + __ret; \ +}) +#else +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ + __ret; \ +}) +#else +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ + __ret; \ +}) +#else +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ + __ret; \ +}) +#else +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ + __ret; \ +}) +#else +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ + __ret; \ +}) +#else +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ + __ret; \ +}) +#else +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ + __ret; \ +}) +#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ + __ret; \ +}) +#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ + __ret; \ +}) +#define vld4_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_dup_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ + __ret; \ +}) +#else +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ + __ret; \ +}) +#else +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __ret; \ + 
__builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ + __ret; \ +}) +#else +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ + __ret; \ +}) +#else +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ + __ret; \ +}) +#else +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ + __ret; \ +}) +#else +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ + __ret; \ +}) +#else +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ + __ret; \ +}) +#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ + __ret; \ +}) +#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ + __ret; \ +}) +#define vldrq_p128(__p0) __extension__ ({ \ + poly128_t __ret; \ + __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxnmv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); + return __ret; +} +#else +__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); + return __ret; +} +#else +__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0); + return __ret; +} +#else +__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); + return __ret; +} +#else +__ai int8_t vmaxvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vmaxvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); + return __ret; +} +#else +__ai int32_t vmaxvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); + return __ret; +} +#else +__ai int16_t vmaxvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); + return __ret; +} +#else +__ai uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0); + return __ret; +} +#else +__ai uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); + return __ret; +} +#else +__ai uint16_t vmaxv_u16(uint16x4_t __p0) 
{ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); + return __ret; +} +#else +__ai int8_t vmaxv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); + return __ret; +} +#else +__ai float32_t vmaxv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); + return __ret; +} +#else +__ai int32_t vmaxv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); + return __ret; +} +#else +__ai int16_t vmaxv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vminnmvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vminnmvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); + return __ret; +} +#else +__ai float32_t 
vminnmv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0); + return __ret; +} +#else +__ai uint8_t vminvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); + return __ret; +} +#else +__ai uint32_t vminvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); + return __ret; +} +#else +__ai uint16_t vminvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); + return __ret; +} +#else +__ai int8_t vminvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); + return __ret; +} +#else +__ai float64_t vminvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); + return __ret; +} +#else +__ai float32_t vminvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); + return __ret; +} +#else +__ai int32_t vminvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); + return __ret; +} +#else +__ai int16_t vminvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = 
(uint8_t) __builtin_neon_vminv_u8(__p0); + return __ret; +} +#else +__ai uint8_t vminv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); + return __ret; +} +#else +__ai uint32_t vminv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminv_u16(__p0); + return __ret; +} +#else +__ai uint16_t vminv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8(__p0); + return __ret; +} +#else +__ai int8_t vminv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32(__p0); + return __ret; +} +#else +__ai float32_t vminv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32(__p0); + return __ret; +} +#else +__ai int32_t vminv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16(__p0); + return __ret; +} +#else +__ai int16_t vminv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \ + uint32x4_t __s0_515 = __p0_515; \ + 
uint32x4_t __s1_515 = __p1_515; \ + uint32x4_t __s2_515 = __p2_515; \ + uint32x4_t __ret_515; \ + __ret_515 = __s0_515 + __s1_515 * splatq_laneq_u32(__s2_515, __p3_515); \ + __ret_515; \ +}) +#else +#define vmlaq_laneq_u32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \ + uint32x4_t __s0_516 = __p0_516; \ + uint32x4_t __s1_516 = __p1_516; \ + uint32x4_t __s2_516 = __p2_516; \ + uint32x4_t __rev0_516; __rev0_516 = __builtin_shufflevector(__s0_516, __s0_516, 3, 2, 1, 0); \ + uint32x4_t __rev1_516; __rev1_516 = __builtin_shufflevector(__s1_516, __s1_516, 3, 2, 1, 0); \ + uint32x4_t __rev2_516; __rev2_516 = __builtin_shufflevector(__s2_516, __s2_516, 3, 2, 1, 0); \ + uint32x4_t __ret_516; \ + __ret_516 = __rev0_516 + __rev1_516 * __noswap_splatq_laneq_u32(__rev2_516, __p3_516); \ + __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 3, 2, 1, 0); \ + __ret_516; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \ + uint16x8_t __s0_517 = __p0_517; \ + uint16x8_t __s1_517 = __p1_517; \ + uint16x8_t __s2_517 = __p2_517; \ + uint16x8_t __ret_517; \ + __ret_517 = __s0_517 + __s1_517 * splatq_laneq_u16(__s2_517, __p3_517); \ + __ret_517; \ +}) +#else +#define vmlaq_laneq_u16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \ + uint16x8_t __s0_518 = __p0_518; \ + uint16x8_t __s1_518 = __p1_518; \ + uint16x8_t __s2_518 = __p2_518; \ + uint16x8_t __rev0_518; __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_518; __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_518; __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_518; \ + __ret_518 = __rev0_518 + __rev1_518 * __noswap_splatq_laneq_u16(__rev2_518, __p3_518); \ + __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_518; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_f32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \ + float32x4_t __s0_519 = __p0_519; \ + float32x4_t __s1_519 = __p1_519; \ + float32x4_t __s2_519 = __p2_519; \ + float32x4_t __ret_519; \ + __ret_519 = __s0_519 + __s1_519 * splatq_laneq_f32(__s2_519, __p3_519); \ + __ret_519; \ +}) +#else +#define vmlaq_laneq_f32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \ + float32x4_t __s0_520 = __p0_520; \ + float32x4_t __s1_520 = __p1_520; \ + float32x4_t __s2_520 = __p2_520; \ + float32x4_t __rev0_520; __rev0_520 = __builtin_shufflevector(__s0_520, __s0_520, 3, 2, 1, 0); \ + float32x4_t __rev1_520; __rev1_520 = __builtin_shufflevector(__s1_520, __s1_520, 3, 2, 1, 0); \ + float32x4_t __rev2_520; __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 3, 2, 1, 0); \ + float32x4_t __ret_520; \ + __ret_520 = __rev0_520 + __rev1_520 * __noswap_splatq_laneq_f32(__rev2_520, __p3_520); \ + __ret_520 = __builtin_shufflevector(__ret_520, __ret_520, 3, 2, 1, 0); \ + __ret_520; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \ + int32x4_t __s0_521 = __p0_521; \ + int32x4_t __s1_521 = __p1_521; \ + int32x4_t __s2_521 = __p2_521; \ + int32x4_t __ret_521; \ + __ret_521 = __s0_521 + __s1_521 * splatq_laneq_s32(__s2_521, __p3_521); \ + __ret_521; \ +}) +#else +#define vmlaq_laneq_s32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \ + int32x4_t __s0_522 = __p0_522; \ + int32x4_t __s1_522 = __p1_522; 
\ + int32x4_t __s2_522 = __p2_522; \ + int32x4_t __rev0_522; __rev0_522 = __builtin_shufflevector(__s0_522, __s0_522, 3, 2, 1, 0); \ + int32x4_t __rev1_522; __rev1_522 = __builtin_shufflevector(__s1_522, __s1_522, 3, 2, 1, 0); \ + int32x4_t __rev2_522; __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \ + int32x4_t __ret_522; \ + __ret_522 = __rev0_522 + __rev1_522 * __noswap_splatq_laneq_s32(__rev2_522, __p3_522); \ + __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); \ + __ret_522; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \ + int16x8_t __s0_523 = __p0_523; \ + int16x8_t __s1_523 = __p1_523; \ + int16x8_t __s2_523 = __p2_523; \ + int16x8_t __ret_523; \ + __ret_523 = __s0_523 + __s1_523 * splatq_laneq_s16(__s2_523, __p3_523); \ + __ret_523; \ +}) +#else +#define vmlaq_laneq_s16(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \ + int16x8_t __s0_524 = __p0_524; \ + int16x8_t __s1_524 = __p1_524; \ + int16x8_t __s2_524 = __p2_524; \ + int16x8_t __rev0_524; __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_524; __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_524; __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_524; \ + __ret_524 = __rev0_524 + __rev1_524 * __noswap_splatq_laneq_s16(__rev2_524, __p3_524); \ + __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_524; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \ + uint32x2_t __s0_525 = __p0_525; \ + uint32x2_t __s1_525 = __p1_525; \ + uint32x4_t __s2_525 = __p2_525; \ + uint32x2_t __ret_525; \ + __ret_525 = __s0_525 + __s1_525 * splat_laneq_u32(__s2_525, __p3_525); \ + __ret_525; \ +}) +#else +#define vmla_laneq_u32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \ + uint32x2_t __s0_526 = __p0_526; \ + uint32x2_t __s1_526 = __p1_526; \ + uint32x4_t __s2_526 = __p2_526; \ + uint32x2_t __rev0_526; __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 1, 0); \ + uint32x2_t __rev1_526; __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 1, 0); \ + uint32x4_t __rev2_526; __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \ + uint32x2_t __ret_526; \ + __ret_526 = __rev0_526 + __rev1_526 * __noswap_splat_laneq_u32(__rev2_526, __p3_526); \ + __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 1, 0); \ + __ret_526; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \ + uint16x4_t __s0_527 = __p0_527; \ + uint16x4_t __s1_527 = __p1_527; \ + uint16x8_t __s2_527 = __p2_527; \ + uint16x4_t __ret_527; \ + __ret_527 = __s0_527 + __s1_527 * splat_laneq_u16(__s2_527, __p3_527); \ + __ret_527; \ +}) +#else +#define vmla_laneq_u16(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \ + uint16x4_t __s0_528 = __p0_528; \ + uint16x4_t __s1_528 = __p1_528; \ + uint16x8_t __s2_528 = __p2_528; \ + uint16x4_t __rev0_528; __rev0_528 = __builtin_shufflevector(__s0_528, __s0_528, 3, 2, 1, 0); \ + uint16x4_t __rev1_528; __rev1_528 = __builtin_shufflevector(__s1_528, __s1_528, 3, 2, 1, 0); \ + uint16x8_t __rev2_528; __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret_528; \ + __ret_528 = __rev0_528 + 
__rev1_528 * __noswap_splat_laneq_u16(__rev2_528, __p3_528); \ + __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 3, 2, 1, 0); \ + __ret_528; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \ + float32x2_t __s0_529 = __p0_529; \ + float32x2_t __s1_529 = __p1_529; \ + float32x4_t __s2_529 = __p2_529; \ + float32x2_t __ret_529; \ + __ret_529 = __s0_529 + __s1_529 * splat_laneq_f32(__s2_529, __p3_529); \ + __ret_529; \ +}) +#else +#define vmla_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \ + float32x2_t __s0_530 = __p0_530; \ + float32x2_t __s1_530 = __p1_530; \ + float32x4_t __s2_530 = __p2_530; \ + float32x2_t __rev0_530; __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \ + float32x2_t __rev1_530; __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \ + float32x4_t __rev2_530; __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \ + float32x2_t __ret_530; \ + __ret_530 = __rev0_530 + __rev1_530 * __noswap_splat_laneq_f32(__rev2_530, __p3_530); \ + __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \ + __ret_530; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \ + int32x2_t __s0_531 = __p0_531; \ + int32x2_t __s1_531 = __p1_531; \ + int32x4_t __s2_531 = __p2_531; \ + int32x2_t __ret_531; \ + __ret_531 = __s0_531 + __s1_531 * splat_laneq_s32(__s2_531, __p3_531); \ + __ret_531; \ +}) +#else +#define vmla_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \ + int32x2_t __s0_532 = __p0_532; \ + int32x2_t __s1_532 = __p1_532; \ + int32x4_t __s2_532 = __p2_532; \ + int32x2_t __rev0_532; __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 1, 0); \ + int32x2_t __rev1_532; __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 1, 0); \ + int32x4_t __rev2_532; __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \ + int32x2_t __ret_532; \ + __ret_532 = __rev0_532 + __rev1_532 * __noswap_splat_laneq_s32(__rev2_532, __p3_532); \ + __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 1, 0); \ + __ret_532; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \ + int16x4_t __s0_533 = __p0_533; \ + int16x4_t __s1_533 = __p1_533; \ + int16x8_t __s2_533 = __p2_533; \ + int16x4_t __ret_533; \ + __ret_533 = __s0_533 + __s1_533 * splat_laneq_s16(__s2_533, __p3_533); \ + __ret_533; \ +}) +#else +#define vmla_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \ + int16x4_t __s0_534 = __p0_534; \ + int16x4_t __s1_534 = __p1_534; \ + int16x8_t __s2_534 = __p2_534; \ + int16x4_t __rev0_534; __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 3, 2, 1, 0); \ + int16x4_t __rev1_534; __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 3, 2, 1, 0); \ + int16x8_t __rev2_534; __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_534; \ + __ret_534 = __rev0_534 + __rev1_534 * __noswap_splat_laneq_s16(__rev2_534, __p3_534); \ + __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 3, 2, 1, 0); \ + __ret_534; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \ + uint64x2_t __s0_535 = __p0_535; \ + uint32x4_t __s1_535 = __p1_535; \ + uint32x2_t __s2_535 = __p2_535; \ + uint64x2_t __ret_535; \ + __ret_535 = __s0_535 + 
vmull_u32(vget_high_u32(__s1_535), splat_lane_u32(__s2_535, __p3_535)); \ + __ret_535; \ +}) +#else +#define vmlal_high_lane_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \ + uint64x2_t __s0_536 = __p0_536; \ + uint32x4_t __s1_536 = __p1_536; \ + uint32x2_t __s2_536 = __p2_536; \ + uint64x2_t __rev0_536; __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 1, 0); \ + uint32x4_t __rev1_536; __rev1_536 = __builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \ + uint32x2_t __rev2_536; __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 1, 0); \ + uint64x2_t __ret_536; \ + __ret_536 = __rev0_536 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_536), __noswap_splat_lane_u32(__rev2_536, __p3_536)); \ + __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 1, 0); \ + __ret_536; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u16(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \ + uint32x4_t __s0_537 = __p0_537; \ + uint16x8_t __s1_537 = __p1_537; \ + uint16x4_t __s2_537 = __p2_537; \ + uint32x4_t __ret_537; \ + __ret_537 = __s0_537 + vmull_u16(vget_high_u16(__s1_537), splat_lane_u16(__s2_537, __p3_537)); \ + __ret_537; \ +}) +#else +#define vmlal_high_lane_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \ + uint32x4_t __s0_538 = __p0_538; \ + uint16x8_t __s1_538 = __p1_538; \ + uint16x4_t __s2_538 = __p2_538; \ + uint32x4_t __rev0_538; __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \ + uint16x8_t __rev1_538; __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_538; __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \ + uint32x4_t __ret_538; \ + __ret_538 = __rev0_538 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_538), __noswap_splat_lane_u16(__rev2_538, __p3_538)); \ + __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \ + __ret_538; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s32(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \ + int64x2_t __s0_539 = __p0_539; \ + int32x4_t __s1_539 = __p1_539; \ + int32x2_t __s2_539 = __p2_539; \ + int64x2_t __ret_539; \ + __ret_539 = __s0_539 + vmull_s32(vget_high_s32(__s1_539), splat_lane_s32(__s2_539, __p3_539)); \ + __ret_539; \ +}) +#else +#define vmlal_high_lane_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \ + int64x2_t __s0_540 = __p0_540; \ + int32x4_t __s1_540 = __p1_540; \ + int32x2_t __s2_540 = __p2_540; \ + int64x2_t __rev0_540; __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 1, 0); \ + int32x4_t __rev1_540; __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 3, 2, 1, 0); \ + int32x2_t __rev2_540; __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 1, 0); \ + int64x2_t __ret_540; \ + __ret_540 = __rev0_540 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_540), __noswap_splat_lane_s32(__rev2_540, __p3_540)); \ + __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); \ + __ret_540; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s16(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \ + int32x4_t __s0_541 = __p0_541; \ + int16x8_t __s1_541 = __p1_541; \ + int16x4_t __s2_541 = __p2_541; \ + int32x4_t __ret_541; \ + __ret_541 = __s0_541 + vmull_s16(vget_high_s16(__s1_541), splat_lane_s16(__s2_541, __p3_541)); \ + __ret_541; \ +}) +#else +#define vmlal_high_lane_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \ + int32x4_t __s0_542 = __p0_542; \ + 
int16x8_t __s1_542 = __p1_542; \ + int16x4_t __s2_542 = __p2_542; \ + int32x4_t __rev0_542; __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 3, 2, 1, 0); \ + int16x8_t __rev1_542; __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_542; __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \ + int32x4_t __ret_542; \ + __ret_542 = __rev0_542 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_542), __noswap_splat_lane_s16(__rev2_542, __p3_542)); \ + __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 3, 2, 1, 0); \ + __ret_542; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u32(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \ + uint64x2_t __s0_543 = __p0_543; \ + uint32x4_t __s1_543 = __p1_543; \ + uint32x4_t __s2_543 = __p2_543; \ + uint64x2_t __ret_543; \ + __ret_543 = __s0_543 + vmull_u32(vget_high_u32(__s1_543), splat_laneq_u32(__s2_543, __p3_543)); \ + __ret_543; \ +}) +#else +#define vmlal_high_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \ + uint64x2_t __s0_544 = __p0_544; \ + uint32x4_t __s1_544 = __p1_544; \ + uint32x4_t __s2_544 = __p2_544; \ + uint64x2_t __rev0_544; __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 1, 0); \ + uint32x4_t __rev1_544; __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \ + uint32x4_t __rev2_544; __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 3, 2, 1, 0); \ + uint64x2_t __ret_544; \ + __ret_544 = __rev0_544 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_544), __noswap_splat_laneq_u32(__rev2_544, __p3_544)); \ + __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 1, 0); \ + __ret_544; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u16(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \ + uint32x4_t __s0_545 = __p0_545; \ + uint16x8_t __s1_545 = __p1_545; \ + uint16x8_t __s2_545 = __p2_545; \ + uint32x4_t __ret_545; \ + __ret_545 = __s0_545 + vmull_u16(vget_high_u16(__s1_545), splat_laneq_u16(__s2_545, __p3_545)); \ + __ret_545; \ +}) +#else +#define vmlal_high_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \ + uint32x4_t __s0_546 = __p0_546; \ + uint16x8_t __s1_546 = __p1_546; \ + uint16x8_t __s2_546 = __p2_546; \ + uint32x4_t __rev0_546; __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 3, 2, 1, 0); \ + uint16x8_t __rev1_546; __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_546; __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_546; \ + __ret_546 = __rev0_546 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_546), __noswap_splat_laneq_u16(__rev2_546, __p3_546)); \ + __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 3, 2, 1, 0); \ + __ret_546; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \ + int64x2_t __s0_547 = __p0_547; \ + int32x4_t __s1_547 = __p1_547; \ + int32x4_t __s2_547 = __p2_547; \ + int64x2_t __ret_547; \ + __ret_547 = __s0_547 + vmull_s32(vget_high_s32(__s1_547), splat_laneq_s32(__s2_547, __p3_547)); \ + __ret_547; \ +}) +#else +#define vmlal_high_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \ + int64x2_t __s0_548 = __p0_548; \ + int32x4_t __s1_548 = __p1_548; \ + int32x4_t __s2_548 = __p2_548; \ + int64x2_t __rev0_548; __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \ + 
int32x4_t __rev1_548; __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 3, 2, 1, 0); \ + int32x4_t __rev2_548; __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \ + int64x2_t __ret_548; \ + __ret_548 = __rev0_548 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_548), __noswap_splat_laneq_s32(__rev2_548, __p3_548)); \ + __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \ + __ret_548; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \ + int32x4_t __s0_549 = __p0_549; \ + int16x8_t __s1_549 = __p1_549; \ + int16x8_t __s2_549 = __p2_549; \ + int32x4_t __ret_549; \ + __ret_549 = __s0_549 + vmull_s16(vget_high_s16(__s1_549), splat_laneq_s16(__s2_549, __p3_549)); \ + __ret_549; \ +}) +#else +#define vmlal_high_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \ + int32x4_t __s0_550 = __p0_550; \ + int16x8_t __s1_550 = __p1_550; \ + int16x8_t __s2_550 = __p2_550; \ + int32x4_t __rev0_550; __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \ + int16x8_t __rev1_550; __rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_550; __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_550; \ + __ret_550 = __rev0_550 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_550), __noswap_splat_laneq_s16(__rev2_550, __p3_550)); \ + __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \ + __ret_550; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \ + uint64x2_t __s0_551 = __p0_551; \ + uint32x2_t __s1_551 = __p1_551; \ + uint32x4_t __s2_551 = __p2_551; \ + uint64x2_t __ret_551; \ + __ret_551 = __s0_551 + vmull_u32(__s1_551, splat_laneq_u32(__s2_551, __p3_551)); \ + __ret_551; \ +}) +#else +#define vmlal_laneq_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \ + uint64x2_t __s0_552 = __p0_552; \ + uint32x2_t __s1_552 = __p1_552; \ + uint32x4_t __s2_552 = __p2_552; \ + uint64x2_t __rev0_552; __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \ + uint32x2_t __rev1_552; __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 1, 0); \ + uint32x4_t __rev2_552; __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 3, 2, 1, 0); \ + uint64x2_t __ret_552; \ + __ret_552 = __rev0_552 + __noswap_vmull_u32(__rev1_552, __noswap_splat_laneq_u32(__rev2_552, __p3_552)); \ + __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \ + __ret_552; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \ + uint32x4_t __s0_553 = __p0_553; \ + uint16x4_t __s1_553 = __p1_553; \ + uint16x8_t __s2_553 = __p2_553; \ + uint32x4_t __ret_553; \ + __ret_553 = __s0_553 + vmull_u16(__s1_553, splat_laneq_u16(__s2_553, __p3_553)); \ + __ret_553; \ +}) +#else +#define vmlal_laneq_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \ + uint32x4_t __s0_554 = __p0_554; \ + uint16x4_t __s1_554 = __p1_554; \ + uint16x8_t __s2_554 = __p2_554; \ + uint32x4_t __rev0_554; __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \ + uint16x4_t __rev1_554; __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 3, 2, 1, 0); \ + uint16x8_t __rev2_554; __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_554; \ + __ret_554 = __rev0_554 + 
__noswap_vmull_u16(__rev1_554, __noswap_splat_laneq_u16(__rev2_554, __p3_554)); \ + __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \ + __ret_554; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \ + int64x2_t __s0_555 = __p0_555; \ + int32x2_t __s1_555 = __p1_555; \ + int32x4_t __s2_555 = __p2_555; \ + int64x2_t __ret_555; \ + __ret_555 = __s0_555 + vmull_s32(__s1_555, splat_laneq_s32(__s2_555, __p3_555)); \ + __ret_555; \ +}) +#else +#define vmlal_laneq_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \ + int64x2_t __s0_556 = __p0_556; \ + int32x2_t __s1_556 = __p1_556; \ + int32x4_t __s2_556 = __p2_556; \ + int64x2_t __rev0_556; __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \ + int32x2_t __rev1_556; __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \ + int32x4_t __rev2_556; __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 3, 2, 1, 0); \ + int64x2_t __ret_556; \ + __ret_556 = __rev0_556 + __noswap_vmull_s32(__rev1_556, __noswap_splat_laneq_s32(__rev2_556, __p3_556)); \ + __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \ + __ret_556; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \ + int32x4_t __s0_557 = __p0_557; \ + int16x4_t __s1_557 = __p1_557; \ + int16x8_t __s2_557 = __p2_557; \ + int32x4_t __ret_557; \ + __ret_557 = __s0_557 + vmull_s16(__s1_557, splat_laneq_s16(__s2_557, __p3_557)); \ + __ret_557; \ +}) +#else +#define vmlal_laneq_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \ + int32x4_t __s0_558 = __p0_558; \ + int16x4_t __s1_558 = __p1_558; \ + int16x8_t __s2_558 = __p2_558; \ + int32x4_t __rev0_558; __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \ + int16x4_t __rev1_558; __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 3, 2, 1, 0); \ + int16x8_t __rev2_558; __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_558; \ + __ret_558 = __rev0_558 + __noswap_vmull_s16(__rev1_558, __noswap_splat_laneq_s16(__rev2_558, __p3_558)); \ + __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \ + __ret_558; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \ + uint32x4_t __s0_559 = __p0_559; \ + uint32x4_t __s1_559 = __p1_559; \ + uint32x4_t __s2_559 = __p2_559; \ + uint32x4_t __ret_559; \ + __ret_559 = __s0_559 - __s1_559 * splatq_laneq_u32(__s2_559, __p3_559); \ + __ret_559; \ +}) +#else +#define vmlsq_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \ + uint32x4_t __s0_560 = 
__p0_560; \ + uint32x4_t __s1_560 = __p1_560; \ + uint32x4_t __s2_560 = __p2_560; \ + uint32x4_t __rev0_560; __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 3, 2, 1, 0); \ + uint32x4_t __rev1_560; __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \ + uint32x4_t __rev2_560; __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \ + uint32x4_t __ret_560; \ + __ret_560 = __rev0_560 - __rev1_560 * __noswap_splatq_laneq_u32(__rev2_560, __p3_560); \ + __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 3, 2, 1, 0); \ + __ret_560; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \ + uint16x8_t __s0_561 = __p0_561; \ + uint16x8_t __s1_561 = __p1_561; \ + uint16x8_t __s2_561 = __p2_561; \ + uint16x8_t __ret_561; \ + __ret_561 = __s0_561 - __s1_561 * splatq_laneq_u16(__s2_561, __p3_561); \ + __ret_561; \ +}) +#else +#define vmlsq_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \ + uint16x8_t __s0_562 = __p0_562; \ + uint16x8_t __s1_562 = __p1_562; \ + uint16x8_t __s2_562 = __p2_562; \ + uint16x8_t __rev0_562; __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_562; __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_562; __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_562; \ + __ret_562 = __rev0_562 - __rev1_562 * __noswap_splatq_laneq_u16(__rev2_562, __p3_562); \ + __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_562; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_f32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \ + float32x4_t __s0_563 = __p0_563; \ + float32x4_t __s1_563 = __p1_563; \ + float32x4_t __s2_563 = __p2_563; \ + float32x4_t __ret_563; \ + __ret_563 = __s0_563 - __s1_563 * splatq_laneq_f32(__s2_563, __p3_563); \ + __ret_563; \ +}) +#else +#define vmlsq_laneq_f32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \ + float32x4_t __s0_564 = __p0_564; \ + float32x4_t __s1_564 = __p1_564; \ + float32x4_t __s2_564 = __p2_564; \ + float32x4_t __rev0_564; __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \ + float32x4_t __rev1_564; __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \ + float32x4_t __rev2_564; __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \ + float32x4_t __ret_564; \ + __ret_564 = __rev0_564 - __rev1_564 * __noswap_splatq_laneq_f32(__rev2_564, __p3_564); \ + __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \ + __ret_564; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s32(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \ + int32x4_t __s0_565 = __p0_565; \ + int32x4_t __s1_565 = __p1_565; \ + int32x4_t __s2_565 = __p2_565; \ + int32x4_t __ret_565; \ + __ret_565 = __s0_565 - __s1_565 * splatq_laneq_s32(__s2_565, __p3_565); \ + __ret_565; \ +}) +#else +#define vmlsq_laneq_s32(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \ + int32x4_t __s0_566 = __p0_566; \ + int32x4_t __s1_566 = __p1_566; \ + int32x4_t __s2_566 = __p2_566; \ + int32x4_t __rev0_566; __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \ + int32x4_t __rev1_566; __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 3, 2, 1, 0); \ + int32x4_t __rev2_566; __rev2_566 = __builtin_shufflevector(__s2_566, 
__s2_566, 3, 2, 1, 0); \ + int32x4_t __ret_566; \ + __ret_566 = __rev0_566 - __rev1_566 * __noswap_splatq_laneq_s32(__rev2_566, __p3_566); \ + __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \ + __ret_566; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s16(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \ + int16x8_t __s0_567 = __p0_567; \ + int16x8_t __s1_567 = __p1_567; \ + int16x8_t __s2_567 = __p2_567; \ + int16x8_t __ret_567; \ + __ret_567 = __s0_567 - __s1_567 * splatq_laneq_s16(__s2_567, __p3_567); \ + __ret_567; \ +}) +#else +#define vmlsq_laneq_s16(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \ + int16x8_t __s0_568 = __p0_568; \ + int16x8_t __s1_568 = __p1_568; \ + int16x8_t __s2_568 = __p2_568; \ + int16x8_t __rev0_568; __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_568; __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_568; __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_568; \ + __ret_568 = __rev0_568 - __rev1_568 * __noswap_splatq_laneq_s16(__rev2_568, __p3_568); \ + __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_568; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u32(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \ + uint32x2_t __s0_569 = __p0_569; \ + uint32x2_t __s1_569 = __p1_569; \ + uint32x4_t __s2_569 = __p2_569; \ + uint32x2_t __ret_569; \ + __ret_569 = __s0_569 - __s1_569 * splat_laneq_u32(__s2_569, __p3_569); \ + __ret_569; \ +}) +#else +#define vmls_laneq_u32(__p0_570, __p1_570, __p2_570, __p3_570) __extension__ ({ \ + uint32x2_t __s0_570 = __p0_570; \ + uint32x2_t __s1_570 = __p1_570; \ + uint32x4_t __s2_570 = __p2_570; \ + uint32x2_t __rev0_570; __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 1, 0); \ + uint32x2_t __rev1_570; __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 1, 0); \ + uint32x4_t __rev2_570; __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 3, 2, 1, 0); \ + uint32x2_t __ret_570; \ + __ret_570 = __rev0_570 - __rev1_570 * __noswap_splat_laneq_u32(__rev2_570, __p3_570); \ + __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 1, 0); \ + __ret_570; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u16(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \ + uint16x4_t __s0_571 = __p0_571; \ + uint16x4_t __s1_571 = __p1_571; \ + uint16x8_t __s2_571 = __p2_571; \ + uint16x4_t __ret_571; \ + __ret_571 = __s0_571 - __s1_571 * splat_laneq_u16(__s2_571, __p3_571); \ + __ret_571; \ +}) +#else +#define vmls_laneq_u16(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \ + uint16x4_t __s0_572 = __p0_572; \ + uint16x4_t __s1_572 = __p1_572; \ + uint16x8_t __s2_572 = __p2_572; \ + uint16x4_t __rev0_572; __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \ + uint16x4_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \ + uint16x8_t __rev2_572; __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret_572; \ + __ret_572 = __rev0_572 - __rev1_572 * __noswap_splat_laneq_u16(__rev2_572, __p3_572); \ + __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \ + __ret_572; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_f32(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \ + float32x2_t __s0_573 = 
__p0_573; \ + float32x2_t __s1_573 = __p1_573; \ + float32x4_t __s2_573 = __p2_573; \ + float32x2_t __ret_573; \ + __ret_573 = __s0_573 - __s1_573 * splat_laneq_f32(__s2_573, __p3_573); \ + __ret_573; \ +}) +#else +#define vmls_laneq_f32(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \ + float32x2_t __s0_574 = __p0_574; \ + float32x2_t __s1_574 = __p1_574; \ + float32x4_t __s2_574 = __p2_574; \ + float32x2_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 1, 0); \ + float32x2_t __rev1_574; __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 1, 0); \ + float32x4_t __rev2_574; __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 3, 2, 1, 0); \ + float32x2_t __ret_574; \ + __ret_574 = __rev0_574 - __rev1_574 * __noswap_splat_laneq_f32(__rev2_574, __p3_574); \ + __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 1, 0); \ + __ret_574; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \ + int32x2_t __s0_575 = __p0_575; \ + int32x2_t __s1_575 = __p1_575; \ + int32x4_t __s2_575 = __p2_575; \ + int32x2_t __ret_575; \ + __ret_575 = __s0_575 - __s1_575 * splat_laneq_s32(__s2_575, __p3_575); \ + __ret_575; \ +}) +#else +#define vmls_laneq_s32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \ + int32x2_t __s0_576 = __p0_576; \ + int32x2_t __s1_576 = __p1_576; \ + int32x4_t __s2_576 = __p2_576; \ + int32x2_t __rev0_576; __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \ + int32x2_t __rev1_576; __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 1, 0); \ + int32x4_t __rev2_576; __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \ + int32x2_t __ret_576; \ + __ret_576 = __rev0_576 - __rev1_576 * __noswap_splat_laneq_s32(__rev2_576, __p3_576); \ + __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \ + __ret_576; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \ + int16x4_t __s0_577 = __p0_577; \ + int16x4_t __s1_577 = __p1_577; \ + int16x8_t __s2_577 = __p2_577; \ + int16x4_t __ret_577; \ + __ret_577 = __s0_577 - __s1_577 * splat_laneq_s16(__s2_577, __p3_577); \ + __ret_577; \ +}) +#else +#define vmls_laneq_s16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \ + int16x4_t __s0_578 = __p0_578; \ + int16x4_t __s1_578 = __p1_578; \ + int16x8_t __s2_578 = __p2_578; \ + int16x4_t __rev0_578; __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \ + int16x4_t __rev1_578; __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 3, 2, 1, 0); \ + int16x8_t __rev2_578; __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_578; \ + __ret_578 = __rev0_578 - __rev1_578 * __noswap_splat_laneq_s16(__rev2_578, __p3_578); \ + __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \ + __ret_578; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \ + uint64x2_t __s0_579 = __p0_579; \ + uint32x4_t __s1_579 = __p1_579; \ + uint32x2_t __s2_579 = __p2_579; \ + uint64x2_t __ret_579; \ + __ret_579 = __s0_579 - vmull_u32(vget_high_u32(__s1_579), splat_lane_u32(__s2_579, __p3_579)); \ + __ret_579; \ +}) +#else +#define vmlsl_high_lane_u32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \ + uint64x2_t __s0_580 = __p0_580; \ + uint32x4_t __s1_580 = __p1_580; \ + uint32x2_t __s2_580 = __p2_580; \ + uint64x2_t 
__rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \ + uint32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \ + uint32x2_t __rev2_580; __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 1, 0); \ + uint64x2_t __ret_580; \ + __ret_580 = __rev0_580 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_580), __noswap_splat_lane_u32(__rev2_580, __p3_580)); \ + __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \ + __ret_580; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u16(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \ + uint32x4_t __s0_581 = __p0_581; \ + uint16x8_t __s1_581 = __p1_581; \ + uint16x4_t __s2_581 = __p2_581; \ + uint32x4_t __ret_581; \ + __ret_581 = __s0_581 - vmull_u16(vget_high_u16(__s1_581), splat_lane_u16(__s2_581, __p3_581)); \ + __ret_581; \ +}) +#else +#define vmlsl_high_lane_u16(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \ + uint32x4_t __s0_582 = __p0_582; \ + uint16x8_t __s1_582 = __p1_582; \ + uint16x4_t __s2_582 = __p2_582; \ + uint32x4_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \ + uint16x8_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_582; __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \ + uint32x4_t __ret_582; \ + __ret_582 = __rev0_582 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_582), __noswap_splat_lane_u16(__rev2_582, __p3_582)); \ + __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \ + __ret_582; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s32(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \ + int64x2_t __s0_583 = __p0_583; \ + int32x4_t __s1_583 = __p1_583; \ + int32x2_t __s2_583 = __p2_583; \ + int64x2_t __ret_583; \ + __ret_583 = __s0_583 - vmull_s32(vget_high_s32(__s1_583), splat_lane_s32(__s2_583, __p3_583)); \ + __ret_583; \ +}) +#else +#define vmlsl_high_lane_s32(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \ + int64x2_t __s0_584 = __p0_584; \ + int32x4_t __s1_584 = __p1_584; \ + int32x2_t __s2_584 = __p2_584; \ + int64x2_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 1, 0); \ + int32x4_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 3, 2, 1, 0); \ + int32x2_t __rev2_584; __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 1, 0); \ + int64x2_t __ret_584; \ + __ret_584 = __rev0_584 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_584), __noswap_splat_lane_s32(__rev2_584, __p3_584)); \ + __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 1, 0); \ + __ret_584; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s16(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \ + int32x4_t __s0_585 = __p0_585; \ + int16x8_t __s1_585 = __p1_585; \ + int16x4_t __s2_585 = __p2_585; \ + int32x4_t __ret_585; \ + __ret_585 = __s0_585 - vmull_s16(vget_high_s16(__s1_585), splat_lane_s16(__s2_585, __p3_585)); \ + __ret_585; \ +}) +#else +#define vmlsl_high_lane_s16(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \ + int32x4_t __s0_586 = __p0_586; \ + int16x8_t __s1_586 = __p1_586; \ + int16x4_t __s2_586 = __p2_586; \ + int32x4_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \ + int16x8_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_586; __rev2_586 = 
__builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \ + int32x4_t __ret_586; \ + __ret_586 = __rev0_586 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_586), __noswap_splat_lane_s16(__rev2_586, __p3_586)); \ + __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 3, 2, 1, 0); \ + __ret_586; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u32(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \ + uint64x2_t __s0_587 = __p0_587; \ + uint32x4_t __s1_587 = __p1_587; \ + uint32x4_t __s2_587 = __p2_587; \ + uint64x2_t __ret_587; \ + __ret_587 = __s0_587 - vmull_u32(vget_high_u32(__s1_587), splat_laneq_u32(__s2_587, __p3_587)); \ + __ret_587; \ +}) +#else +#define vmlsl_high_laneq_u32(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \ + uint64x2_t __s0_588 = __p0_588; \ + uint32x4_t __s1_588 = __p1_588; \ + uint32x4_t __s2_588 = __p2_588; \ + uint64x2_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \ + uint32x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \ + uint32x4_t __rev2_588; __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 3, 2, 1, 0); \ + uint64x2_t __ret_588; \ + __ret_588 = __rev0_588 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_588), __noswap_splat_laneq_u32(__rev2_588, __p3_588)); \ + __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \ + __ret_588; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u16(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \ + uint32x4_t __s0_589 = __p0_589; \ + uint16x8_t __s1_589 = __p1_589; \ + uint16x8_t __s2_589 = __p2_589; \ + uint32x4_t __ret_589; \ + __ret_589 = __s0_589 - vmull_u16(vget_high_u16(__s1_589), splat_laneq_u16(__s2_589, __p3_589)); \ + __ret_589; \ +}) +#else +#define vmlsl_high_laneq_u16(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \ + uint32x4_t __s0_590 = __p0_590; \ + uint16x8_t __s1_590 = __p1_590; \ + uint16x8_t __s2_590 = __p2_590; \ + uint32x4_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \ + uint16x8_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_590; __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_590; \ + __ret_590 = __rev0_590 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_590), __noswap_splat_laneq_u16(__rev2_590, __p3_590)); \ + __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \ + __ret_590; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \ + int64x2_t __s0_591 = __p0_591; \ + int32x4_t __s1_591 = __p1_591; \ + int32x4_t __s2_591 = __p2_591; \ + int64x2_t __ret_591; \ + __ret_591 = __s0_591 - vmull_s32(vget_high_s32(__s1_591), splat_laneq_s32(__s2_591, __p3_591)); \ + __ret_591; \ +}) +#else +#define vmlsl_high_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \ + int64x2_t __s0_592 = __p0_592; \ + int32x4_t __s1_592 = __p1_592; \ + int32x4_t __s2_592 = __p2_592; \ + int64x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ + int32x4_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \ + int32x4_t __rev2_592; __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \ + int64x2_t __ret_592; \ + __ret_592 = __rev0_592 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_592), 
__noswap_splat_laneq_s32(__rev2_592, __p3_592)); \ + __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ + __ret_592; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \ + int32x4_t __s0_593 = __p0_593; \ + int16x8_t __s1_593 = __p1_593; \ + int16x8_t __s2_593 = __p2_593; \ + int32x4_t __ret_593; \ + __ret_593 = __s0_593 - vmull_s16(vget_high_s16(__s1_593), splat_laneq_s16(__s2_593, __p3_593)); \ + __ret_593; \ +}) +#else +#define vmlsl_high_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \ + int32x4_t __s0_594 = __p0_594; \ + int16x8_t __s1_594 = __p1_594; \ + int16x8_t __s2_594 = __p2_594; \ + int32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ + int16x8_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_594; __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_594; \ + __ret_594 = __rev0_594 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_594), __noswap_splat_laneq_s16(__rev2_594, __p3_594)); \ + __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ + __ret_594; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \ + uint64x2_t __s0_595 = __p0_595; \ + uint32x2_t __s1_595 = __p1_595; \ + uint32x4_t __s2_595 = __p2_595; \ + uint64x2_t __ret_595; \ + __ret_595 = __s0_595 - vmull_u32(__s1_595, splat_laneq_u32(__s2_595, __p3_595)); \ + __ret_595; \ +}) +#else +#define vmlsl_laneq_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \ + uint64x2_t __s0_596 = __p0_596; \ + uint32x2_t __s1_596 = __p1_596; \ + uint32x4_t __s2_596 = __p2_596; \ + uint64x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ + uint32x2_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \ + uint32x4_t __rev2_596; __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 3, 2, 1, 0); \ + uint64x2_t __ret_596; \ + __ret_596 = __rev0_596 - __noswap_vmull_u32(__rev1_596, __noswap_splat_laneq_u32(__rev2_596, __p3_596)); \ + __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ + __ret_596; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \ + uint32x4_t __s0_597 = __p0_597; \ + uint16x4_t __s1_597 = __p1_597; \ + uint16x8_t __s2_597 = __p2_597; \ + uint32x4_t __ret_597; \ + __ret_597 = __s0_597 - vmull_u16(__s1_597, splat_laneq_u16(__s2_597, __p3_597)); \ + __ret_597; \ +}) +#else +#define vmlsl_laneq_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \ + uint32x4_t __s0_598 = __p0_598; \ + uint16x4_t __s1_598 = __p1_598; \ + uint16x8_t __s2_598 = __p2_598; \ + uint32x4_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \ + uint16x4_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \ + uint16x8_t __rev2_598; __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_598; \ + __ret_598 = __rev0_598 - __noswap_vmull_u16(__rev1_598, __noswap_splat_laneq_u16(__rev2_598, __p3_598)); \ + __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \ + __ret_598; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \ + 
int64x2_t __s0_599 = __p0_599; \
+  int32x2_t __s1_599 = __p1_599; \
+  int32x4_t __s2_599 = __p2_599; \
+  int64x2_t __ret_599; \
+  __ret_599 = __s0_599 - vmull_s32(__s1_599, splat_laneq_s32(__s2_599, __p3_599)); \
+  __ret_599; \
+})
+#else
+#define vmlsl_laneq_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \
+  int64x2_t __s0_600 = __p0_600; \
+  int32x2_t __s1_600 = __p1_600; \
+  int32x4_t __s2_600 = __p2_600; \
+  int64x2_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \
+  int32x2_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 1, 0); \
+  int32x4_t __rev2_600; __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 3, 2, 1, 0); \
+  int64x2_t __ret_600; \
+  __ret_600 = __rev0_600 - __noswap_vmull_s32(__rev1_600, __noswap_splat_laneq_s32(__rev2_600, __p3_600)); \
+  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \
+  __ret_600; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsl_laneq_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \
+  int32x4_t __s0_601 = __p0_601; \
+  int16x4_t __s1_601 = __p1_601; \
+  int16x8_t __s2_601 = __p2_601; \
+  int32x4_t __ret_601; \
+  __ret_601 = __s0_601 - vmull_s16(__s1_601, splat_laneq_s16(__s2_601, __p3_601)); \
+  __ret_601; \
+})
+#else
+#define vmlsl_laneq_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \
+  int32x4_t __s0_602 = __p0_602; \
+  int16x4_t __s1_602 = __p1_602; \
+  int16x8_t __s2_602 = __p2_602; \
+  int32x4_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \
+  int16x4_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
+  int16x8_t __rev2_602; __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_602; \
+  __ret_602 = __rev0_602 - __noswap_vmull_s16(__rev1_602, __noswap_splat_laneq_s16(__rev2_602, __p3_602)); \
+  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \
+  __ret_602; \
+})
+#endif
+
+__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t) {__p0};
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  return __ret;
+}
+#else
+__ai float64x2_t vmovq_n_f64(float64_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) {__p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmov_n_f64(float64_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) {__p0};
+  return __ret;
+}
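For reference, the vmovl_high_* definitions that follow widen the top half of a 128-bit vector by doing a shift-left-long with a shift of zero (vshll_n on the high half), which is how the header expresses a high-half widening move. A minimal usage sketch, assuming an AArch64 target where this header is in effect (the name widen_high_example is illustrative only):

#include <arm_neon.h>

/* Widens the upper 8 lanes of a 16-byte vector to 16-bit lanes;
   per the definitions below this is vshll_n_u8(vget_high_u8(bytes), 0). */
static inline uint16x8_t widen_high_example(uint8x16_t bytes) {
  return vmovl_high_u8(bytes);
}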
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_603) {
+  uint16x8_t __ret_603;
+  uint8x8_t __a1_603 = vget_high_u8(__p0_603);
+  __ret_603 = (uint16x8_t)(vshll_n_u8(__a1_603, 0));
+  return __ret_603;
+}
+#else
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_604) {
+  uint8x16_t __rev0_604; __rev0_604 = __builtin_shufflevector(__p0_604, __p0_604, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret_604;
+  uint8x8_t __a1_604 = __noswap_vget_high_u8(__rev0_604);
+  __ret_604 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_604, 0));
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_604;
+}
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_605) {
+  uint16x8_t __ret_605;
+  uint8x8_t __a1_605 = __noswap_vget_high_u8(__p0_605);
+  __ret_605 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_605, 0));
+  return __ret_605;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_606) {
+  uint64x2_t __ret_606;
+  uint32x2_t __a1_606 = vget_high_u32(__p0_606);
+  __ret_606 = (uint64x2_t)(vshll_n_u32(__a1_606, 0));
+  return __ret_606;
+}
+#else
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_607) {
+  uint32x4_t __rev0_607; __rev0_607 = __builtin_shufflevector(__p0_607, __p0_607, 3, 2, 1, 0);
+  uint64x2_t __ret_607;
+  uint32x2_t __a1_607 = __noswap_vget_high_u32(__rev0_607);
+  __ret_607 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_607, 0));
+  __ret_607 = __builtin_shufflevector(__ret_607, __ret_607, 1, 0);
+  return __ret_607;
+}
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_608) {
+  uint64x2_t __ret_608;
+  uint32x2_t __a1_608 = __noswap_vget_high_u32(__p0_608);
+  __ret_608 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_608, 0));
+  return __ret_608;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_609) {
+  uint32x4_t __ret_609;
+  uint16x4_t __a1_609 = vget_high_u16(__p0_609);
+  __ret_609 = (uint32x4_t)(vshll_n_u16(__a1_609, 0));
+  return __ret_609;
+}
+#else
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_610) {
+  uint16x8_t __rev0_610; __rev0_610 = __builtin_shufflevector(__p0_610, __p0_610, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret_610;
+  uint16x4_t __a1_610 = __noswap_vget_high_u16(__rev0_610);
+  __ret_610 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_610, 0));
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0);
+  return __ret_610;
+}
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_611) {
+  uint32x4_t __ret_611;
+  uint16x4_t __a1_611 = __noswap_vget_high_u16(__p0_611);
+  __ret_611 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_611, 0));
+  return __ret_611;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_612) {
+  int16x8_t __ret_612;
+  int8x8_t __a1_612 = vget_high_s8(__p0_612);
+  __ret_612 = (int16x8_t)(vshll_n_s8(__a1_612, 0));
+  return __ret_612;
+}
+#else
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_613) {
+  int8x16_t __rev0_613; __rev0_613 = __builtin_shufflevector(__p0_613, __p0_613, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret_613;
+  int8x8_t __a1_613 = __noswap_vget_high_s8(__rev0_613);
+  __ret_613 = (int16x8_t)(__noswap_vshll_n_s8(__a1_613, 0));
+  __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_613;
+}
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_614) {
+  int16x8_t __ret_614;
+  int8x8_t __a1_614 = __noswap_vget_high_s8(__p0_614);
+  __ret_614 = (int16x8_t)(__noswap_vshll_n_s8(__a1_614, 0));
+  return __ret_614;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_615) {
+  int64x2_t __ret_615;
+  int32x2_t __a1_615 = vget_high_s32(__p0_615);
+  __ret_615 = (int64x2_t)(vshll_n_s32(__a1_615, 0));
+  return __ret_615;
+}
+#else
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_616) {
+  int32x4_t __rev0_616; __rev0_616 = __builtin_shufflevector(__p0_616, __p0_616, 3, 2, 1, 0);
+  int64x2_t __ret_616;
+  int32x2_t __a1_616 = __noswap_vget_high_s32(__rev0_616);
+  __ret_616 = (int64x2_t)(__noswap_vshll_n_s32(__a1_616, 0));
+  __ret_616 =
__builtin_shufflevector(__ret_616, __ret_616, 1, 0); + return __ret_616; +} +__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_617) { + int64x2_t __ret_617; + int32x2_t __a1_617 = __noswap_vget_high_s32(__p0_617); + __ret_617 = (int64x2_t)(__noswap_vshll_n_s32(__a1_617, 0)); + return __ret_617; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_618) { + int32x4_t __ret_618; + int16x4_t __a1_618 = vget_high_s16(__p0_618); + __ret_618 = (int32x4_t)(vshll_n_s16(__a1_618, 0)); + return __ret_618; +} +#else +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_619) { + int16x8_t __rev0_619; __rev0_619 = __builtin_shufflevector(__p0_619, __p0_619, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret_619; + int16x4_t __a1_619 = __noswap_vget_high_s16(__rev0_619); + __ret_619 = (int32x4_t)(__noswap_vshll_n_s16(__a1_619, 0)); + __ret_619 = __builtin_shufflevector(__ret_619, __ret_619, 3, 2, 1, 0); + return __ret_619; +} +__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_620) { + int32x4_t __ret_620; + int16x4_t __a1_620 = __noswap_vget_high_s16(__p0_620); + __ret_620 = (int32x4_t)(__noswap_vshll_n_s16(__a1_620, 0)); + return __ret_620; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vmovn_u32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vmovn_u64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vmovn_u16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vmovn_s32(__p1)); + return __ret; +} +#else +__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vmovn_s64(__p1)); + return __ret; +} +#else +__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vmovn_s16(__p1)); + return __ret; +} +#else +__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#define vmuld_lane_f64(__p0_621, __p1_621, __p2_621) __extension__ ({ \ + float64_t __s0_621 = __p0_621; \ + float64x1_t __s1_621 = __p1_621; \ + float64_t __ret_621; \ + __ret_621 = __s0_621 * vget_lane_f64(__s1_621, __p2_621); \ + __ret_621; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmuls_lane_f32(__p0_622, __p1_622, __p2_622) __extension__ ({ \ + float32_t __s0_622 = __p0_622; \ + float32x2_t __s1_622 = __p1_622; \ + float32_t __ret_622; \ + __ret_622 = __s0_622 * vget_lane_f32(__s1_622, __p2_622); \ + __ret_622; \ +}) +#else +#define vmuls_lane_f32(__p0_623, __p1_623, __p2_623) __extension__ ({ \ + float32_t __s0_623 = __p0_623; \ + float32x2_t __s1_623 = __p1_623; \ + float32x2_t __rev1_623; __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \ + float32_t __ret_623; \ + __ret_623 = __s0_623 * __noswap_vget_lane_f32(__rev1_623, __p2_623); \ + __ret_623; \ +}) +#endif + +#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f64(__p0_624, __p1_624, __p2_624) __extension__ ({ \ + float64x2_t __s0_624 = __p0_624; \ + float64x1_t __s1_624 = __p1_624; \ + float64x2_t __ret_624; \ + __ret_624 = __s0_624 * splatq_lane_f64(__s1_624, __p2_624); \ + __ret_624; \ +}) +#else +#define vmulq_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \ + float64x2_t __s0_625 = __p0_625; \ + float64x1_t __s1_625 = __p1_625; \ + float64x2_t __rev0_625; __rev0_625 = 
__builtin_shufflevector(__s0_625, __s0_625, 1, 0); \ + float64x2_t __ret_625; \ + __ret_625 = __rev0_625 * __noswap_splatq_lane_f64(__s1_625, __p2_625); \ + __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 1, 0); \ + __ret_625; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuld_laneq_f64(__p0_626, __p1_626, __p2_626) __extension__ ({ \ + float64_t __s0_626 = __p0_626; \ + float64x2_t __s1_626 = __p1_626; \ + float64_t __ret_626; \ + __ret_626 = __s0_626 * vgetq_lane_f64(__s1_626, __p2_626); \ + __ret_626; \ +}) +#else +#define vmuld_laneq_f64(__p0_627, __p1_627, __p2_627) __extension__ ({ \ + float64_t __s0_627 = __p0_627; \ + float64x2_t __s1_627 = __p1_627; \ + float64x2_t __rev1_627; __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 1, 0); \ + float64_t __ret_627; \ + __ret_627 = __s0_627 * __noswap_vgetq_lane_f64(__rev1_627, __p2_627); \ + __ret_627; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuls_laneq_f32(__p0_628, __p1_628, __p2_628) __extension__ ({ \ + float32_t __s0_628 = __p0_628; \ + float32x4_t __s1_628 = __p1_628; \ + float32_t __ret_628; \ + __ret_628 = __s0_628 * vgetq_lane_f32(__s1_628, __p2_628); \ + __ret_628; \ +}) +#else +#define vmuls_laneq_f32(__p0_629, __p1_629, __p2_629) __extension__ ({ \ + float32_t __s0_629 = __p0_629; \ + float32x4_t __s1_629 = __p1_629; \ + float32x4_t __rev1_629; __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \ + float32_t __ret_629; \ + __ret_629 = __s0_629 * __noswap_vgetq_lane_f32(__rev1_629, __p2_629); \ + __ret_629; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u32(__p0_630, __p1_630, __p2_630) __extension__ ({ \ + uint32x4_t __s0_630 = __p0_630; \ + uint32x4_t __s1_630 = __p1_630; \ + uint32x4_t __ret_630; \ + __ret_630 = __s0_630 * splatq_laneq_u32(__s1_630, __p2_630); \ + __ret_630; \ +}) +#else +#define vmulq_laneq_u32(__p0_631, __p1_631, __p2_631) __extension__ ({ \ + uint32x4_t __s0_631 = __p0_631; \ + uint32x4_t __s1_631 = __p1_631; \ + uint32x4_t __rev0_631; __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \ + uint32x4_t __rev1_631; __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 3, 2, 1, 0); \ + uint32x4_t __ret_631; \ + __ret_631 = __rev0_631 * __noswap_splatq_laneq_u32(__rev1_631, __p2_631); \ + __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \ + __ret_631; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u16(__p0_632, __p1_632, __p2_632) __extension__ ({ \ + uint16x8_t __s0_632 = __p0_632; \ + uint16x8_t __s1_632 = __p1_632; \ + uint16x8_t __ret_632; \ + __ret_632 = __s0_632 * splatq_laneq_u16(__s1_632, __p2_632); \ + __ret_632; \ +}) +#else +#define vmulq_laneq_u16(__p0_633, __p1_633, __p2_633) __extension__ ({ \ + uint16x8_t __s0_633 = __p0_633; \ + uint16x8_t __s1_633 = __p1_633; \ + uint16x8_t __rev0_633; __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 7, 6, 
5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_633; __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_633; \ + __ret_633 = __rev0_633 * __noswap_splatq_laneq_u16(__rev1_633, __p2_633); \ + __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_633; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \ + float64x2_t __s0_634 = __p0_634; \ + float64x2_t __s1_634 = __p1_634; \ + float64x2_t __ret_634; \ + __ret_634 = __s0_634 * splatq_laneq_f64(__s1_634, __p2_634); \ + __ret_634; \ +}) +#else +#define vmulq_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \ + float64x2_t __s0_635 = __p0_635; \ + float64x2_t __s1_635 = __p1_635; \ + float64x2_t __rev0_635; __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 1, 0); \ + float64x2_t __rev1_635; __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 1, 0); \ + float64x2_t __ret_635; \ + __ret_635 = __rev0_635 * __noswap_splatq_laneq_f64(__rev1_635, __p2_635); \ + __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 1, 0); \ + __ret_635; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \ + float32x4_t __s0_636 = __p0_636; \ + float32x4_t __s1_636 = __p1_636; \ + float32x4_t __ret_636; \ + __ret_636 = __s0_636 * splatq_laneq_f32(__s1_636, __p2_636); \ + __ret_636; \ +}) +#else +#define vmulq_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \ + float32x4_t __s0_637 = __p0_637; \ + float32x4_t __s1_637 = __p1_637; \ + float32x4_t __rev0_637; __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 3, 2, 1, 0); \ + float32x4_t __rev1_637; __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 3, 2, 1, 0); \ + float32x4_t __ret_637; \ + __ret_637 = __rev0_637 * __noswap_splatq_laneq_f32(__rev1_637, __p2_637); \ + __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 3, 2, 1, 0); \ + __ret_637; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s32(__p0_638, __p1_638, __p2_638) __extension__ ({ \ + int32x4_t __s0_638 = __p0_638; \ + int32x4_t __s1_638 = __p1_638; \ + int32x4_t __ret_638; \ + __ret_638 = __s0_638 * splatq_laneq_s32(__s1_638, __p2_638); \ + __ret_638; \ +}) +#else +#define vmulq_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ + int32x4_t __s0_639 = __p0_639; \ + int32x4_t __s1_639 = __p1_639; \ + int32x4_t __rev0_639; __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \ + int32x4_t __rev1_639; __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \ + int32x4_t __ret_639; \ + __ret_639 = __rev0_639 * __noswap_splatq_laneq_s32(__rev1_639, __p2_639); \ + __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \ + __ret_639; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s16(__p0_640, __p1_640, __p2_640) __extension__ ({ \ + int16x8_t __s0_640 = __p0_640; \ + int16x8_t __s1_640 = __p1_640; \ + int16x8_t __ret_640; \ + __ret_640 = __s0_640 * splatq_laneq_s16(__s1_640, __p2_640); \ + __ret_640; \ +}) +#else +#define vmulq_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \ + int16x8_t __s0_641 = __p0_641; \ + int16x8_t __s1_641 = __p1_641; \ + int16x8_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_641; __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_641; \ + __ret_641 = __rev0_641 * 
__noswap_splatq_laneq_s16(__rev1_641, __p2_641); \ + __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_641; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u32(__p0_642, __p1_642, __p2_642) __extension__ ({ \ + uint32x2_t __s0_642 = __p0_642; \ + uint32x4_t __s1_642 = __p1_642; \ + uint32x2_t __ret_642; \ + __ret_642 = __s0_642 * splat_laneq_u32(__s1_642, __p2_642); \ + __ret_642; \ +}) +#else +#define vmul_laneq_u32(__p0_643, __p1_643, __p2_643) __extension__ ({ \ + uint32x2_t __s0_643 = __p0_643; \ + uint32x4_t __s1_643 = __p1_643; \ + uint32x2_t __rev0_643; __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 1, 0); \ + uint32x4_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 3, 2, 1, 0); \ + uint32x2_t __ret_643; \ + __ret_643 = __rev0_643 * __noswap_splat_laneq_u32(__rev1_643, __p2_643); \ + __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 1, 0); \ + __ret_643; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u16(__p0_644, __p1_644, __p2_644) __extension__ ({ \ + uint16x4_t __s0_644 = __p0_644; \ + uint16x8_t __s1_644 = __p1_644; \ + uint16x4_t __ret_644; \ + __ret_644 = __s0_644 * splat_laneq_u16(__s1_644, __p2_644); \ + __ret_644; \ +}) +#else +#define vmul_laneq_u16(__p0_645, __p1_645, __p2_645) __extension__ ({ \ + uint16x4_t __s0_645 = __p0_645; \ + uint16x8_t __s1_645 = __p1_645; \ + uint16x4_t __rev0_645; __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 3, 2, 1, 0); \ + uint16x8_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret_645; \ + __ret_645 = __rev0_645 * __noswap_splat_laneq_u16(__rev1_645, __p2_645); \ + __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 3, 2, 1, 0); \ + __ret_645; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f32(__p0_646, __p1_646, __p2_646) __extension__ ({ \ + float32x2_t __s0_646 = __p0_646; \ + float32x4_t __s1_646 = __p1_646; \ + float32x2_t __ret_646; \ + __ret_646 = __s0_646 * splat_laneq_f32(__s1_646, __p2_646); \ + __ret_646; \ +}) +#else +#define vmul_laneq_f32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ + float32x2_t __s0_647 = __p0_647; \ + float32x4_t __s1_647 = __p1_647; \ + float32x2_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 1, 0); \ + float32x4_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \ + float32x2_t __ret_647; \ + __ret_647 = __rev0_647 * __noswap_splat_laneq_f32(__rev1_647, __p2_647); \ + __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 1, 0); \ + __ret_647; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \ + int32x2_t __s0_648 = __p0_648; \ + int32x4_t __s1_648 = __p1_648; \ + int32x2_t __ret_648; \ + __ret_648 = __s0_648 * splat_laneq_s32(__s1_648, __p2_648); \ + __ret_648; \ +}) +#else +#define vmul_laneq_s32(__p0_649, __p1_649, __p2_649) __extension__ ({ \ + int32x2_t __s0_649 = __p0_649; \ + int32x4_t __s1_649 = __p1_649; \ + int32x2_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \ + int32x4_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 3, 2, 1, 0); \ + int32x2_t __ret_649; \ + __ret_649 = __rev0_649 * __noswap_splat_laneq_s32(__rev1_649, __p2_649); \ + __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \ + __ret_649; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s16(__p0_650, __p1_650, __p2_650) 
__extension__ ({ \ + int16x4_t __s0_650 = __p0_650; \ + int16x8_t __s1_650 = __p1_650; \ + int16x4_t __ret_650; \ + __ret_650 = __s0_650 * splat_laneq_s16(__s1_650, __p2_650); \ + __ret_650; \ +}) +#else +#define vmul_laneq_s16(__p0_651, __p1_651, __p2_651) __extension__ ({ \ + int16x4_t __s0_651 = __p0_651; \ + int16x8_t __s1_651 = __p1_651; \ + int16x4_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \ + int16x8_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_651; \ + __ret_651 = __rev0_651 * __noswap_splat_laneq_s16(__rev1_651, __p2_651); \ + __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \ + __ret_651; \ +}) +#endif + +__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + __ret = __p0 * (float64x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = __rev0 * (float64x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); + return __ret; +} +#else +__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); 
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly128_t __ret; + __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ + uint32x4_t __s0_652 = __p0_652; \ + uint32x2_t __s1_652 = __p1_652; \ + uint64x2_t __ret_652; \ + __ret_652 = 
vmull_u32(vget_high_u32(__s0_652), splat_lane_u32(__s1_652, __p2_652)); \ + __ret_652; \ +}) +#else +#define vmull_high_lane_u32(__p0_653, __p1_653, __p2_653) __extension__ ({ \ + uint32x4_t __s0_653 = __p0_653; \ + uint32x2_t __s1_653 = __p1_653; \ + uint32x4_t __rev0_653; __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \ + uint32x2_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \ + uint64x2_t __ret_653; \ + __ret_653 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_653), __noswap_splat_lane_u32(__rev1_653, __p2_653)); \ + __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 1, 0); \ + __ret_653; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ + uint16x8_t __s0_654 = __p0_654; \ + uint16x4_t __s1_654 = __p1_654; \ + uint32x4_t __ret_654; \ + __ret_654 = vmull_u16(vget_high_u16(__s0_654), splat_lane_u16(__s1_654, __p2_654)); \ + __ret_654; \ +}) +#else +#define vmull_high_lane_u16(__p0_655, __p1_655, __p2_655) __extension__ ({ \ + uint16x8_t __s0_655 = __p0_655; \ + uint16x4_t __s1_655 = __p1_655; \ + uint16x8_t __rev0_655; __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \ + uint32x4_t __ret_655; \ + __ret_655 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_655), __noswap_splat_lane_u16(__rev1_655, __p2_655)); \ + __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \ + __ret_655; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ + int32x4_t __s0_656 = __p0_656; \ + int32x2_t __s1_656 = __p1_656; \ + int64x2_t __ret_656; \ + __ret_656 = vmull_s32(vget_high_s32(__s0_656), splat_lane_s32(__s1_656, __p2_656)); \ + __ret_656; \ +}) +#else +#define vmull_high_lane_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \ + int32x4_t __s0_657 = __p0_657; \ + int32x2_t __s1_657 = __p1_657; \ + int32x4_t __rev0_657; __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 3, 2, 1, 0); \ + int32x2_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 1, 0); \ + int64x2_t __ret_657; \ + __ret_657 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_657), __noswap_splat_lane_s32(__rev1_657, __p2_657)); \ + __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 1, 0); \ + __ret_657; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ + int16x8_t __s0_658 = __p0_658; \ + int16x4_t __s1_658 = __p1_658; \ + int32x4_t __ret_658; \ + __ret_658 = vmull_s16(vget_high_s16(__s0_658), splat_lane_s16(__s1_658, __p2_658)); \ + __ret_658; \ +}) +#else +#define vmull_high_lane_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \ + int16x8_t __s0_659 = __p0_659; \ + int16x4_t __s1_659 = __p1_659; \ + int16x8_t __rev0_659; __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \ + int32x4_t __ret_659; \ + __ret_659 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_659), __noswap_splat_lane_s16(__rev1_659, __p2_659)); \ + __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 3, 2, 1, 0); \ + __ret_659; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ + uint32x4_t __s0_660 = __p0_660; \ + 
uint32x4_t __s1_660 = __p1_660; \ + uint64x2_t __ret_660; \ + __ret_660 = vmull_u32(vget_high_u32(__s0_660), splat_laneq_u32(__s1_660, __p2_660)); \ + __ret_660; \ +}) +#else +#define vmull_high_laneq_u32(__p0_661, __p1_661, __p2_661) __extension__ ({ \ + uint32x4_t __s0_661 = __p0_661; \ + uint32x4_t __s1_661 = __p1_661; \ + uint32x4_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \ + uint32x4_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 3, 2, 1, 0); \ + uint64x2_t __ret_661; \ + __ret_661 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_661), __noswap_splat_laneq_u32(__rev1_661, __p2_661)); \ + __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \ + __ret_661; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ + uint16x8_t __s0_662 = __p0_662; \ + uint16x8_t __s1_662 = __p1_662; \ + uint32x4_t __ret_662; \ + __ret_662 = vmull_u16(vget_high_u16(__s0_662), splat_laneq_u16(__s1_662, __p2_662)); \ + __ret_662; \ +}) +#else +#define vmull_high_laneq_u16(__p0_663, __p1_663, __p2_663) __extension__ ({ \ + uint16x8_t __s0_663 = __p0_663; \ + uint16x8_t __s1_663 = __p1_663; \ + uint16x8_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_663; \ + __ret_663 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_663), __noswap_splat_laneq_u16(__rev1_663, __p2_663)); \ + __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \ + __ret_663; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ + int32x4_t __s0_664 = __p0_664; \ + int32x4_t __s1_664 = __p1_664; \ + int64x2_t __ret_664; \ + __ret_664 = vmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \ + __ret_664; \ +}) +#else +#define vmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \ + int32x4_t __s0_665 = __p0_665; \ + int32x4_t __s1_665 = __p1_665; \ + int32x4_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \ + int32x4_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \ + int64x2_t __ret_665; \ + __ret_665 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \ + __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \ + __ret_665; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ + int16x8_t __s0_666 = __p0_666; \ + int16x8_t __s1_666 = __p1_666; \ + int32x4_t __ret_666; \ + __ret_666 = vmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \ + __ret_666; \ +}) +#else +#define vmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \ + int16x8_t __s0_667 = __p0_667; \ + int16x8_t __s1_667 = __p1_667; \ + int16x8_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_667; __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_667; \ + __ret_667 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \ + __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \ + __ret_667; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_n_u32(vget_high_u32(__p0), __p1); + return __ret; +} +#else +__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_n_u16(vget_high_u16(__p0), __p1); + return __ret; +} +#else +__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ + uint32x2_t __s0_668 = __p0_668; \ + uint32x4_t __s1_668 = __p1_668; \ + uint64x2_t __ret_668; \ + __ret_668 = vmull_u32(__s0_668, splat_laneq_u32(__s1_668, __p2_668)); \ + __ret_668; \ +}) +#else +#define vmull_laneq_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \ + uint32x2_t __s0_669 = __p0_669; \ + uint32x4_t __s1_669 = __p1_669; \ + uint32x2_t __rev0_669; __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 1, 0); \ + uint32x4_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 3, 2, 1, 0); \ + uint64x2_t __ret_669; \ + __ret_669 = __noswap_vmull_u32(__rev0_669, __noswap_splat_laneq_u32(__rev1_669, __p2_669)); \ + __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \ + __ret_669; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \ + uint16x4_t __s0_670 = __p0_670; \ + uint16x8_t __s1_670 = __p1_670; \ + uint32x4_t __ret_670; \ + __ret_670 = vmull_u16(__s0_670, splat_laneq_u16(__s1_670, __p2_670)); \ + __ret_670; \ +}) +#else +#define vmull_laneq_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ + uint16x4_t __s0_671 = __p0_671; \ + uint16x8_t __s1_671 = __p1_671; \ + uint16x4_t __rev0_671; __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 3, 2, 1, 0); \ + uint16x8_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t 
__ret_671; \
+  __ret_671 = __noswap_vmull_u16(__rev0_671, __noswap_splat_laneq_u16(__rev1_671, __p2_671)); \
+  __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \
+  __ret_671; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  int32x2_t __s0_672 = __p0_672; \
+  int32x4_t __s1_672 = __p1_672; \
+  int64x2_t __ret_672; \
+  __ret_672 = vmull_s32(__s0_672, splat_laneq_s32(__s1_672, __p2_672)); \
+  __ret_672; \
+})
+#else
+#define vmull_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  int32x2_t __s0_673 = __p0_673; \
+  int32x4_t __s1_673 = __p1_673; \
+  int32x2_t __rev0_673; __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 1, 0); \
+  int32x4_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \
+  int64x2_t __ret_673; \
+  __ret_673 = __noswap_vmull_s32(__rev0_673, __noswap_splat_laneq_s32(__rev1_673, __p2_673)); \
+  __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \
+  __ret_673; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  int16x4_t __s0_674 = __p0_674; \
+  int16x8_t __s1_674 = __p1_674; \
+  int32x4_t __ret_674; \
+  __ret_674 = vmull_s16(__s0_674, splat_laneq_s16(__s1_674, __p2_674)); \
+  __ret_674; \
+})
+#else
+#define vmull_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  int16x4_t __s0_675 = __p0_675; \
+  int16x8_t __s1_675 = __p1_675; \
+  int16x4_t __rev0_675; __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 3, 2, 1, 0); \
+  int16x8_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_675; \
+  __ret_675 = __noswap_vmull_s16(__rev0_675, __noswap_splat_laneq_s16(__rev1_675, __p2_675)); \
+  __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \
+  __ret_675; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#else
+__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
+  float64x2_t __ret;
+  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+  return __ret;
+}
+#endif
+
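The vmulx family here and below lowers to the AArch64 FMULX instruction, which behaves like an ordinary floating-point multiply except that (+/-0) * (+/-Inf) returns +/-2.0 instead of NaN. A minimal sketch, assuming an AArch64 target (the name mulx_scalar_example is illustrative only):

#include <arm_neon.h>

/* vmulxd_f64 maps to scalar FMULX: 0.0 * INFINITY yields 2.0 here,
   where a plain C multiply would yield NaN. */
static inline float64_t mulx_scalar_example(float64_t a, float64_t b) {
  return vmulxd_f64(a, b);
}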
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+  return __ret;
+}
+#endif
+
+__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
+  float64x1_t __ret;
+  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+  return __ret;
+}
+#endif
+
+__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
+  float64_t __ret;
+  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
+  return __ret;
+}
+__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
+  float32_t __ret;
+  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
+  return __ret;
+}
+#define vmulxd_lane_f64(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  float64_t __s0_676 = __p0_676; \
+  float64x1_t __s1_676 = __p1_676; \
+  float64_t __ret_676; \
+  __ret_676 = vmulxd_f64(__s0_676, vget_lane_f64(__s1_676, __p2_676)); \
+  __ret_676; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define vmulxs_lane_f32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  float32_t __s0_677 = __p0_677; \
+  float32x2_t __s1_677 = __p1_677; \
+  float32_t __ret_677; \
+  __ret_677 = vmulxs_f32(__s0_677, vget_lane_f32(__s1_677, __p2_677)); \
+  __ret_677; \
+})
+#else
+#define vmulxs_lane_f32(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  float32_t __s0_678 = __p0_678; \
+  float32x2_t __s1_678 = __p1_678; \
+  float32x2_t __rev1_678; __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
+  float32_t __ret_678; \
+  __ret_678 = vmulxs_f32(__s0_678, __noswap_vget_lane_f32(__rev1_678, __p2_678)); \
+  __ret_678; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulxq_lane_f64(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  float64x2_t __s0_679 = __p0_679; \
+  float64x1_t __s1_679 = __p1_679; \
+  float64x2_t __ret_679; \
+  __ret_679 = vmulxq_f64(__s0_679, splatq_lane_f64(__s1_679, __p2_679)); \
+  __ret_679; \
+})
+#else
+#define vmulxq_lane_f64(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  float64x2_t __s0_680 = __p0_680; \
+  float64x1_t __s1_680 = __p1_680; \
+  float64x2_t __rev0_680; __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 1, 0); \
+  float64x2_t __ret_680; \
+  __ret_680 = __noswap_vmulxq_f64(__rev0_680, __noswap_splatq_lane_f64(__s1_680, __p2_680)); \
+  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 1, 0); \
+  __ret_680; \
+})
+#endif
+
__s1_682 = __p1_682; \ + float32x4_t __rev0_682; __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \ + float32x2_t __rev1_682; __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \ + float32x4_t __ret_682; \ + __ret_682 = __noswap_vmulxq_f32(__rev0_682, __noswap_splatq_lane_f32(__rev1_682, __p2_682)); \ + __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \ + __ret_682; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f32(__p0_683, __p1_683, __p2_683) __extension__ ({ \ + float32x2_t __s0_683 = __p0_683; \ + float32x2_t __s1_683 = __p1_683; \ + float32x2_t __ret_683; \ + __ret_683 = vmulx_f32(__s0_683, splat_lane_f32(__s1_683, __p2_683)); \ + __ret_683; \ +}) +#else +#define vmulx_lane_f32(__p0_684, __p1_684, __p2_684) __extension__ ({ \ + float32x2_t __s0_684 = __p0_684; \ + float32x2_t __s1_684 = __p1_684; \ + float32x2_t __rev0_684; __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \ + float32x2_t __rev1_684; __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \ + float32x2_t __ret_684; \ + __ret_684 = __noswap_vmulx_f32(__rev0_684, __noswap_splat_lane_f32(__rev1_684, __p2_684)); \ + __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 1, 0); \ + __ret_684; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxd_laneq_f64(__p0_685, __p1_685, __p2_685) __extension__ ({ \ + float64_t __s0_685 = __p0_685; \ + float64x2_t __s1_685 = __p1_685; \ + float64_t __ret_685; \ + __ret_685 = vmulxd_f64(__s0_685, vgetq_lane_f64(__s1_685, __p2_685)); \ + __ret_685; \ +}) +#else +#define vmulxd_laneq_f64(__p0_686, __p1_686, __p2_686) __extension__ ({ \ + float64_t __s0_686 = __p0_686; \ + float64x2_t __s1_686 = __p1_686; \ + float64x2_t __rev1_686; __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 1, 0); \ + float64_t __ret_686; \ + __ret_686 = vmulxd_f64(__s0_686, __noswap_vgetq_lane_f64(__rev1_686, __p2_686)); \ + __ret_686; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_laneq_f32(__p0_687, __p1_687, __p2_687) __extension__ ({ \ + float32_t __s0_687 = __p0_687; \ + float32x4_t __s1_687 = __p1_687; \ + float32_t __ret_687; \ + __ret_687 = vmulxs_f32(__s0_687, vgetq_lane_f32(__s1_687, __p2_687)); \ + __ret_687; \ +}) +#else +#define vmulxs_laneq_f32(__p0_688, __p1_688, __p2_688) __extension__ ({ \ + float32_t __s0_688 = __p0_688; \ + float32x4_t __s1_688 = __p1_688; \ + float32x4_t __rev1_688; __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \ + float32_t __ret_688; \ + __ret_688 = vmulxs_f32(__s0_688, __noswap_vgetq_lane_f32(__rev1_688, __p2_688)); \ + __ret_688; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f64(__p0_689, __p1_689, __p2_689) __extension__ ({ \ + float64x2_t __s0_689 = __p0_689; \ + float64x2_t __s1_689 = __p1_689; \ + float64x2_t __ret_689; \ + __ret_689 = vmulxq_f64(__s0_689, splatq_laneq_f64(__s1_689, __p2_689)); \ + __ret_689; \ +}) +#else +#define vmulxq_laneq_f64(__p0_690, __p1_690, __p2_690) __extension__ ({ \ + float64x2_t __s0_690 = __p0_690; \ + float64x2_t __s1_690 = __p1_690; \ + float64x2_t __rev0_690; __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \ + float64x2_t __rev1_690; __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \ + float64x2_t __ret_690; \ + __ret_690 = __noswap_vmulxq_f64(__rev0_690, __noswap_splatq_laneq_f64(__rev1_690, __p2_690)); \ + __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 1, 0); \ + __ret_690; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vmulxq_laneq_f32(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+ float32x4_t __s0_691 = __p0_691; \
+ float32x4_t __s1_691 = __p1_691; \
+ float32x4_t __ret_691; \
+ __ret_691 = vmulxq_f32(__s0_691, splatq_laneq_f32(__s1_691, __p2_691)); \
+ __ret_691; \
+})
+#else
+#define vmulxq_laneq_f32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+ float32x4_t __s0_692 = __p0_692; \
+ float32x4_t __s1_692 = __p1_692; \
+ float32x4_t __rev0_692; __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \
+ float32x4_t __rev1_692; __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \
+ float32x4_t __ret_692; \
+ __ret_692 = __noswap_vmulxq_f32(__rev0_692, __noswap_splatq_laneq_f32(__rev1_692, __p2_692)); \
+ __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 3, 2, 1, 0); \
+ __ret_692; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulx_laneq_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+ float32x2_t __s0_693 = __p0_693; \
+ float32x4_t __s1_693 = __p1_693; \
+ float32x2_t __ret_693; \
+ __ret_693 = vmulx_f32(__s0_693, splat_laneq_f32(__s1_693, __p2_693)); \
+ __ret_693; \
+})
+#else
+#define vmulx_laneq_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+ float32x2_t __s0_694 = __p0_694; \
+ float32x4_t __s1_694 = __p1_694; \
+ float32x2_t __rev0_694; __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \
+ float32x4_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
+ float32x2_t __ret_694; \
+ __ret_694 = __noswap_vmulx_f32(__rev0_694, __noswap_splat_laneq_f32(__rev1_694, __p2_694)); \
+ __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 1, 0); \
+ __ret_694; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+ float64x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float64x2_t vnegq_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+ int64x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int64x2_t vnegq_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+__ai float64x1_t vneg_f64(float64x1_t __p0) {
+ float64x1_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+__ai int64x1_t vneg_s64(int64x1_t __p0) {
+ int64x1_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+__ai int64_t vnegd_s64(int64_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
+ return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
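+// Illustrative note, not part of the generated header: vpaddq_u8 above is a
+// pairwise add over the concatenation of its two operands (it typically
+// lowers to a single AArch64 ADDP instruction), so the low half of the
+// result holds the pairwise sums of __p0 and the high half those of __p1.
+// A minimal usage sketch, assuming an AArch64 little-endian target;
+// `pairwise_sums_u8` is a hypothetical name:
+//
+//   uint8x16_t pairwise_sums_u8(uint8x16_t a, uint8x16_t b) {
+//     // result[0] = a[0] + a[1], ..., result[8] = b[0] + b[1], ...
+//     return vpaddq_u8(a, b);
+//   }
+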
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
+ return __ret;
+}
+#else
+__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float64x2_t __ret;
+ __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return
__ret;
+}
+#else
+__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
+ return __ret;
+}
+#else
+__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64_t __ret;
+ __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
+ return __ret;
+}
+#else
+__ai float64_t vpaddd_f64(float64x2_t __p0) {
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float64_t __ret;
+ __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
+ return __ret;
+}
+#else
+__ai int64_t vpaddd_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64_t __ret;
+ __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
+ return __ret;
+}
+#endif
+
+#ifdef
__LITTLE_ENDIAN__ +__ai float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); + return __ret; +} +#else +__ai float32_t vpadds_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpmaxqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); + return __ret; +} +#else +__ai float32_t vpmaxs_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + 
__ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0); + return __ret; +} +#else +__ai float32_t vpmaxnms_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0); + return __ret; +} +#else +__ai float64_t vpminqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); + return __ret; +} +#else +__ai float32_t vpmins_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); + return __ret; 
+} +#else +__ai float64_t vpminnmqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); + return __ret; +} +#else +__ai float32_t vpminnms_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int8_t vqabsb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); + return __ret; +} +__ai int32_t vqabss_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); + return __ret; +} +__ai int64_t vqabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); + return __ret; +} +__ai int16_t vqabsh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); + return __ret; +} +__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); + return __ret; +} +__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; 
+} +#else +__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s32(__p0_695, __p1_695, __p2_695, __p3_695) __extension__ ({ \ + int64x2_t __s0_695 = __p0_695; \ + int32x4_t __s1_695 = __p1_695; \ + int32x2_t __s2_695 = __p2_695; \ + int64x2_t __ret_695; \ + __ret_695 = vqdmlal_s32(__s0_695, vget_high_s32(__s1_695), splat_lane_s32(__s2_695, __p3_695)); \ + __ret_695; \ +}) +#else +#define vqdmlal_high_lane_s32(__p0_696, __p1_696, __p2_696, __p3_696) __extension__ ({ \ + int64x2_t __s0_696 = __p0_696; \ + int32x4_t __s1_696 = __p1_696; \ + int32x2_t __s2_696 = __p2_696; \ + int64x2_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \ + int32x4_t __rev1_696; __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 3, 2, 1, 0); \ + int32x2_t __rev2_696; __rev2_696 = __builtin_shufflevector(__s2_696, __s2_696, 1, 0); \ + int64x2_t __ret_696; \ + __ret_696 = __noswap_vqdmlal_s32(__rev0_696, __noswap_vget_high_s32(__rev1_696), __noswap_splat_lane_s32(__rev2_696, __p3_696)); \ + __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \ + __ret_696; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s16(__p0_697, __p1_697, __p2_697, __p3_697) __extension__ ({ \ + int32x4_t __s0_697 = __p0_697; \ + int16x8_t __s1_697 = __p1_697; \ + int16x4_t __s2_697 = __p2_697; \ + int32x4_t __ret_697; \ + __ret_697 = vqdmlal_s16(__s0_697, vget_high_s16(__s1_697), splat_lane_s16(__s2_697, __p3_697)); \ + __ret_697; \ +}) +#else +#define vqdmlal_high_lane_s16(__p0_698, __p1_698, __p2_698, __p3_698) __extension__ ({ \ + int32x4_t __s0_698 = __p0_698; \ + int16x8_t __s1_698 = __p1_698; \ + int16x4_t __s2_698 = __p2_698; \ + int32x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ + int16x8_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_698; __rev2_698 = __builtin_shufflevector(__s2_698, __s2_698, 3, 2, 1, 0); \ + int32x4_t __ret_698; \ + __ret_698 = __noswap_vqdmlal_s16(__rev0_698, __noswap_vget_high_s16(__rev1_698), __noswap_splat_lane_s16(__rev2_698, __p3_698)); \ + __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \ + __ret_698; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s32(__p0_699, __p1_699, __p2_699, __p3_699) __extension__ ({ \ + int64x2_t __s0_699 = __p0_699; \ + int32x4_t __s1_699 = __p1_699; \ + int32x4_t __s2_699 = __p2_699; \ + int64x2_t __ret_699; \ + __ret_699 = vqdmlal_s32(__s0_699, vget_high_s32(__s1_699), splat_laneq_s32(__s2_699, __p3_699)); \ + __ret_699; \ +}) +#else +#define vqdmlal_high_laneq_s32(__p0_700, __p1_700, __p2_700, __p3_700) __extension__ ({ \ + int64x2_t __s0_700 = __p0_700; \ + int32x4_t __s1_700 = __p1_700; \ + int32x4_t __s2_700 = __p2_700; \ + int64x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ + int32x4_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \ + int32x4_t __rev2_700; __rev2_700 = __builtin_shufflevector(__s2_700, __s2_700, 3, 2, 1, 0); \ + int64x2_t __ret_700; \ + __ret_700 = __noswap_vqdmlal_s32(__rev0_700, __noswap_vget_high_s32(__rev1_700), __noswap_splat_laneq_s32(__rev2_700, __p3_700)); \ + __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \ + __ret_700; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s16(__p0_701, __p1_701, __p2_701, __p3_701) __extension__ ({ \ + int32x4_t __s0_701 = __p0_701; \ + int16x8_t __s1_701 = __p1_701; \ + int16x8_t __s2_701 = __p2_701; \ + int32x4_t __ret_701; \ + __ret_701 = vqdmlal_s16(__s0_701, vget_high_s16(__s1_701), splat_laneq_s16(__s2_701, __p3_701)); \ + __ret_701; \ +}) +#else +#define vqdmlal_high_laneq_s16(__p0_702, __p1_702, __p2_702, __p3_702) __extension__ ({ \ + int32x4_t __s0_702 = __p0_702; \ + int16x8_t __s1_702 = __p1_702; \ + int16x8_t __s2_702 = __p2_702; \ + int32x4_t __rev0_702; __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 3, 2, 1, 0); \ + int16x8_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_702; __rev2_702 = __builtin_shufflevector(__s2_702, __s2_702, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_702; \ + __ret_702 = __noswap_vqdmlal_s16(__rev0_702, __noswap_vget_high_s16(__rev1_702), __noswap_splat_laneq_s16(__rev2_702, __p3_702)); \ + __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \ + __ret_702; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s32(__p0_703, __p1_703, __p2_703, __p3_703) __extension__ ({ \ + int64x2_t __s0_703 = __p0_703; \ + int32x2_t __s1_703 = __p1_703; \ + int32x4_t __s2_703 = __p2_703; \ + int64x2_t __ret_703; \ + __ret_703 = vqdmlal_s32(__s0_703, __s1_703, splat_laneq_s32(__s2_703, __p3_703)); \ + __ret_703; \ +}) +#else +#define vqdmlal_laneq_s32(__p0_704, __p1_704, __p2_704, __p3_704) __extension__ ({ \ + int64x2_t __s0_704 = __p0_704; \ + int32x2_t __s1_704 = __p1_704; \ + int32x4_t __s2_704 = __p2_704; \ + int64x2_t __rev0_704; __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 1, 0); \ + int32x2_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 1, 0); \ + int32x4_t __rev2_704; __rev2_704 = __builtin_shufflevector(__s2_704, __s2_704, 3, 2, 1, 0); \ + int64x2_t __ret_704; \ + __ret_704 = __noswap_vqdmlal_s32(__rev0_704, __rev1_704, __noswap_splat_laneq_s32(__rev2_704, __p3_704)); \ + __ret_704 = 
__builtin_shufflevector(__ret_704, __ret_704, 1, 0); \ + __ret_704; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s16(__p0_705, __p1_705, __p2_705, __p3_705) __extension__ ({ \ + int32x4_t __s0_705 = __p0_705; \ + int16x4_t __s1_705 = __p1_705; \ + int16x8_t __s2_705 = __p2_705; \ + int32x4_t __ret_705; \ + __ret_705 = vqdmlal_s16(__s0_705, __s1_705, splat_laneq_s16(__s2_705, __p3_705)); \ + __ret_705; \ +}) +#else +#define vqdmlal_laneq_s16(__p0_706, __p1_706, __p2_706, __p3_706) __extension__ ({ \ + int32x4_t __s0_706 = __p0_706; \ + int16x4_t __s1_706 = __p1_706; \ + int16x8_t __s2_706 = __p2_706; \ + int32x4_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \ + int16x4_t __rev1_706; __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \ + int16x8_t __rev2_706; __rev2_706 = __builtin_shufflevector(__s2_706, __s2_706, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_706; \ + __ret_706 = __noswap_vqdmlal_s16(__rev0_706, __rev1_706, __noswap_splat_laneq_s16(__rev2_706, __p3_706)); \ + __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \ + __ret_706; \ +}) +#endif + +__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); + return __ret; +} +__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s32(__p0_707, __p1_707, __p2_707, __p3_707) __extension__ ({ \ + int64x2_t __s0_707 = __p0_707; \ + int32x4_t __s1_707 = __p1_707; \ + int32x2_t __s2_707 = __p2_707; \ + int64x2_t __ret_707; \ + __ret_707 = vqdmlsl_s32(__s0_707, vget_high_s32(__s1_707), splat_lane_s32(__s2_707, __p3_707)); \ + __ret_707; \ +}) +#else +#define vqdmlsl_high_lane_s32(__p0_708, __p1_708, __p2_708, __p3_708) __extension__ ({ \ + int64x2_t __s0_708 = __p0_708; \ + int32x4_t 
__s1_708 = __p1_708; \ + int32x2_t __s2_708 = __p2_708; \ + int64x2_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \ + int32x4_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \ + int32x2_t __rev2_708; __rev2_708 = __builtin_shufflevector(__s2_708, __s2_708, 1, 0); \ + int64x2_t __ret_708; \ + __ret_708 = __noswap_vqdmlsl_s32(__rev0_708, __noswap_vget_high_s32(__rev1_708), __noswap_splat_lane_s32(__rev2_708, __p3_708)); \ + __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 1, 0); \ + __ret_708; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s16(__p0_709, __p1_709, __p2_709, __p3_709) __extension__ ({ \ + int32x4_t __s0_709 = __p0_709; \ + int16x8_t __s1_709 = __p1_709; \ + int16x4_t __s2_709 = __p2_709; \ + int32x4_t __ret_709; \ + __ret_709 = vqdmlsl_s16(__s0_709, vget_high_s16(__s1_709), splat_lane_s16(__s2_709, __p3_709)); \ + __ret_709; \ +}) +#else +#define vqdmlsl_high_lane_s16(__p0_710, __p1_710, __p2_710, __p3_710) __extension__ ({ \ + int32x4_t __s0_710 = __p0_710; \ + int16x8_t __s1_710 = __p1_710; \ + int16x4_t __s2_710 = __p2_710; \ + int32x4_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \ + int16x8_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_710; __rev2_710 = __builtin_shufflevector(__s2_710, __s2_710, 3, 2, 1, 0); \ + int32x4_t __ret_710; \ + __ret_710 = __noswap_vqdmlsl_s16(__rev0_710, __noswap_vget_high_s16(__rev1_710), __noswap_splat_lane_s16(__rev2_710, __p3_710)); \ + __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 3, 2, 1, 0); \ + __ret_710; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \ + int64x2_t __s0_711 = __p0_711; \ + int32x4_t __s1_711 = __p1_711; \ + int32x4_t __s2_711 = __p2_711; \ + int64x2_t __ret_711; \ + __ret_711 = vqdmlsl_s32(__s0_711, vget_high_s32(__s1_711), splat_laneq_s32(__s2_711, __p3_711)); \ + __ret_711; \ +}) +#else +#define vqdmlsl_high_laneq_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \ + int64x2_t __s0_712 = __p0_712; \ + int32x4_t __s1_712 = __p1_712; \ + int32x4_t __s2_712 = __p2_712; \ + int64x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ + int32x4_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \ + int32x4_t __rev2_712; __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 3, 2, 1, 0); \ + int64x2_t __ret_712; \ + __ret_712 = __noswap_vqdmlsl_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_laneq_s32(__rev2_712, __p3_712)); \ + __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \ + __ret_712; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \ + int32x4_t __s0_713 = __p0_713; \ + int16x8_t __s1_713 = __p1_713; \ + int16x8_t __s2_713 = __p2_713; \ + int32x4_t __ret_713; \ + __ret_713 = vqdmlsl_s16(__s0_713, vget_high_s16(__s1_713), splat_laneq_s16(__s2_713, __p3_713)); \ + __ret_713; \ +}) +#else +#define vqdmlsl_high_laneq_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \ + int32x4_t __s0_714 = __p0_714; \ + int16x8_t __s1_714 = __p1_714; \ + int16x8_t __s2_714 = __p2_714; \ + int32x4_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \ + int16x8_t __rev1_714; __rev1_714 = 
__builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_714; __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_714; \ + __ret_714 = __noswap_vqdmlsl_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_laneq_s16(__rev2_714, __p3_714)); \ + __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \ + __ret_714; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64_t __ret; \ + 
__ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \ + int64x2_t __s0_715 = __p0_715; \ + int32x2_t __s1_715 = __p1_715; \ + int32x4_t __s2_715 = __p2_715; \ + int64x2_t __ret_715; \ + __ret_715 = vqdmlsl_s32(__s0_715, __s1_715, splat_laneq_s32(__s2_715, __p3_715)); \ + __ret_715; \ +}) +#else +#define vqdmlsl_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \ + int64x2_t __s0_716 = __p0_716; \ + int32x2_t __s1_716 = __p1_716; \ + int32x4_t __s2_716 = __p2_716; \ + int64x2_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 0); \ + int32x2_t __rev1_716; __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 1, 0); \ + int32x4_t __rev2_716; __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \ + int64x2_t __ret_716; \ + __ret_716 = __noswap_vqdmlsl_s32(__rev0_716, __rev1_716, __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \ + __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \ + __ret_716; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \ + int32x4_t __s0_717 = __p0_717; \ + int16x4_t __s1_717 = __p1_717; \ + int16x8_t __s2_717 = __p2_717; \ + int32x4_t __ret_717; \ + __ret_717 = vqdmlsl_s16(__s0_717, __s1_717, splat_laneq_s16(__s2_717, __p3_717)); \ + __ret_717; \ +}) +#else +#define vqdmlsl_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \ + int32x4_t __s0_718 = __p0_718; \ + int16x4_t __s1_718 = __p1_718; \ + int16x8_t __s2_718 = __p2_718; \ + int32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ + int16x4_t __rev1_718; __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 3, 2, 1, 0); \ + int16x8_t __rev2_718; __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_718; \ + __ret_718 = __noswap_vqdmlsl_s16(__rev0_718, __rev1_718, __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \ + __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \ + __ret_718; \ +}) +#endif + +__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s32(__p0, __p1, __p2) 
__extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_lane_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \ + int32_t __s0_719 = __p0_719; \ + int32x2_t __s1_719 = __p1_719; \ + int32_t __ret_719; \ + __ret_719 = vqdmulhs_s32(__s0_719, vget_lane_s32(__s1_719, __p2_719)); \ + __ret_719; \ +}) +#else +#define vqdmulhs_lane_s32(__p0_720, __p1_720, __p2_720) __extension__ ({ \ + int32_t __s0_720 = __p0_720; \ + int32x2_t __s1_720 = __p1_720; \ + int32x2_t __rev1_720; __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \ + int32_t __ret_720; \ + __ret_720 = vqdmulhs_s32(__s0_720, __noswap_vget_lane_s32(__rev1_720, __p2_720)); \ + __ret_720; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_lane_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \ + int16_t __s0_721 = __p0_721; \ 
+ int16x4_t __s1_721 = __p1_721; \ + int16_t __ret_721; \ + __ret_721 = vqdmulhh_s16(__s0_721, vget_lane_s16(__s1_721, __p2_721)); \ + __ret_721; \ +}) +#else +#define vqdmulhh_lane_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \ + int16_t __s0_722 = __p0_722; \ + int16x4_t __s1_722 = __p1_722; \ + int16x4_t __rev1_722; __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \ + int16_t __ret_722; \ + __ret_722 = vqdmulhh_s16(__s0_722, __noswap_vget_lane_s16(__rev1_722, __p2_722)); \ + __ret_722; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_laneq_s32(__p0_723, __p1_723, __p2_723) __extension__ ({ \ + int32_t __s0_723 = __p0_723; \ + int32x4_t __s1_723 = __p1_723; \ + int32_t __ret_723; \ + __ret_723 = vqdmulhs_s32(__s0_723, vgetq_lane_s32(__s1_723, __p2_723)); \ + __ret_723; \ +}) +#else +#define vqdmulhs_laneq_s32(__p0_724, __p1_724, __p2_724) __extension__ ({ \ + int32_t __s0_724 = __p0_724; \ + int32x4_t __s1_724 = __p1_724; \ + int32x4_t __rev1_724; __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \ + int32_t __ret_724; \ + __ret_724 = vqdmulhs_s32(__s0_724, __noswap_vgetq_lane_s32(__rev1_724, __p2_724)); \ + __ret_724; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_laneq_s16(__p0_725, __p1_725, __p2_725) __extension__ ({ \ + int16_t __s0_725 = __p0_725; \ + int16x8_t __s1_725 = __p1_725; \ + int16_t __ret_725; \ + __ret_725 = vqdmulhh_s16(__s0_725, vgetq_lane_s16(__s1_725, __p2_725)); \ + __ret_725; \ +}) +#else +#define vqdmulhh_laneq_s16(__p0_726, __p1_726, __p2_726) __extension__ ({ \ + int16_t __s0_726 = __p0_726; \ + int16x8_t __s1_726 = __p1_726; \ + int16x8_t __rev1_726; __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_726; \ + __ret_726 = vqdmulhh_s16(__s0_726, __noswap_vgetq_lane_s16(__rev1_726, __p2_726)); \ + __ret_726; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); + return __ret; +} +__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s32(__p0_727, __p1_727, __p2_727) __extension__ ({ \ + int32x4_t __s0_727 = __p0_727; \ + int32x2_t __s1_727 = __p1_727; \ + int64x2_t __ret_727; \ + __ret_727 = vqdmull_s32(vget_high_s32(__s0_727), splat_lane_s32(__s1_727, __p2_727)); \ + __ret_727; \ +}) +#else +#define vqdmull_high_lane_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \ + int32x4_t __s0_728 = __p0_728; \ + int32x2_t __s1_728 
= __p1_728; \ + int32x4_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \ + int32x2_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 1, 0); \ + int64x2_t __ret_728; \ + __ret_728 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_728), __noswap_splat_lane_s32(__rev1_728, __p2_728)); \ + __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \ + __ret_728; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s16(__p0_729, __p1_729, __p2_729) __extension__ ({ \ + int16x8_t __s0_729 = __p0_729; \ + int16x4_t __s1_729 = __p1_729; \ + int32x4_t __ret_729; \ + __ret_729 = vqdmull_s16(vget_high_s16(__s0_729), splat_lane_s16(__s1_729, __p2_729)); \ + __ret_729; \ +}) +#else +#define vqdmull_high_lane_s16(__p0_730, __p1_730, __p2_730) __extension__ ({ \ + int16x8_t __s0_730 = __p0_730; \ + int16x4_t __s1_730 = __p1_730; \ + int16x8_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \ + int32x4_t __ret_730; \ + __ret_730 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_730), __noswap_splat_lane_s16(__rev1_730, __p2_730)); \ + __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ + __ret_730; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \ + int32x4_t __s0_731 = __p0_731; \ + int32x4_t __s1_731 = __p1_731; \ + int64x2_t __ret_731; \ + __ret_731 = vqdmull_s32(vget_high_s32(__s0_731), splat_laneq_s32(__s1_731, __p2_731)); \ + __ret_731; \ +}) +#else +#define vqdmull_high_laneq_s32(__p0_732, __p1_732, __p2_732) __extension__ ({ \ + int32x4_t __s0_732 = __p0_732; \ + int32x4_t __s1_732 = __p1_732; \ + int32x4_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 3, 2, 1, 0); \ + int32x4_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 3, 2, 1, 0); \ + int64x2_t __ret_732; \ + __ret_732 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_732), __noswap_splat_laneq_s32(__rev1_732, __p2_732)); \ + __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \ + __ret_732; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \ + int16x8_t __s0_733 = __p0_733; \ + int16x8_t __s1_733 = __p1_733; \ + int32x4_t __ret_733; \ + __ret_733 = vqdmull_s16(vget_high_s16(__s0_733), splat_laneq_s16(__s1_733, __p2_733)); \ + __ret_733; \ +}) +#else +#define vqdmull_high_laneq_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \ + int16x8_t __s0_734 = __p0_734; \ + int16x8_t __s1_734 = __p1_734; \ + int16x8_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_734; \ + __ret_734 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_734), __noswap_splat_laneq_s16(__rev1_734, __p2_734)); \ + __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \ + __ret_734; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
int64x2_t __ret; + __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \ + int32_t __s0_735 = __p0_735; \ + int32x2_t __s1_735 = __p1_735; \ + int64_t __ret_735; \ + __ret_735 = vqdmulls_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \ + __ret_735; \ +}) +#else +#define vqdmulls_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \ + int32_t __s0_736 = __p0_736; \ + int32x2_t __s1_736 = __p1_736; \ + int32x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ + int64_t __ret_736; \ + __ret_736 = vqdmulls_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \ + __ret_736; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \ + int16_t __s0_737 = __p0_737; \ + int16x4_t __s1_737 = __p1_737; \ + int32_t __ret_737; \ + __ret_737 = vqdmullh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \ + __ret_737; \ +}) +#else +#define vqdmullh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ + int16_t __s0_738 = __p0_738; \ + int16x4_t __s1_738 = __p1_738; \ + int16x4_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \ + int32_t __ret_738; \ + __ret_738 = vqdmullh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \ + __ret_738; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \ + int32_t __s0_739 = __p0_739; \ + int32x4_t __s1_739 = __p1_739; \ + int64_t __ret_739; \ + __ret_739 = vqdmulls_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \ + __ret_739; \ +}) +#else +#define vqdmulls_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \ + int32_t __s0_740 = __p0_740; \ + int32x4_t __s1_740 = __p1_740; \ + int32x4_t __rev1_740; __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \ + int64_t __ret_740; \ + __ret_740 = vqdmulls_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \ + __ret_740; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \ + int16_t __s0_741 = __p0_741; \ + int16x8_t __s1_741 = __p1_741; \ + int32_t __ret_741; \ + __ret_741 = vqdmullh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \ + __ret_741; \ +}) +#else +#define vqdmullh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \ + int16_t __s0_742 = __p0_742; \ + int16x8_t __s1_742 = __p1_742; \ + int16x8_t __rev1_742; __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32_t __ret_742; \ + __ret_742 = vqdmullh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \ + __ret_742; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \ + int32x2_t __s0_743 = __p0_743; \ + int32x4_t 
__s1_743 = __p1_743; \ + int64x2_t __ret_743; \ + __ret_743 = vqdmull_s32(__s0_743, splat_laneq_s32(__s1_743, __p2_743)); \ + __ret_743; \ +}) +#else +#define vqdmull_laneq_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \ + int32x2_t __s0_744 = __p0_744; \ + int32x4_t __s1_744 = __p1_744; \ + int32x2_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 1, 0); \ + int32x4_t __rev1_744; __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 3, 2, 1, 0); \ + int64x2_t __ret_744; \ + __ret_744 = __noswap_vqdmull_s32(__rev0_744, __noswap_splat_laneq_s32(__rev1_744, __p2_744)); \ + __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \ + __ret_744; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \ + int16x4_t __s0_745 = __p0_745; \ + int16x8_t __s1_745 = __p1_745; \ + int32x4_t __ret_745; \ + __ret_745 = vqdmull_s16(__s0_745, splat_laneq_s16(__s1_745, __p2_745)); \ + __ret_745; \ +}) +#else +#define vqdmull_laneq_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \ + int16x4_t __s0_746 = __p0_746; \ + int16x8_t __s1_746 = __p1_746; \ + int16x4_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \ + int16x8_t __rev1_746; __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_746; \ + __ret_746 = __noswap_vqdmull_s16(__rev0_746, __noswap_splat_laneq_s16(__rev1_746, __p2_746)); \ + __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ + __ret_746; \ +}) +#endif + +__ai int16_t vqmovns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); + return __ret; +} +__ai int32_t vqmovnd_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); + return __ret; +} +__ai int8_t vqmovnh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); + return __ret; +} +__ai uint16_t vqmovns_u32(uint32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); + return __ret; +} +__ai uint32_t vqmovnd_u64(uint64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); + return __ret; +} +__ai uint8_t vqmovnh_u16(uint16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); + return __ret; +} +#else +__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); + return __ret; +} +#else +__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); + return __ret; +} +#else +__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint16_t vqmovuns_s32(int32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); + return __ret; +} +__ai uint32_t vqmovund_s64(int64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); + return __ret; +} +__ai uint8_t vqmovunh_s16(int16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vqneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); + return __ret; +} +__ai int8_t vqnegb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); + return __ret; +} +__ai int32_t vqnegs_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); + return __ret; +} +__ai int64_t vqnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); + return __ret; +} +__ai int16_t vqnegh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); + return __ret; +} +__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); + return __ret; +} +__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_lane_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \ + int32_t __s0_747 = __p0_747; \ + int32x2_t __s1_747 = __p1_747; \ + int32_t __ret_747; \ + __ret_747 = vqrdmulhs_s32(__s0_747, vget_lane_s32(__s1_747, __p2_747)); \ + __ret_747; \ +}) +#else +#define vqrdmulhs_lane_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \ + int32_t __s0_748 = __p0_748; \ + int32x2_t __s1_748 = __p1_748; \ + int32x2_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 1, 0); \ + int32_t __ret_748; \ + __ret_748 = vqrdmulhs_s32(__s0_748, __noswap_vget_lane_s32(__rev1_748, __p2_748)); \ + __ret_748; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_lane_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \ + int16_t __s0_749 = __p0_749; \ + int16x4_t __s1_749 = __p1_749; \ + int16_t __ret_749; \ + __ret_749 = vqrdmulhh_s16(__s0_749, vget_lane_s16(__s1_749, __p2_749)); \ + __ret_749; \ +}) +#else +#define vqrdmulhh_lane_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \ + int16_t __s0_750 = __p0_750; \ + int16x4_t __s1_750 = __p1_750; \ + int16x4_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 3, 2, 1, 0); \ + int16_t __ret_750; \ + __ret_750 = 
vqrdmulhh_s16(__s0_750, __noswap_vget_lane_s16(__rev1_750, __p2_750)); \ + __ret_750; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_laneq_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \ + int32_t __s0_751 = __p0_751; \ + int32x4_t __s1_751 = __p1_751; \ + int32_t __ret_751; \ + __ret_751 = vqrdmulhs_s32(__s0_751, vgetq_lane_s32(__s1_751, __p2_751)); \ + __ret_751; \ +}) +#else +#define vqrdmulhs_laneq_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \ + int32_t __s0_752 = __p0_752; \ + int32x4_t __s1_752 = __p1_752; \ + int32x4_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \ + int32_t __ret_752; \ + __ret_752 = vqrdmulhs_s32(__s0_752, __noswap_vgetq_lane_s32(__rev1_752, __p2_752)); \ + __ret_752; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_laneq_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \ + int16_t __s0_753 = __p0_753; \ + int16x8_t __s1_753 = __p1_753; \ + int16_t __ret_753; \ + __ret_753 = vqrdmulhh_s16(__s0_753, vgetq_lane_s16(__s1_753, __p2_753)); \ + __ret_753; \ +}) +#else +#define vqrdmulhh_laneq_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \ + int16_t __s0_754 = __p0_754; \ + int16x8_t __s1_754 = __p1_754; \ + int16x8_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_754; \ + __ret_754 = vqrdmulhh_s16(__s0_754, __noswap_vgetq_lane_s16(__rev1_754, __p2_754)); \ + __ret_754; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 
1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u32(__p0_755, __p1_755, __p2_755) __extension__ ({ \ + uint16x4_t __s0_755 = __p0_755; \ + uint32x4_t __s1_755 = __p1_755; \ + uint16x8_t __ret_755; \ + __ret_755 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_755), (uint16x4_t)(vqrshrn_n_u32(__s1_755, __p2_755)))); \ + __ret_755; \ +}) +#else +#define vqrshrn_high_n_u32(__p0_756, __p1_756, __p2_756) __extension__ ({ \ + uint16x4_t __s0_756 = __p0_756; \ + uint32x4_t __s1_756 = __p1_756; \ + uint16x4_t __rev0_756; __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \ + uint32x4_t __rev1_756; __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \ + uint16x8_t __ret_756; \ + __ret_756 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_756), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_756, __p2_756)))); \ + __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_756; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u64(__p0_757, __p1_757, __p2_757) __extension__ ({ \ + uint32x2_t __s0_757 = __p0_757; \ + uint64x2_t __s1_757 = __p1_757; \ + uint32x4_t __ret_757; \ + __ret_757 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_757), 
(uint32x2_t)(vqrshrn_n_u64(__s1_757, __p2_757)))); \ + __ret_757; \ +}) +#else +#define vqrshrn_high_n_u64(__p0_758, __p1_758, __p2_758) __extension__ ({ \ + uint32x2_t __s0_758 = __p0_758; \ + uint64x2_t __s1_758 = __p1_758; \ + uint32x2_t __rev0_758; __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \ + uint64x2_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \ + uint32x4_t __ret_758; \ + __ret_758 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_758), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_758, __p2_758)))); \ + __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 3, 2, 1, 0); \ + __ret_758; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u16(__p0_759, __p1_759, __p2_759) __extension__ ({ \ + uint8x8_t __s0_759 = __p0_759; \ + uint16x8_t __s1_759 = __p1_759; \ + uint8x16_t __ret_759; \ + __ret_759 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_759), (uint8x8_t)(vqrshrn_n_u16(__s1_759, __p2_759)))); \ + __ret_759; \ +}) +#else +#define vqrshrn_high_n_u16(__p0_760, __p1_760, __p2_760) __extension__ ({ \ + uint8x8_t __s0_760 = __p0_760; \ + uint16x8_t __s1_760 = __p1_760; \ + uint8x8_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_760; __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_760; \ + __ret_760 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_760), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_760, __p2_760)))); \ + __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_760; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s32(__p0_761, __p1_761, __p2_761) __extension__ ({ \ + int16x4_t __s0_761 = __p0_761; \ + int32x4_t __s1_761 = __p1_761; \ + int16x8_t __ret_761; \ + __ret_761 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_761), (int16x4_t)(vqrshrn_n_s32(__s1_761, __p2_761)))); \ + __ret_761; \ +}) +#else +#define vqrshrn_high_n_s32(__p0_762, __p1_762, __p2_762) __extension__ ({ \ + int16x4_t __s0_762 = __p0_762; \ + int32x4_t __s1_762 = __p1_762; \ + int16x4_t __rev0_762; __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \ + int32x4_t __rev1_762; __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 3, 2, 1, 0); \ + int16x8_t __ret_762; \ + __ret_762 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_762), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_762, __p2_762)))); \ + __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_762; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s64(__p0_763, __p1_763, __p2_763) __extension__ ({ \ + int32x2_t __s0_763 = __p0_763; \ + int64x2_t __s1_763 = __p1_763; \ + int32x4_t __ret_763; \ + __ret_763 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_763), (int32x2_t)(vqrshrn_n_s64(__s1_763, __p2_763)))); \ + __ret_763; \ +}) +#else +#define vqrshrn_high_n_s64(__p0_764, __p1_764, __p2_764) __extension__ ({ \ + int32x2_t __s0_764 = __p0_764; \ + int64x2_t __s1_764 = __p1_764; \ + int32x2_t __rev0_764; __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 1, 0); \ + int64x2_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \ + int32x4_t __ret_764; \ + __ret_764 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_764), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_764, __p2_764)))); \ + __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \ + 
__ret_764; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \ + int8x8_t __s0_765 = __p0_765; \ + int16x8_t __s1_765 = __p1_765; \ + int8x16_t __ret_765; \ + __ret_765 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_765), (int8x8_t)(vqrshrn_n_s16(__s1_765, __p2_765)))); \ + __ret_765; \ +}) +#else +#define vqrshrn_high_n_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \ + int8x8_t __s0_766 = __p0_766; \ + int16x8_t __s1_766 = __p1_766; \ + int8x8_t __rev0_766; __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_766; \ + __ret_766 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_766), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_766, __p2_766)))); \ + __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_766; \ +}) +#endif + +#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \ + int16x4_t __s0_767 = __p0_767; \ + int32x4_t __s1_767 = __p1_767; \ + int16x8_t __ret_767; \ + __ret_767 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_767), (int16x4_t)(vqrshrun_n_s32(__s1_767, __p2_767)))); \ + __ret_767; \ +}) +#else +#define vqrshrun_high_n_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \ + int16x4_t __s0_768 = __p0_768; \ + int32x4_t __s1_768 = __p1_768; \ + int16x4_t __rev0_768; __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 3, 2, 1, 0); \ + int32x4_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \ + int16x8_t __ret_768; \ + __ret_768 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_768), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_768, __p2_768)))); \ + __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_768; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s64(__p0_769, __p1_769, __p2_769) __extension__ ({ \ + int32x2_t __s0_769 = __p0_769; \ + int64x2_t __s1_769 = __p1_769; \ + int32x4_t __ret_769; \ + __ret_769 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_769), (int32x2_t)(vqrshrun_n_s64(__s1_769, __p2_769)))); \ + __ret_769; \ +}) +#else +#define vqrshrun_high_n_s64(__p0_770, __p1_770, __p2_770) __extension__ ({ \ + int32x2_t __s0_770 = __p0_770; \ + int64x2_t __s1_770 = __p1_770; \ + 
int32x2_t __rev0_770; __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 1, 0); \ + int64x2_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 1, 0); \ + int32x4_t __ret_770; \ + __ret_770 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_770), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_770, __p2_770)))); \ + __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \ + __ret_770; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s16(__p0_771, __p1_771, __p2_771) __extension__ ({ \ + int8x8_t __s0_771 = __p0_771; \ + int16x8_t __s1_771 = __p1_771; \ + int8x16_t __ret_771; \ + __ret_771 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_771), (int8x8_t)(vqrshrun_n_s16(__s1_771, __p2_771)))); \ + __ret_771; \ +}) +#else +#define vqrshrun_high_n_s16(__p0_772, __p1_772, __p2_772) __extension__ ({ \ + int8x8_t __s0_772 = __p0_772; \ + int16x8_t __s1_772 = __p1_772; \ + int8x8_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_772; \ + __ret_772 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_772), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_772, __p2_772)))); \ + __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_772; \ +}) +#endif + +#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); + return __ret; +} +#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ + __ret; \ +}) +#define 
vqshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ + __ret; \ +}) +#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u32(__p0_773, __p1_773, __p2_773) __extension__ ({ \ + uint16x4_t __s0_773 = __p0_773; \ + uint32x4_t __s1_773 = __p1_773; \ + uint16x8_t __ret_773; \ + __ret_773 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_773), (uint16x4_t)(vqshrn_n_u32(__s1_773, __p2_773)))); \ + __ret_773; \ +}) +#else +#define vqshrn_high_n_u32(__p0_774, __p1_774, __p2_774) __extension__ ({ \ + uint16x4_t __s0_774 = __p0_774; \ + uint32x4_t __s1_774 = __p1_774; \ + uint16x4_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \ + uint32x4_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \ + uint16x8_t __ret_774; \ + __ret_774 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_774), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_774, __p2_774)))); \ + __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_774; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u64(__p0_775, __p1_775, __p2_775) __extension__ ({ \ + uint32x2_t __s0_775 = __p0_775; \ + uint64x2_t __s1_775 = __p1_775; \ + uint32x4_t __ret_775; \ + __ret_775 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_775), (uint32x2_t)(vqshrn_n_u64(__s1_775, __p2_775)))); \ + __ret_775; \ +}) +#else +#define vqshrn_high_n_u64(__p0_776, __p1_776, __p2_776) __extension__ ({ \ + uint32x2_t __s0_776 = __p0_776; \ + uint64x2_t __s1_776 = __p1_776; \ + uint32x2_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 1, 0); \ + uint64x2_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 1, 0); \ + uint32x4_t __ret_776; \ + __ret_776 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_776), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_776, __p2_776)))); \ + 
__ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \ + __ret_776; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u16(__p0_777, __p1_777, __p2_777) __extension__ ({ \ + uint8x8_t __s0_777 = __p0_777; \ + uint16x8_t __s1_777 = __p1_777; \ + uint8x16_t __ret_777; \ + __ret_777 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_777), (uint8x8_t)(vqshrn_n_u16(__s1_777, __p2_777)))); \ + __ret_777; \ +}) +#else +#define vqshrn_high_n_u16(__p0_778, __p1_778, __p2_778) __extension__ ({ \ + uint8x8_t __s0_778 = __p0_778; \ + uint16x8_t __s1_778 = __p1_778; \ + uint8x8_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_778; \ + __ret_778 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_778), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_778, __p2_778)))); \ + __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_778; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s32(__p0_779, __p1_779, __p2_779) __extension__ ({ \ + int16x4_t __s0_779 = __p0_779; \ + int32x4_t __s1_779 = __p1_779; \ + int16x8_t __ret_779; \ + __ret_779 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_779), (int16x4_t)(vqshrn_n_s32(__s1_779, __p2_779)))); \ + __ret_779; \ +}) +#else +#define vqshrn_high_n_s32(__p0_780, __p1_780, __p2_780) __extension__ ({ \ + int16x4_t __s0_780 = __p0_780; \ + int32x4_t __s1_780 = __p1_780; \ + int16x4_t __rev0_780; __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \ + int32x4_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \ + int16x8_t __ret_780; \ + __ret_780 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_780), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_780, __p2_780)))); \ + __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_780; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s64(__p0_781, __p1_781, __p2_781) __extension__ ({ \ + int32x2_t __s0_781 = __p0_781; \ + int64x2_t __s1_781 = __p1_781; \ + int32x4_t __ret_781; \ + __ret_781 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_781), (int32x2_t)(vqshrn_n_s64(__s1_781, __p2_781)))); \ + __ret_781; \ +}) +#else +#define vqshrn_high_n_s64(__p0_782, __p1_782, __p2_782) __extension__ ({ \ + int32x2_t __s0_782 = __p0_782; \ + int64x2_t __s1_782 = __p1_782; \ + int32x2_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \ + int64x2_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 1, 0); \ + int32x4_t __ret_782; \ + __ret_782 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_782), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_782, __p2_782)))); \ + __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \ + __ret_782; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s16(__p0_783, __p1_783, __p2_783) __extension__ ({ \ + int8x8_t __s0_783 = __p0_783; \ + int16x8_t __s1_783 = __p1_783; \ + int8x16_t __ret_783; \ + __ret_783 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_783), (int8x8_t)(vqshrn_n_s16(__s1_783, __p2_783)))); \ + __ret_783; \ +}) +#else +#define vqshrn_high_n_s16(__p0_784, __p1_784, __p2_784) __extension__ ({ \ + int8x8_t __s0_784 = __p0_784; \ + int16x8_t __s1_784 = __p1_784; \ + int8x8_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 7, 6, 5, 
4, 3, 2, 1, 0); \ + int16x8_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_784; \ + __ret_784 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_784), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_784, __p2_784)))); \ + __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_784; \ +}) +#endif + +#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s32(__p0_785, __p1_785, __p2_785) __extension__ ({ \ + int16x4_t __s0_785 = __p0_785; \ + int32x4_t __s1_785 = __p1_785; \ + int16x8_t __ret_785; \ + __ret_785 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_785), (int16x4_t)(vqshrun_n_s32(__s1_785, __p2_785)))); \ + __ret_785; \ +}) +#else +#define vqshrun_high_n_s32(__p0_786, __p1_786, __p2_786) __extension__ ({ \ + int16x4_t __s0_786 = __p0_786; \ + int32x4_t __s1_786 = __p1_786; \ + int16x4_t __rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \ + int32x4_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 3, 2, 1, 0); \ + int16x8_t __ret_786; \ + __ret_786 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_786), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_786, __p2_786)))); \ + __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_786; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s64(__p0_787, __p1_787, __p2_787) __extension__ ({ \ + int32x2_t __s0_787 = __p0_787; \ + int64x2_t __s1_787 = __p1_787; \ + int32x4_t __ret_787; \ + __ret_787 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_787), (int32x2_t)(vqshrun_n_s64(__s1_787, __p2_787)))); \ + __ret_787; \ +}) +#else +#define vqshrun_high_n_s64(__p0_788, __p1_788, __p2_788) __extension__ ({ \ + int32x2_t __s0_788 = __p0_788; \ + int64x2_t __s1_788 = __p1_788; \ + int32x2_t __rev0_788; __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \ + int64x2_t __rev1_788; __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 1, 0); \ + int32x4_t __ret_788; \ + __ret_788 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_788), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_788, __p2_788)))); \ + __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 3, 2, 1, 0); \ + __ret_788; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s16(__p0_789, __p1_789, __p2_789) __extension__ ({ \ + int8x8_t __s0_789 = __p0_789; \ + int16x8_t __s1_789 = __p1_789; \ + 
int8x16_t __ret_789; \ + __ret_789 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_789), (int8x8_t)(vqshrun_n_s16(__s1_789, __p2_789)))); \ + __ret_789; \ +}) +#else +#define vqshrun_high_n_s16(__p0_790, __p1_790, __p2_790) __extension__ ({ \ + int8x8_t __s0_790 = __p0_790; \ + int16x8_t __s1_790 = __p1_790; \ + int8x8_t __rev0_790; __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_790; __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_790; \ + __ret_790 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_790), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_790, __p2_790)))); \ + __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_790; \ +}) +#endif + +#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); + return __ret; +} +__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); + return __ret; +} +__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + 
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) 
__builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], 
(int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = 
__builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], 
(int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); + 
return __ret; +} +#else +__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = 
__builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); 
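+  /* Big-endian path: lanes sit reversed in registers, so each argument is
+   * first permuted back to memory lane order with __builtin_shufflevector,
+   * the builtin then operates on the reversed copies, and the result is
+   * permuted once more before it is returned. */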
+ int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = 
__builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else 
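+/* vqtbx3 is the extended table lookup (TBX): each index byte in __p2 selects
+ * a byte from the 48-byte table held in __p1.val[0..2]; an out-of-range
+ * index (>= 48) leaves the corresponding byte of __p0 unchanged, whereas the
+ * vqtbl3 variants above return zero for it.  A minimal usage sketch, with
+ * hypothetical values:
+ *
+ *   int8x16x3_t table;                      // 48-byte lookup table
+ *   uint8x8_t   idx = vdup_n_u8(50);        // 50 >= 48: out of range
+ *   int8x8_t    fb  = vdup_n_s8(-1);        // fallback bytes
+ *   int8x8_t    r   = vqtbx3_s8(fb, table, idx);  // r keeps fb's bytes
+ */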
+__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = 
(poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, 
(int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vraddhn_u64(__p1, 
__p2)); + return __ret; +} +#else +__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret 
= (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrecpe_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); + return __ret; +} +__ai float64_t vrecped_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); + return __ret; +} +__ai float32_t vrecpes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); + return __ret; +} +__ai float64_t vrecpxd_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); + return __ret; +} +__ai float32_t vrecpxs_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); + return __ret; +} +__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); + return __ret; +} +__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); + return __ret; +} +#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u32(__p0_791, __p1_791, __p2_791) __extension__ ({ \ + uint16x4_t __s0_791 = __p0_791; \ + uint32x4_t __s1_791 = __p1_791; \ + uint16x8_t __ret_791; \ + __ret_791 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_791), (uint16x4_t)(vrshrn_n_u32(__s1_791, __p2_791)))); \ + __ret_791; \ +}) +#else +#define vrshrn_high_n_u32(__p0_792, __p1_792, __p2_792) __extension__ ({ \ + uint16x4_t __s0_792 = __p0_792; \ + uint32x4_t __s1_792 = __p1_792; \ + uint16x4_t __rev0_792; __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 3, 2, 1, 0); \ + uint32x4_t __rev1_792; __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \ + uint16x8_t __ret_792; \ + __ret_792 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_792), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_792, __p2_792)))); \ + __ret_792 = 
__builtin_shufflevector(__ret_792, __ret_792, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_792; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u64(__p0_793, __p1_793, __p2_793) __extension__ ({ \ + uint32x2_t __s0_793 = __p0_793; \ + uint64x2_t __s1_793 = __p1_793; \ + uint32x4_t __ret_793; \ + __ret_793 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_793), (uint32x2_t)(vrshrn_n_u64(__s1_793, __p2_793)))); \ + __ret_793; \ +}) +#else +#define vrshrn_high_n_u64(__p0_794, __p1_794, __p2_794) __extension__ ({ \ + uint32x2_t __s0_794 = __p0_794; \ + uint64x2_t __s1_794 = __p1_794; \ + uint32x2_t __rev0_794; __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \ + uint64x2_t __rev1_794; __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \ + uint32x4_t __ret_794; \ + __ret_794 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_794), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_794, __p2_794)))); \ + __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \ + __ret_794; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u16(__p0_795, __p1_795, __p2_795) __extension__ ({ \ + uint8x8_t __s0_795 = __p0_795; \ + uint16x8_t __s1_795 = __p1_795; \ + uint8x16_t __ret_795; \ + __ret_795 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_795), (uint8x8_t)(vrshrn_n_u16(__s1_795, __p2_795)))); \ + __ret_795; \ +}) +#else +#define vrshrn_high_n_u16(__p0_796, __p1_796, __p2_796) __extension__ ({ \ + uint8x8_t __s0_796 = __p0_796; \ + uint16x8_t __s1_796 = __p1_796; \ + uint8x8_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_796; __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_796; \ + __ret_796 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_796), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_796, __p2_796)))); \ + __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_796; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s32(__p0_797, __p1_797, __p2_797) __extension__ ({ \ + int16x4_t __s0_797 = __p0_797; \ + int32x4_t __s1_797 = __p1_797; \ + int16x8_t __ret_797; \ + __ret_797 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_797), (int16x4_t)(vrshrn_n_s32(__s1_797, __p2_797)))); \ + __ret_797; \ +}) +#else +#define vrshrn_high_n_s32(__p0_798, __p1_798, __p2_798) __extension__ ({ \ + int16x4_t __s0_798 = __p0_798; \ + int32x4_t __s1_798 = __p1_798; \ + int16x4_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \ + int32x4_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 3, 2, 1, 0); \ + int16x8_t __ret_798; \ + __ret_798 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_798), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_798, __p2_798)))); \ + __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_798; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s64(__p0_799, __p1_799, __p2_799) __extension__ ({ \ + int32x2_t __s0_799 = __p0_799; \ + int64x2_t __s1_799 = __p1_799; \ + int32x4_t __ret_799; \ + __ret_799 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_799), (int32x2_t)(vrshrn_n_s64(__s1_799, __p2_799)))); \ + __ret_799; \ +}) +#else +#define vrshrn_high_n_s64(__p0_800, __p1_800, __p2_800) __extension__ ({ \ + int32x2_t __s0_800 = __p0_800; \ + int64x2_t __s1_800 = __p1_800; \ + int32x2_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, 
__s0_800, 1, 0); \ + int64x2_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \ + int32x4_t __ret_800; \ + __ret_800 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_800), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_800, __p2_800)))); \ + __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 3, 2, 1, 0); \ + __ret_800; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s16(__p0_801, __p1_801, __p2_801) __extension__ ({ \ + int8x8_t __s0_801 = __p0_801; \ + int16x8_t __s1_801 = __p1_801; \ + int8x16_t __ret_801; \ + __ret_801 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_801), (int8x8_t)(vrshrn_n_s16(__s1_801, __p2_801)))); \ + __ret_801; \ +}) +#else +#define vrshrn_high_n_s16(__p0_802, __p1_802, __p2_802) __extension__ ({ \ + int8x8_t __s0_802 = __p0_802; \ + int16x8_t __s1_802 = __p1_802; \ + int8x8_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_802; \ + __ret_802 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_802), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_802, __p2_802)))); \ + __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_802; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); + return __ret; +} +__ai float64_t vrsqrted_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); + return __ret; +} +__ai float32_t vrsqrtes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); + return __ret; +} +__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); + return __ret; +} +#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 
= __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t 
vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ + __ret; \ +}) 
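+// Illustrative usage sketch (editorial example, not part of the upstream
+// header): vsetq_lane_f64 above overwrites a single lane of a float64x2_t;
+// the lane index must be an integer constant expression. On big-endian
+// targets the wrapper reverses lane order around the builtin so that lane 0
+// names the same logical element on either endianness, e.g.:
+//
+//   float64x2_t replace_low_lane(float64x2_t v, float64_t x) {
+//     return vsetq_lane_f64(x, v, 0);   // yields { x, vgetq_lane_f64(v, 1) }
+//   }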
+__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); + return __ret; +} +__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); + return __ret; +} +#define vshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u8(__p0_803, __p1_803) __extension__ ({ \ + uint8x16_t __s0_803 = __p0_803; \ + uint16x8_t __ret_803; \ + __ret_803 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_803), __p1_803)); \ + __ret_803; \ +}) +#else +#define vshll_high_n_u8(__p0_804, __p1_804) __extension__ ({ \ + uint8x16_t __s0_804 = __p0_804; \ + uint8x16_t __rev0_804; __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_804; \ + __ret_804 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_804), __p1_804)); \ + __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_804; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u32(__p0_805, __p1_805) __extension__ ({ \ + uint32x4_t __s0_805 = __p0_805; \ + uint64x2_t __ret_805; \ + __ret_805 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_805), __p1_805)); \ + __ret_805; \ +}) +#else +#define vshll_high_n_u32(__p0_806, __p1_806) __extension__ ({ \ + uint32x4_t __s0_806 = __p0_806; \ + uint32x4_t __rev0_806; __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \ + uint64x2_t __ret_806; \ + __ret_806 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_806), __p1_806)); \ + __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 1, 0); \ + __ret_806; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u16(__p0_807, __p1_807) __extension__ ({ \ + uint16x8_t __s0_807 = __p0_807; \ + uint32x4_t __ret_807; \ + __ret_807 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_807), __p1_807)); \ + __ret_807; \ +}) +#else +#define vshll_high_n_u16(__p0_808, __p1_808) __extension__ ({ \ + uint16x8_t __s0_808 = __p0_808; \ + uint16x8_t __rev0_808; __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_808; \ + __ret_808 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_808), __p1_808)); \ + __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 3, 2, 1, 0); \ + __ret_808; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s8(__p0_809, __p1_809) __extension__ ({ \ + int8x16_t __s0_809 = __p0_809; \ + int16x8_t __ret_809; \ + __ret_809 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_809), __p1_809)); \ + __ret_809; \ +}) +#else +#define vshll_high_n_s8(__p0_810, __p1_810) __extension__ ({ \ + int8x16_t __s0_810 = __p0_810; \ + int8x16_t __rev0_810; __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_810; \ + __ret_810 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_810), __p1_810)); \ + __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_810; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s32(__p0_811, __p1_811) 
__extension__ ({ \ + int32x4_t __s0_811 = __p0_811; \ + int64x2_t __ret_811; \ + __ret_811 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_811), __p1_811)); \ + __ret_811; \ +}) +#else +#define vshll_high_n_s32(__p0_812, __p1_812) __extension__ ({ \ + int32x4_t __s0_812 = __p0_812; \ + int32x4_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \ + int64x2_t __ret_812; \ + __ret_812 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_812), __p1_812)); \ + __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \ + __ret_812; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s16(__p0_813, __p1_813) __extension__ ({ \ + int16x8_t __s0_813 = __p0_813; \ + int32x4_t __ret_813; \ + __ret_813 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_813), __p1_813)); \ + __ret_813; \ +}) +#else +#define vshll_high_n_s16(__p0_814, __p1_814) __extension__ ({ \ + int16x8_t __s0_814 = __p0_814; \ + int16x8_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_814; \ + __ret_814 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_814), __p1_814)); \ + __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \ + __ret_814; \ +}) +#endif + +#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u32(__p0_815, __p1_815, __p2_815) __extension__ ({ \ + uint16x4_t __s0_815 = __p0_815; \ + uint32x4_t __s1_815 = __p1_815; \ + uint16x8_t __ret_815; \ + __ret_815 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_815), (uint16x4_t)(vshrn_n_u32(__s1_815, __p2_815)))); \ + __ret_815; \ +}) +#else +#define vshrn_high_n_u32(__p0_816, __p1_816, __p2_816) __extension__ ({ \ + uint16x4_t __s0_816 = __p0_816; \ + uint32x4_t __s1_816 = __p1_816; \ + uint16x4_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 3, 2, 1, 0); \ + uint32x4_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 3, 2, 1, 0); \ + uint16x8_t __ret_816; \ + __ret_816 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_816), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_816, __p2_816)))); \ + __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_816; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u64(__p0_817, __p1_817, __p2_817) __extension__ ({ \ + uint32x2_t __s0_817 = __p0_817; \ + uint64x2_t __s1_817 = __p1_817; \ + uint32x4_t __ret_817; \ + __ret_817 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_817), (uint32x2_t)(vshrn_n_u64(__s1_817, __p2_817)))); \ + __ret_817; \ +}) +#else +#define vshrn_high_n_u64(__p0_818, __p1_818, __p2_818) __extension__ ({ \ + uint32x2_t __s0_818 = __p0_818; \ + uint64x2_t __s1_818 = __p1_818; \ + uint32x2_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 1, 0); \ + uint64x2_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 1, 0); \ + uint32x4_t __ret_818; \ + __ret_818 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_818), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_818, __p2_818)))); \ + __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \ + __ret_818; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vshrn_high_n_u16(__p0_819, __p1_819, __p2_819) __extension__ ({ \ + uint8x8_t __s0_819 = __p0_819; \ + uint16x8_t __s1_819 = __p1_819; \ + uint8x16_t __ret_819; \ + __ret_819 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_819), (uint8x8_t)(vshrn_n_u16(__s1_819, __p2_819)))); \ + __ret_819; \ +}) +#else +#define vshrn_high_n_u16(__p0_820, __p1_820, __p2_820) __extension__ ({ \ + uint8x8_t __s0_820 = __p0_820; \ + uint16x8_t __s1_820 = __p1_820; \ + uint8x8_t __rev0_820; __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_820; __rev1_820 = __builtin_shufflevector(__s1_820, __s1_820, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_820; \ + __ret_820 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_820), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_820, __p2_820)))); \ + __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_820; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s32(__p0_821, __p1_821, __p2_821) __extension__ ({ \ + int16x4_t __s0_821 = __p0_821; \ + int32x4_t __s1_821 = __p1_821; \ + int16x8_t __ret_821; \ + __ret_821 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_821), (int16x4_t)(vshrn_n_s32(__s1_821, __p2_821)))); \ + __ret_821; \ +}) +#else +#define vshrn_high_n_s32(__p0_822, __p1_822, __p2_822) __extension__ ({ \ + int16x4_t __s0_822 = __p0_822; \ + int32x4_t __s1_822 = __p1_822; \ + int16x4_t __rev0_822; __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \ + int32x4_t __rev1_822; __rev1_822 = __builtin_shufflevector(__s1_822, __s1_822, 3, 2, 1, 0); \ + int16x8_t __ret_822; \ + __ret_822 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_822), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_822, __p2_822)))); \ + __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_822; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s64(__p0_823, __p1_823, __p2_823) __extension__ ({ \ + int32x2_t __s0_823 = __p0_823; \ + int64x2_t __s1_823 = __p1_823; \ + int32x4_t __ret_823; \ + __ret_823 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_823), (int32x2_t)(vshrn_n_s64(__s1_823, __p2_823)))); \ + __ret_823; \ +}) +#else +#define vshrn_high_n_s64(__p0_824, __p1_824, __p2_824) __extension__ ({ \ + int32x2_t __s0_824 = __p0_824; \ + int64x2_t __s1_824 = __p1_824; \ + int32x2_t __rev0_824; __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 1, 0); \ + int64x2_t __rev1_824; __rev1_824 = __builtin_shufflevector(__s1_824, __s1_824, 1, 0); \ + int32x4_t __ret_824; \ + __ret_824 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_824), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_824, __p2_824)))); \ + __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \ + __ret_824; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s16(__p0_825, __p1_825, __p2_825) __extension__ ({ \ + int8x8_t __s0_825 = __p0_825; \ + int16x8_t __s1_825 = __p1_825; \ + int8x16_t __ret_825; \ + __ret_825 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_825), (int8x8_t)(vshrn_n_s16(__s1_825, __p2_825)))); \ + __ret_825; \ +}) +#else +#define vshrn_high_n_s16(__p0_826, __p1_826, __p2_826) __extension__ ({ \ + int8x8_t __s0_826 = __p0_826; \ + int16x8_t __s1_826 = __p1_826; \ + int8x8_t __rev0_826; __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_826; __rev1_826 = __builtin_shufflevector(__s1_826, __s1_826, 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ int8x16_t __ret_826; \ + __ret_826 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_826), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_826, __p2_826)))); \ + __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_826; \ +}) +#endif + +#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); + return __ret; +} +__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); + return __ret; +} +__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); + return __ret; +} +__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vsqrt_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + 
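+// Illustrative usage sketch (editorial example, not part of the upstream
+// header): the scalar shift-accumulate and shift-insert forms above take
+// immediate shift counts in the range 1..64. vrsrad_n_u64 adds a rounded
+// right shift of its second operand to an accumulator; vsrid_n_u64 shifts
+// the second operand right and inserts the result beneath the preserved top
+// bits of the first, e.g.:
+//
+//   uint64_t rounded_accumulate(uint64_t acc, uint64_t x) {
+//     return vrsrad_n_u64(acc, x, 1);   // acc + ((x + 1) >> 1)
+//   }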
+#define vst1_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ +}) +#else +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ +}) +#else +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ +}) +#endif + +#define vst1_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ +}) +#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ +}) +#else +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ +}) +#else +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) +#endif + +#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst2_p64(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) +#else +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) +#endif + +#define vst2_f64(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], __p2, 36); \ +}) +#else +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ +}) +#else +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ +}) +#else +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ +}) +#else +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ +}) +#else +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ +}) +#else +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ +}) +#else +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ +}) +#endif + +#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ +}) +#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ +}) +#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ +}) +#define vst3_p64(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) +#else +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#define vst3_f64(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ +}) +#else +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ +}) +#else +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ +}) +#else +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ +}) +#else +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ +}) +#else +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ +}) +#else +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ +}) +#else +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ +}) +#endif + +#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ +}) +#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + 
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ +}) +#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ +}) +#define vst4_p64(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#define vst4_f64(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ +}) +#else +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ +}) +#else +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ +}) +#else +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ +}) +#else +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ +}) +#else +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ +}) +#else +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ +}) +#else +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 
1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ +}) +#endif + +#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +}) +#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ +}) +#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ +}) +#define vstrq_p128(__p0, __p1) __extension__ ({ \ + poly128_t __s1 = __p1; \ + __builtin_neon_vstrq_p128(__p0, __s1); \ +}) +__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); + return __ret; +} +__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, 
uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 
4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_laneq_s32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ + int32x4_t __s0_827 = __p0_827; \ + int8x16_t __s1_827 = __p1_827; \ + uint8x16_t __s2_827 = __p2_827; \ + int32x4_t __ret_827; \ +uint8x16_t __reint_827 = __s2_827; \ + __ret_827 = vusdotq_s32(__s0_827, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_827, __p3_827)), __s1_827); \ + __ret_827; \ +}) 
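+/*
+ * Editor's note: an illustrative usage sketch for the lane-indexed
+ * mixed-sign dot product defined above; it is not part of the generated
+ * header. vsudotq_laneq_s32(acc, a, b, lane) multiplies the signed bytes
+ * of a by the four unsigned bytes held in one 32-bit lane of b and adds
+ * each group of four products into the corresponding lane of acc.
+ * Assuming an AArch64 target built with the i8mm extension enabled:
+ *
+ *   int32x4_t  acc = vdupq_n_s32(0);
+ *   int8x16_t  a   = vdupq_n_s8(2);   // signed multiplicands
+ *   uint8x16_t b   = vdupq_n_u8(3);   // unsigned lane source
+ *   acc = vsudotq_laneq_s32(acc, a, b, 0);
+ *   // every lane of acc is now 4 * (2 * 3) = 24
+ */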
+#else +#define vsudotq_laneq_s32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ + int32x4_t __s0_828 = __p0_828; \ + int8x16_t __s1_828 = __p1_828; \ + uint8x16_t __s2_828 = __p2_828; \ + int32x4_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \ + int8x16_t __rev1_828; __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_828; __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_828; \ +uint8x16_t __reint_828 = __rev2_828; \ + __ret_828 = __noswap_vusdotq_s32(__rev0_828, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_828, __p3_828)), __rev1_828); \ + __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 3, 2, 1, 0); \ + __ret_828; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ + int32x2_t __s0_829 = __p0_829; \ + int8x8_t __s1_829 = __p1_829; \ + uint8x16_t __s2_829 = __p2_829; \ + int32x2_t __ret_829; \ +uint8x16_t __reint_829 = __s2_829; \ + __ret_829 = vusdot_s32(__s0_829, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_829, __p3_829)), __s1_829); \ + __ret_829; \ +}) +#else +#define vsudot_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ + int32x2_t __s0_830 = __p0_830; \ + int8x8_t __s1_830 = __p1_830; \ + uint8x16_t __s2_830 = __p2_830; \ + int32x2_t __rev0_830; __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 1, 0); \ + int8x8_t __rev1_830; __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_830; __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x2_t __ret_830; \ +uint8x16_t __reint_830 = __rev2_830; \ + __ret_830 = __noswap_vusdot_s32(__rev0_830, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_830, __p3_830)), __rev1_830); \ + __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 1, 0); \ + __ret_830; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 
10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + 
__ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t 
vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 
31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai uint16x8_t 
vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
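+/*
+ * Editor's note (not part of the generated header): as everywhere in this
+ * file, the __LITTLE_ENDIAN__ arm below lowers vtrn2q_s16 straight to
+ * __builtin_shufflevector, while the big-endian arm first reverses the
+ * lane order of both operands, applies the same shuffle, and reverses the
+ * result, so that lane indices keep their architectural (little-endian)
+ * meaning. A rough sketch of the transpose itself:
+ *
+ *   int16x8_t a  = {0, 1, 2, 3, 4, 5, 6, 7};
+ *   int16x8_t b  = {8, 9, 10, 11, 12, 13, 14, 15};
+ *   int16x8_t hi = vtrn2q_s16(a, b);  // {1, 9, 3, 11, 5, 13, 7, 15}
+ */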
+__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) 
__builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); + return __ret; +} +__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); + return __ret; +} +__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); + return __ret; +} +__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); + return __ret; +} +__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); + return __ret; +} +__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_laneq_s32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ + int32x4_t __s0_831 = __p0_831; \ + uint8x16_t __s1_831 = __p1_831; \ + int8x16_t __s2_831 = __p2_831; \ + int32x4_t __ret_831; \ +int8x16_t __reint_831 = __s2_831; \ + __ret_831 = vusdotq_s32(__s0_831, __s1_831, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_831, __p3_831))); \ + __ret_831; \ +}) +#else +#define vusdotq_laneq_s32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ + int32x4_t __s0_832 = __p0_832; \ + uint8x16_t __s1_832 = __p1_832; \ + int8x16_t __s2_832 = __p2_832; \ + int32x4_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \ + uint8x16_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_832; __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_832; \ +int8x16_t __reint_832 = __rev2_832; \ + __ret_832 = __noswap_vusdotq_s32(__rev0_832, __rev1_832, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_832, __p3_832))); \ + __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 3, 2, 1, 0); \ + __ret_832; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vusdot_laneq_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ + int32x2_t __s0_833 = __p0_833; \ + uint8x8_t __s1_833 = __p1_833; \ + int8x16_t __s2_833 = __p2_833; \ + int32x2_t __ret_833; \ +int8x16_t __reint_833 = __s2_833; \ + __ret_833 = vusdot_s32(__s0_833, __s1_833, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_833, __p3_833))); \ + __ret_833; \ +}) +#else +#define vusdot_laneq_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ + int32x2_t __s0_834 = __p0_834; \ + uint8x8_t __s1_834 = __p1_834; \ + int8x16_t __s2_834 = __p2_834; \ + int32x2_t __rev0_834; __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \ + uint8x8_t __rev1_834; __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_834; __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x2_t __ret_834; \ +int8x16_t __reint_834 = __rev2_834; \ + __ret_834 = __noswap_vusdot_s32(__rev0_834, __rev1_834, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_834, __p3_834))); \ + __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \ + __ret_834; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif 
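The vuzp1* definitions in this stretch all share one shape: the even-indexed lanes of the two operands, viewed as a single concatenated vector, are compacted into the result, and the big-endian variants simply wrap that core shuffle in lane reversals. A comment-style illustration of the lane semantics, using hypothetical placeholder values a0..a3 and b0..b3 (not identifiers from the header):

/* Illustration of vuzp1 (unzip, even lanes), hypothetical lane values:
 *   uint32x4_t a = {a0, a1, a2, a3};
 *   uint32x4_t b = {b0, b1, b2, b3};
 *   vuzp1q_u32(a, b) -> {a0, a2, b0, b2}   // shuffle indices 0, 2, 4, 6 of a:b
 */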
+ +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + 
int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 
13, 15); + return __ret; +} +#else +__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t 
__p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
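Where vuzp1/vuzp2 de-interleave (taking the even- and odd-indexed lanes of the concatenated operands, respectively), the vzip1*/vzip2* definitions in this region interleave: vzip1 zips together the low halves of the two operands and vzip2 the high halves. Again a comment-style illustration with hypothetical lane values:

/* Illustration of vzip1/vzip2, hypothetical lane values:
 *   uint32x4_t a = {a0, a1, a2, a3};
 *   uint32x4_t b = {b0, b1, b2, b3};
 *   vzip1q_u32(a, b) -> {a0, b0, a1, b1}   // shuffle indices 0, 4, 1, 5
 *   vzip2q_u32(a, b) -> {a2, b2, a3, b3}   // shuffle indices 2, 6, 3, 7
 */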
+#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, 
__p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} 
+#else +__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t 
vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + vabdq_u8(__p1, __p2); + return __ret; +} +#else +__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
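// Big-endian variant: all operands were lane-reversed above, so the body
+ // computes in little-endian lane order and reverses the result back below.
+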
uint8x16_t __ret; + __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdq_u32(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdq_u16(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + vabdq_s8(__p1, __p2); + return __ret; +} +#else +__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vabdq_s32(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdq_s16(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; 
__rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + vabd_u8(__p1, __p2); + return __ret; +} +#else +__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + vabd_u32(__p1, __p2); + return __ret; +} +#else +__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + vabd_u16(__p1, __p2); + return __ret; +} +#else +__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + vabd_s8(__p1, __p2); + return __ret; +} +#else +__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + vabd_s32(__p1, __p2); + return __ret; +} +#else +__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); 
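+ // __noswap_vabd_s32 expects inputs already in reversed (little-endian) lane
+ // order, so the nested call does not swap a second time.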
+ return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + vabd_s16(__p1, __p2); + return __ret; +} +#else +__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); + return __ret; +} +#else +__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); + return __ret; +} +#else +__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); + return __ret; +} +#else +__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); + return __ret; +} +#else +__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); + return __ret; +} +#else +__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); + return __ret; +} +#else +__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) + vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) + vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) + vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
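// vaddl widens both operands with vmovl before adding; this big-endian path
+ // routes the pre-reversed vectors through the __noswap_vmovl_* helpers.
+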
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) + vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) + vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) + vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_u16(__rev1); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f16(__p0_835, __p1_835) __extension__ ({ \ + float16x4_t __s0_835 = __p0_835; \ + float16_t __ret_835; \ +float16x4_t __reint_835 = __s0_835; \ +int16_t __reint1_835 = vget_lane_s16(*(int16x4_t *) &__reint_835, __p1_835); \ + __ret_835 = *(float16_t *) &__reint1_835; \ + __ret_835; \ +}) +#else +#define vget_lane_f16(__p0_836, __p1_836) __extension__ ({ \ + float16x4_t __s0_836 = __p0_836; \ + float16x4_t __rev0_836; __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 3, 2, 1, 0); \ + float16_t __ret_836; \ +float16x4_t __reint_836 = __rev0_836; \ +int16_t __reint1_836 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_836, __p1_836); \ + __ret_836 = *(float16_t *) &__reint1_836; \ + __ret_836; \ +}) +#define __noswap_vget_lane_f16(__p0_837, __p1_837) __extension__ ({ \ + float16x4_t __s0_837 = __p0_837; \ + float16_t __ret_837; \ +float16x4_t __reint_837 = __s0_837; \ +int16_t __reint1_837 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_837, __p1_837); \ + __ret_837 = *(float16_t *) &__reint1_837; \ + __ret_837; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f16(__p0_838, __p1_838) __extension__ ({ \ + float16x8_t __s0_838 = __p0_838; \ + float16_t __ret_838; \ +float16x8_t __reint_838 = __s0_838; \ +int16_t __reint1_838 = vgetq_lane_s16(*(int16x8_t *) &__reint_838, __p1_838); \ + __ret_838 = *(float16_t *) &__reint1_838; \ + __ret_838; \ +}) +#else +#define vgetq_lane_f16(__p0_839, __p1_839) __extension__ ({ \ + float16x8_t __s0_839 = __p0_839; \ + float16x8_t __rev0_839; __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret_839; \ +float16x8_t __reint_839 = __rev0_839; \ +int16_t __reint1_839 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_839, __p1_839); \ + __ret_839 = *(float16_t *) &__reint1_839; \ + 
__ret_839; \ +}) +#define __noswap_vgetq_lane_f16(__p0_840, __p1_840) __extension__ ({ \ + float16x8_t __s0_840 = __p0_840; \ + float16_t __ret_840; \ +float16x8_t __reint_840 = __s0_840; \ +int16_t __reint1_840 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_840, __p1_840); \ + __ret_840 = *(float16_t *) &__reint1_840; \ + __ret_840; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ + uint64x2_t __s0_841 = __p0_841; \ + uint32x2_t __s1_841 = __p1_841; \ + uint32x2_t __s2_841 = __p2_841; \ + uint64x2_t __ret_841; \ + __ret_841 = __s0_841 + vmull_u32(__s1_841, splat_lane_u32(__s2_841, __p3_841)); \ + __ret_841; \ +}) +#else +#define vmlal_lane_u32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ + uint64x2_t __s0_842 = __p0_842; \ + uint32x2_t __s1_842 = __p1_842; \ + uint32x2_t __s2_842 = __p2_842; \ + uint64x2_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \ + uint32x2_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \ + uint32x2_t __rev2_842; __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \ + uint64x2_t __ret_842; \ + __ret_842 = __rev0_842 + __noswap_vmull_u32(__rev1_842, __noswap_splat_lane_u32(__rev2_842, __p3_842)); \ + __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \ + __ret_842; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u16(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ + uint32x4_t __s0_843 = __p0_843; \ + uint16x4_t __s1_843 = __p1_843; \ + uint16x4_t __s2_843 = __p2_843; \ + uint32x4_t __ret_843; \ + __ret_843 = __s0_843 + vmull_u16(__s1_843, splat_lane_u16(__s2_843, __p3_843)); \ + __ret_843; \ +}) +#else +#define vmlal_lane_u16(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ + uint32x4_t __s0_844 = __p0_844; \ + uint16x4_t __s1_844 = __p1_844; \ + uint16x4_t __s2_844 = __p2_844; \ + uint32x4_t __rev0_844; __rev0_844 = 
__builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \ + uint16x4_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 3, 2, 1, 0); \ + uint16x4_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 3, 2, 1, 0); \ + uint32x4_t __ret_844; \ + __ret_844 = __rev0_844 + __noswap_vmull_u16(__rev1_844, __noswap_splat_lane_u16(__rev2_844, __p3_844)); \ + __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \ + __ret_844; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ + int64x2_t __s0_845 = __p0_845; \ + int32x2_t __s1_845 = __p1_845; \ + int32x2_t __s2_845 = __p2_845; \ + int64x2_t __ret_845; \ + __ret_845 = __s0_845 + vmull_s32(__s1_845, splat_lane_s32(__s2_845, __p3_845)); \ + __ret_845; \ +}) +#else +#define vmlal_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ + int64x2_t __s0_846 = __p0_846; \ + int32x2_t __s1_846 = __p1_846; \ + int32x2_t __s2_846 = __p2_846; \ + int64x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ + int32x2_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \ + int32x2_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \ + int64x2_t __ret_846; \ + __ret_846 = __rev0_846 + __noswap_vmull_s32(__rev1_846, __noswap_splat_lane_s32(__rev2_846, __p3_846)); \ + __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ + __ret_846; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s16(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \ + int32x4_t __s0_847 = __p0_847; \ + int16x4_t __s1_847 = __p1_847; \ + int16x4_t __s2_847 = __p2_847; \ + int32x4_t __ret_847; \ + __ret_847 = __s0_847 + vmull_s16(__s1_847, splat_lane_s16(__s2_847, __p3_847)); \ + __ret_847; \ +}) +#else +#define vmlal_lane_s16(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \ + int32x4_t __s0_848 = __p0_848; \ + int16x4_t __s1_848 = __p1_848; \ + int16x4_t __s2_848 = __p2_848; \ + int32x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \ + int16x4_t __rev1_848; __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \ + int16x4_t __rev2_848; __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 3, 2, 1, 0); \ + int32x4_t __ret_848; \ + __ret_848 = __rev0_848 + __noswap_vmull_s16(__rev1_848, __noswap_splat_lane_s16(__rev2_848, __p3_848)); \ + __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \ + __ret_848; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + 
vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + 
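// vmlsl is the multiply-subtract-long counterpart of vmlal: the widened
+ // product of __p1 and __p2 is subtracted from the accumulator __p0.
+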
int32x4_t __ret; + __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \ + uint64x2_t __s0_849 = __p0_849; \ + uint32x2_t __s1_849 = __p1_849; \ + uint32x2_t __s2_849 = __p2_849; \ + uint64x2_t __ret_849; \ + __ret_849 = __s0_849 - vmull_u32(__s1_849, splat_lane_u32(__s2_849, __p3_849)); \ + __ret_849; \ +}) +#else +#define vmlsl_lane_u32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \ + uint64x2_t __s0_850 = __p0_850; \ + uint32x2_t __s1_850 = __p1_850; \ + uint32x2_t __s2_850 = __p2_850; \ + uint64x2_t __rev0_850; __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \ + uint32x2_t __rev1_850; __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 1, 0); \ + uint32x2_t __rev2_850; __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 1, 0); \ + uint64x2_t __ret_850; \ + __ret_850 = __rev0_850 - __noswap_vmull_u32(__rev1_850, __noswap_splat_lane_u32(__rev2_850, __p3_850)); \ + __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \ + __ret_850; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u16(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \ + uint32x4_t __s0_851 = __p0_851; \ + uint16x4_t __s1_851 = __p1_851; \ + uint16x4_t __s2_851 = __p2_851; \ + uint32x4_t __ret_851; \ + __ret_851 = __s0_851 - vmull_u16(__s1_851, splat_lane_u16(__s2_851, __p3_851)); \ + __ret_851; \ +}) +#else +#define vmlsl_lane_u16(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \ + uint32x4_t __s0_852 = __p0_852; \ + uint16x4_t __s1_852 = __p1_852; \ + uint16x4_t __s2_852 = __p2_852; \ + uint32x4_t __rev0_852; __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \ + uint16x4_t __rev1_852; __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 3, 2, 1, 0); \ + uint16x4_t __rev2_852; __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 3, 2, 1, 0); \ + uint32x4_t __ret_852; \ + __ret_852 = __rev0_852 - __noswap_vmull_u16(__rev1_852, __noswap_splat_lane_u16(__rev2_852, __p3_852)); \ + __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \ + __ret_852; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ + int64x2_t __s0_853 = __p0_853; \ + int32x2_t __s1_853 = __p1_853; \ + int32x2_t __s2_853 = __p2_853; \ + int64x2_t __ret_853; \ + __ret_853 = __s0_853 - vmull_s32(__s1_853, splat_lane_s32(__s2_853, __p3_853)); \ + __ret_853; \ +}) +#else +#define vmlsl_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ + int64x2_t __s0_854 = __p0_854; \ + int32x2_t __s1_854 = __p1_854; \ + int32x2_t __s2_854 = __p2_854; \ + int64x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ + int32x2_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \ + int32x2_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \ + int64x2_t __ret_854; \ + __ret_854 = __rev0_854 - __noswap_vmull_s32(__rev1_854, __noswap_splat_lane_s32(__rev2_854, __p3_854)); \ + __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ + __ret_854; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s16(__p0_855, 
__p1_855, __p2_855, __p3_855) __extension__ ({ \ + int32x4_t __s0_855 = __p0_855; \ + int16x4_t __s1_855 = __p1_855; \ + int16x4_t __s2_855 = __p2_855; \ + int32x4_t __ret_855; \ + __ret_855 = __s0_855 - vmull_s16(__s1_855, splat_lane_s16(__s2_855, __p3_855)); \ + __ret_855; \ +}) +#else +#define vmlsl_lane_s16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ + int32x4_t __s0_856 = __p0_856; \ + int16x4_t __s1_856 = __p1_856; \ + int16x4_t __s2_856 = __p2_856; \ + int32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ + int16x4_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \ + int16x4_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \ + int32x4_t __ret_856; \ + __ret_856 = __rev0_856 - __noswap_vmull_s16(__rev1_856, __noswap_splat_lane_s16(__rev2_856, __p3_856)); \ + __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ + __ret_856; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, 
int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f16(__p0_857, __p1_857, __p2_857) __extension__ ({ \ + float16_t __s0_857 = __p0_857; \ + float16x4_t __s1_857 = __p1_857; \ + float16x4_t __ret_857; \ +float16_t __reint_857 = __s0_857; \ +float16x4_t __reint1_857 = __s1_857; \ +int16x4_t __reint2_857 = vset_lane_s16(*(int16_t *) &__reint_857, *(int16x4_t *) &__reint1_857, __p2_857); \ + __ret_857 = *(float16x4_t *) &__reint2_857; \ + __ret_857; \ +}) +#else +#define vset_lane_f16(__p0_858, __p1_858, __p2_858) __extension__ ({ \ + float16_t __s0_858 = __p0_858; \ + float16x4_t __s1_858 = __p1_858; \ + float16x4_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 3, 2, 1, 0); \ + float16x4_t __ret_858; \ +float16_t __reint_858 = __s0_858; \ +float16x4_t __reint1_858 = __rev1_858; \ +int16x4_t __reint2_858 = __noswap_vset_lane_s16(*(int16_t *) &__reint_858, *(int16x4_t *) &__reint1_858, __p2_858); \ + __ret_858 = *(float16x4_t *) &__reint2_858; \ + __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 3, 2, 1, 0); \ + __ret_858; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f16(__p0_859, __p1_859, __p2_859) __extension__ ({ \ + float16_t __s0_859 = __p0_859; \ + float16x8_t __s1_859 = __p1_859; \ + float16x8_t __ret_859; \ +float16_t __reint_859 = __s0_859; \ +float16x8_t __reint1_859 = __s1_859; \ +int16x8_t __reint2_859 = vsetq_lane_s16(*(int16_t *) &__reint_859, *(int16x8_t *) &__reint1_859, __p2_859); \ + __ret_859 = *(float16x8_t *) &__reint2_859; \ + __ret_859; \ +}) +#else +#define vsetq_lane_f16(__p0_860, __p1_860, __p2_860) __extension__ ({ \ + float16_t __s0_860 = __p0_860; \ + float16x8_t __s1_860 = __p1_860; \ + float16x8_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_860; \ +float16_t __reint_860 = __s0_860; \ +float16x8_t __reint1_860 = __rev1_860; \ +int16x8_t __reint2_860 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_860, *(int16x8_t *) &__reint1_860, __p2_860); \ + __ret_860 = *(float16x8_t *) &__reint2_860; \ + __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_860; \ +}) +#endif + +#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_lane_f32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ + float32x4_t __s0_861 = __p0_861; \ + bfloat16x8_t __s1_861 = __p1_861; \ + bfloat16x4_t __s2_861 = __p2_861; \ + float32x4_t __ret_861; \ + __ret_861 = vbfmlalbq_f32(__s0_861, __s1_861, (bfloat16x8_t) {vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861), 
vget_lane_bf16(__s2_861, __p3_861), vget_lane_bf16(__s2_861, __p3_861)}); \ + __ret_861; \ +}) +#else +#define vbfmlalbq_lane_f32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ + float32x4_t __s0_862 = __p0_862; \ + bfloat16x8_t __s1_862 = __p1_862; \ + bfloat16x4_t __s2_862 = __p2_862; \ + float32x4_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 3, 2, 1, 0); \ + float32x4_t __ret_862; \ + __ret_862 = __noswap_vbfmlalbq_f32(__rev0_862, __rev1_862, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862), __noswap_vget_lane_bf16(__rev2_862, __p3_862)}); \ + __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 3, 2, 1, 0); \ + __ret_862; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_laneq_f32(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ + float32x4_t __s0_863 = __p0_863; \ + bfloat16x8_t __s1_863 = __p1_863; \ + bfloat16x8_t __s2_863 = __p2_863; \ + float32x4_t __ret_863; \ + __ret_863 = vbfmlalbq_f32(__s0_863, __s1_863, (bfloat16x8_t) {vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863), vgetq_lane_bf16(__s2_863, __p3_863)}); \ + __ret_863; \ +}) +#else +#define vbfmlalbq_laneq_f32(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ + float32x4_t __s0_864 = __p0_864; \ + bfloat16x8_t __s1_864 = __p1_864; \ + bfloat16x8_t __s2_864 = __p2_864; \ + float32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x4_t __ret_864; \ + __ret_864 = __noswap_vbfmlalbq_f32(__rev0_864, __rev1_864, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864), __noswap_vgetq_lane_bf16(__rev2_864, __p3_864)}); \ + __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ + __ret_864; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_lane_f32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ + float32x4_t __s0_865 = __p0_865; \ + bfloat16x8_t __s1_865 = __p1_865; \ + bfloat16x4_t __s2_865 = __p2_865; \ + float32x4_t __ret_865; \ + __ret_865 = vbfmlaltq_f32(__s0_865, __s1_865, (bfloat16x8_t) {vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, __p3_865), vget_lane_bf16(__s2_865, 
__p3_865)}); \ + __ret_865; \ +}) +#else +#define vbfmlaltq_lane_f32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ + float32x4_t __s0_866 = __p0_866; \ + bfloat16x8_t __s1_866 = __p1_866; \ + bfloat16x4_t __s2_866 = __p2_866; \ + float32x4_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 3, 2, 1, 0); \ + float32x4_t __ret_866; \ + __ret_866 = __noswap_vbfmlaltq_f32(__rev0_866, __rev1_866, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866), __noswap_vget_lane_bf16(__rev2_866, __p3_866)}); \ + __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 3, 2, 1, 0); \ + __ret_866; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_laneq_f32(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ + float32x4_t __s0_867 = __p0_867; \ + bfloat16x8_t __s1_867 = __p1_867; \ + bfloat16x8_t __s2_867 = __p2_867; \ + float32x4_t __ret_867; \ + __ret_867 = vbfmlaltq_f32(__s0_867, __s1_867, (bfloat16x8_t) {vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867), vgetq_lane_bf16(__s2_867, __p3_867)}); \ + __ret_867; \ +}) +#else +#define vbfmlaltq_laneq_f32(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ + float32x4_t __s0_868 = __p0_868; \ + bfloat16x8_t __s1_868 = __p1_868; \ + bfloat16x8_t __s2_868 = __p2_868; \ + float32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x4_t __ret_868; \ + __ret_868 = __noswap_vbfmlaltq_f32(__rev0_868, __rev1_868, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868), __noswap_vgetq_lane_bf16(__rev2_868, __p3_868)}); \ + __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ + __ret_868; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); + return __ret; +} +#else +__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = 
vcvt_f32_bf16(vget_low_bf16(__p0)); + return __ret; +} +#else +__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_high_f16(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \ + float32x4_t __s0_869 = __p0_869; \ + float16x8_t __s1_869 = __p1_869; \ + float16x4_t __s2_869 = __p2_869; \ + float32x4_t __ret_869; \ + __ret_869 = vfmlalq_high_f16(__s0_869, __s1_869, (float16x8_t) {vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869), vget_lane_f16(__s2_869, __p3_869)}); \ + __ret_869; \ +}) +#else +#define vfmlalq_lane_high_f16(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \ + float32x4_t __s0_870 = __p0_870; \ + float16x8_t __s1_870 = __p1_870; \ + float16x4_t __s2_870 = __p2_870; \ + float32x4_t __rev0_870; __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 3, 2, 1, 0); \ + float16x8_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_870; __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 3, 2, 1, 0); \ + float32x4_t __ret_870; \ + __ret_870 = __noswap_vfmlalq_high_f16(__rev0_870, __rev1_870, (float16x8_t) {__noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870), __noswap_vget_lane_f16(__rev2_870, __p3_870)}); \ + __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \ + __ret_870; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_high_f16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \ + float32x2_t __s0_871 = __p0_871; \ + float16x4_t __s1_871 = __p1_871; \ + float16x4_t __s2_871 = __p2_871; \ + float32x2_t __ret_871; \ + __ret_871 = vfmlal_high_f16(__s0_871, __s1_871, (float16x4_t) {vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871), vget_lane_f16(__s2_871, __p3_871)}); \ + __ret_871; \ +}) +#else +#define vfmlal_lane_high_f16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \ + float32x2_t __s0_872 = __p0_872; \ + float16x4_t __s1_872 = __p1_872; \ + float16x4_t __s2_872 = __p2_872; \ + float32x2_t __rev0_872; __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 1, 0); \ + float16x4_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \ + float16x4_t __rev2_872; __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \ + float32x2_t __ret_872; \ + __ret_872 = __noswap_vfmlal_high_f16(__rev0_872, __rev1_872, (float16x4_t) {__noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872), __noswap_vget_lane_f16(__rev2_872, __p3_872)}); \ + __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 1, 0); \ + __ret_872; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vfmlalq_lane_low_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ + float32x4_t __s0_873 = __p0_873; \ + float16x8_t __s1_873 = __p1_873; \ + float16x4_t __s2_873 = __p2_873; \ + float32x4_t __ret_873; \ + __ret_873 = vfmlalq_low_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \ + __ret_873; \ +}) +#else +#define vfmlalq_lane_low_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ + float32x4_t __s0_874 = __p0_874; \ + float16x8_t __s1_874 = __p1_874; \ + float16x4_t __s2_874 = __p2_874; \ + float32x4_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \ + float16x8_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \ + float32x4_t __ret_874; \ + __ret_874 = __noswap_vfmlalq_low_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \ + __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \ + __ret_874; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_low_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ + float32x2_t __s0_875 = __p0_875; \ + float16x4_t __s1_875 = __p1_875; \ + float16x4_t __s2_875 = __p2_875; \ + float32x2_t __ret_875; \ + __ret_875 = vfmlal_low_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \ + __ret_875; \ +}) +#else +#define vfmlal_lane_low_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ + float32x2_t __s0_876 = __p0_876; \ + float16x4_t __s1_876 = __p1_876; \ + float16x4_t __s2_876 = __p2_876; \ + float32x2_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 1, 0); \ + float16x4_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \ + float16x4_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \ + float32x2_t __ret_876; \ + __ret_876 = __noswap_vfmlal_low_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \ + __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 1, 0); \ + __ret_876; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_high_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ + float32x4_t __s0_877 = __p0_877; \ + float16x8_t __s1_877 = __p1_877; \ + float16x8_t __s2_877 = __p2_877; \ + float32x4_t __ret_877; \ + __ret_877 = vfmlalq_high_f16(__s0_877, __s1_877, (float16x8_t) {vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), 
vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877), vgetq_lane_f16(__s2_877, __p3_877)}); \ + __ret_877; \ +}) +#else +#define vfmlalq_laneq_high_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ + float32x4_t __s0_878 = __p0_878; \ + float16x8_t __s1_878 = __p1_878; \ + float16x8_t __s2_878 = __p2_878; \ + float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \ + float16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x4_t __ret_878; \ + __ret_878 = __noswap_vfmlalq_high_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878), __noswap_vgetq_lane_f16(__rev2_878, __p3_878)}); \ + __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \ + __ret_878; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_high_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ + float32x2_t __s0_879 = __p0_879; \ + float16x4_t __s1_879 = __p1_879; \ + float16x8_t __s2_879 = __p2_879; \ + float32x2_t __ret_879; \ + __ret_879 = vfmlal_high_f16(__s0_879, __s1_879, (float16x4_t) {vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879), vgetq_lane_f16(__s2_879, __p3_879)}); \ + __ret_879; \ +}) +#else +#define vfmlal_laneq_high_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ + float32x2_t __s0_880 = __p0_880; \ + float16x4_t __s1_880 = __p1_880; \ + float16x8_t __s2_880 = __p2_880; \ + float32x2_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 1, 0); \ + float16x4_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \ + float16x8_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x2_t __ret_880; \ + __ret_880 = __noswap_vfmlal_high_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880), __noswap_vgetq_lane_f16(__rev2_880, __p3_880)}); \ + __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 1, 0); \ + __ret_880; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_low_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ + float32x4_t __s0_881 = __p0_881; \ + float16x8_t __s1_881 = __p1_881; \ + float16x8_t __s2_881 = __p2_881; \ + float32x4_t __ret_881; \ + __ret_881 = vfmlalq_low_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \ + __ret_881; \ +}) +#else +#define vfmlalq_laneq_low_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ + float32x4_t __s0_882 = __p0_882; \ + float16x8_t __s1_882 = __p1_882; \ + float16x8_t __s2_882 = __p2_882; \ + float32x4_t __rev0_882; __rev0_882 = 
__builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ + float16x8_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x4_t __ret_882; \ + __ret_882 = __noswap_vfmlalq_low_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \ + __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ + __ret_882; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_low_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \ + float32x2_t __s0_883 = __p0_883; \ + float16x4_t __s1_883 = __p1_883; \ + float16x8_t __s2_883 = __p2_883; \ + float32x2_t __ret_883; \ + __ret_883 = vfmlal_low_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \ + __ret_883; \ +}) +#else +#define vfmlal_laneq_low_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ + float32x2_t __s0_884 = __p0_884; \ + float16x4_t __s1_884 = __p1_884; \ + float16x8_t __s2_884 = __p2_884; \ + float32x2_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \ + float16x4_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 3, 2, 1, 0); \ + float16x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x2_t __ret_884; \ + __ret_884 = __noswap_vfmlal_low_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \ + __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \ + __ret_884; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ + float32x4_t __s0_885 = __p0_885; \ + float16x8_t __s1_885 = __p1_885; \ + float16x4_t __s2_885 = __p2_885; \ + float32x4_t __ret_885; \ + __ret_885 = vfmlslq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \ + __ret_885; \ +}) +#else +#define vfmlslq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ + float32x4_t __s0_886 = __p0_886; \ + float16x8_t __s1_886 = __p1_886; \ + float16x4_t __s2_886 = __p2_886; \ + float32x4_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \ + float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \ + float32x4_t __ret_886; \ + __ret_886 = __noswap_vfmlslq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), 
__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \ + __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \ + __ret_886; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ + float32x2_t __s0_887 = __p0_887; \ + float16x4_t __s1_887 = __p1_887; \ + float16x4_t __s2_887 = __p2_887; \ + float32x2_t __ret_887; \ + __ret_887 = vfmlsl_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \ + __ret_887; \ +}) +#else +#define vfmlsl_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ + float32x2_t __s0_888 = __p0_888; \ + float16x4_t __s1_888 = __p1_888; \ + float16x4_t __s2_888 = __p2_888; \ + float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ + float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \ + float16x4_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \ + float32x2_t __ret_888; \ + __ret_888 = __noswap_vfmlsl_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \ + __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ + __ret_888; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ + float32x4_t __s0_889 = __p0_889; \ + float16x8_t __s1_889 = __p1_889; \ + float16x4_t __s2_889 = __p2_889; \ + float32x4_t __ret_889; \ + __ret_889 = vfmlslq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \ + __ret_889; \ +}) +#else +#define vfmlslq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ + float32x4_t __s0_890 = __p0_890; \ + float16x8_t __s1_890 = __p1_890; \ + float16x4_t __s2_890 = __p2_890; \ + float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \ + float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \ + float32x4_t __ret_890; \ + __ret_890 = __noswap_vfmlslq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \ + __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \ + __ret_890; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vfmlsl_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ + float32x2_t __s0_891 = __p0_891; \ + float16x4_t __s1_891 = __p1_891; \ + float16x4_t __s2_891 = __p2_891; \ + float32x2_t __ret_891; \ + __ret_891 = vfmlsl_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \ + __ret_891; \ +}) +#else +#define vfmlsl_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ + float32x2_t __s0_892 = __p0_892; \ + float16x4_t __s1_892 = __p1_892; \ + float16x4_t __s2_892 = __p2_892; \ + float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ + float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \ + float16x4_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \ + float32x2_t __ret_892; \ + __ret_892 = __noswap_vfmlsl_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \ + __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ + __ret_892; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ + float32x4_t __s0_893 = __p0_893; \ + float16x8_t __s1_893 = __p1_893; \ + float16x8_t __s2_893 = __p2_893; \ + float32x4_t __ret_893; \ + __ret_893 = vfmlslq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \ + __ret_893; \ +}) +#else +#define vfmlslq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ + float32x4_t __s0_894 = __p0_894; \ + float16x8_t __s1_894 = __p1_894; \ + float16x8_t __s2_894 = __p2_894; \ + float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \ + float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x4_t __ret_894; \ + __ret_894 = __noswap_vfmlslq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \ + __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \ + __ret_894; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ + float32x2_t __s0_895 = __p0_895; \ + float16x4_t __s1_895 = __p1_895; \ + float16x8_t __s2_895 = __p2_895; \ + float32x2_t __ret_895; \ + __ret_895 = vfmlsl_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \ + 
__ret_895; \ +}) +#else +#define vfmlsl_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ + float32x2_t __s0_896 = __p0_896; \ + float16x4_t __s1_896 = __p1_896; \ + float16x8_t __s2_896 = __p2_896; \ + float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \ + float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \ + float16x8_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x2_t __ret_896; \ + __ret_896 = __noswap_vfmlsl_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \ + __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \ + __ret_896; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ + float32x4_t __s0_897 = __p0_897; \ + float16x8_t __s1_897 = __p1_897; \ + float16x8_t __s2_897 = __p2_897; \ + float32x4_t __ret_897; \ + __ret_897 = vfmlslq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \ + __ret_897; \ +}) +#else +#define vfmlslq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ + float32x4_t __s0_898 = __p0_898; \ + float16x8_t __s1_898 = __p1_898; \ + float16x8_t __s2_898 = __p2_898; \ + float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \ + float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x4_t __ret_898; \ + __ret_898 = __noswap_vfmlslq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \ + __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \ + __ret_898; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \ + float32x2_t __s0_899 = __p0_899; \ + float16x4_t __s1_899 = __p1_899; \ + float16x8_t __s2_899 = __p2_899; \ + float32x2_t __ret_899; \ + __ret_899 = vfmlsl_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \ + __ret_899; \ +}) +#else +#define vfmlsl_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \ + float32x2_t __s0_900 = __p0_900; \ + float16x4_t __s1_900 = __p1_900; \ + float16x8_t __s2_900 = __p2_900; \ + float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \ + float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \ + 
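/* each vector operand is reversed to little-endian lane order, using a shuffle sized to its own lane count */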
float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \ + float32x2_t __ret_900; \ + __ret_900 = __noswap_vfmlsl_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \ + __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \ + __ret_900; \ +}) +#endif + +#endif +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vmulh_lane_f16(__p0_901, __p1_901, __p2_901) __extension__ ({ \ + float16_t __s0_901 = __p0_901; \ + float16x4_t __s1_901 = __p1_901; \ + float16_t __ret_901; \ + __ret_901 = __s0_901 * vget_lane_f16(__s1_901, __p2_901); \ + __ret_901; \ +}) +#else +#define vmulh_lane_f16(__p0_902, __p1_902, __p2_902) __extension__ ({ \ + float16_t __s0_902 = __p0_902; \ + float16x4_t __s1_902 = __p1_902; \ + float16x4_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 3, 2, 1, 0); \ + float16_t __ret_902; \ + __ret_902 = __s0_902 * __noswap_vget_lane_f16(__rev1_902, __p2_902); \ + __ret_902; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_laneq_f16(__p0_903, __p1_903, __p2_903) __extension__ ({ \ + float16_t __s0_903 = __p0_903; \ + float16x8_t __s1_903 = __p1_903; \ + float16_t __ret_903; \ + __ret_903 = __s0_903 * vgetq_lane_f16(__s1_903, __p2_903); \ + __ret_903; \ +}) +#else +#define vmulh_laneq_f16(__p0_904, __p1_904, __p2_904) __extension__ ({ \ + float16_t __s0_904 = __p0_904; \ + float16x8_t __s1_904 = __p1_904; \ + float16x8_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret_904; \ + __ret_904 = __s0_904 * __noswap_vgetq_lane_f16(__rev1_904, __p2_904); \ + __ret_904; \ +}) +#endif + +#endif +#if defined(__ARM_FEATURE_MATMUL_INT8) +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_lane_s32(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ + int32x4_t __s0_905 = __p0_905; \ + int8x16_t __s1_905 = __p1_905; \ + uint8x8_t __s2_905 = __p2_905; \ + int32x4_t __ret_905; \ +uint8x8_t __reint_905 = __s2_905; \ + __ret_905 = vusdotq_s32(__s0_905, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_905, __p3_905)), __s1_905); \ + __ret_905; \ +}) +#else +#define vsudotq_lane_s32(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ + int32x4_t __s0_906 = __p0_906; \ + int8x16_t __s1_906 = __p1_906; \ + uint8x8_t __s2_906 = __p2_906; \ + int32x4_t __rev0_906; __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \ + int8x16_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_906; __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_906; \ +uint8x8_t __reint_906 = __rev2_906; \ + __ret_906 = __noswap_vusdotq_s32(__rev0_906, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_906, __p3_906)), __rev1_906); \ + __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \ + __ret_906; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_lane_s32(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ + int32x2_t __s0_907 = __p0_907; \ + int8x8_t __s1_907 = __p1_907; \ + uint8x8_t __s2_907 = __p2_907; \ + int32x2_t __ret_907; \ +uint8x8_t __reint_907 = __s2_907; \ + __ret_907 = vusdot_s32(__s0_907, 
(uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_907, __p3_907)), __s1_907); \ + __ret_907; \ +}) +#else +#define vsudot_lane_s32(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ + int32x2_t __s0_908 = __p0_908; \ + int8x8_t __s1_908 = __p1_908; \ + uint8x8_t __s2_908 = __p2_908; \ + int32x2_t __rev0_908; __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \ + int8x8_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_908; __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x2_t __ret_908; \ +uint8x8_t __reint_908 = __rev2_908; \ + __ret_908 = __noswap_vusdot_s32(__rev0_908, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_908, __p3_908)), __rev1_908); \ + __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \ + __ret_908; \ +}) +#endif + +#endif +#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) +__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2)); + return __ret; +} +__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2)); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_lane_s32(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ + int32_t __s0_909 = __p0_909; \ + int32_t __s1_909 = __p1_909; \ + int32x2_t __s2_909 = __p2_909; \ + int32_t __ret_909; \ + __ret_909 = vqadds_s32(__s0_909, vqrdmulhs_s32(__s1_909, vget_lane_s32(__s2_909, __p3_909))); \ + __ret_909; \ +}) +#else +#define vqrdmlahs_lane_s32(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ + int32_t __s0_910 = __p0_910; \ + int32_t __s1_910 = __p1_910; \ + int32x2_t __s2_910 = __p2_910; \ + int32x2_t __rev2_910; __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 1, 0); \ + int32_t __ret_910; \ + __ret_910 = vqadds_s32(__s0_910, vqrdmulhs_s32(__s1_910, __noswap_vget_lane_s32(__rev2_910, __p3_910))); \ + __ret_910; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_lane_s16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ + int16_t __s0_911 = __p0_911; \ + int16_t __s1_911 = __p1_911; \ + int16x4_t __s2_911 = __p2_911; \ + int16_t __ret_911; \ + __ret_911 = vqaddh_s16(__s0_911, vqrdmulhh_s16(__s1_911, vget_lane_s16(__s2_911, __p3_911))); \ + __ret_911; \ +}) +#else +#define vqrdmlahh_lane_s16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ + int16_t __s0_912 = __p0_912; \ + int16_t __s1_912 = __p1_912; \ + int16x4_t __s2_912 = __p2_912; \ + int16x4_t __rev2_912; __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 3, 2, 1, 0); \ + int16_t __ret_912; \ + __ret_912 = vqaddh_s16(__s0_912, vqrdmulhh_s16(__s1_912, __noswap_vget_lane_s16(__rev2_912, __p3_912))); \ + __ret_912; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_laneq_s32(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ + int32_t __s0_913 = __p0_913; \ + int32_t __s1_913 = __p1_913; \ + int32x4_t __s2_913 = __p2_913; \ + int32_t __ret_913; \ + __ret_913 = vqadds_s32(__s0_913, vqrdmulhs_s32(__s1_913, vgetq_lane_s32(__s2_913, __p3_913))); \ + __ret_913; \ +}) +#else +#define vqrdmlahs_laneq_s32(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ + int32_t __s0_914 = __p0_914; \ + int32_t __s1_914 = __p1_914; \ + int32x4_t __s2_914 = __p2_914; \ + int32x4_t __rev2_914; __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 3, 2, 1, 0); \ + 
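/* only the vector operand __s2_914 needs lane reversal; the scalar arguments are unaffected by endianness */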
int32_t __ret_914; \ + __ret_914 = vqadds_s32(__s0_914, vqrdmulhs_s32(__s1_914, __noswap_vgetq_lane_s32(__rev2_914, __p3_914))); \ + __ret_914; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_laneq_s16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ + int16_t __s0_915 = __p0_915; \ + int16_t __s1_915 = __p1_915; \ + int16x8_t __s2_915 = __p2_915; \ + int16_t __ret_915; \ + __ret_915 = vqaddh_s16(__s0_915, vqrdmulhh_s16(__s1_915, vgetq_lane_s16(__s2_915, __p3_915))); \ + __ret_915; \ +}) +#else +#define vqrdmlahh_laneq_s16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ + int16_t __s0_916 = __p0_916; \ + int16_t __s1_916 = __p1_916; \ + int16x8_t __s2_916 = __p2_916; \ + int16x8_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_916; \ + __ret_916 = vqaddh_s16(__s0_916, vqrdmulhh_s16(__s1_916, __noswap_vgetq_lane_s16(__rev2_916, __p3_916))); \ + __ret_916; \ +}) +#endif + +__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2)); + return __ret; +} +__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2)); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_lane_s32(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \ + int32_t __s0_917 = __p0_917; \ + int32_t __s1_917 = __p1_917; \ + int32x2_t __s2_917 = __p2_917; \ + int32_t __ret_917; \ + __ret_917 = vqsubs_s32(__s0_917, vqrdmulhs_s32(__s1_917, vget_lane_s32(__s2_917, __p3_917))); \ + __ret_917; \ +}) +#else +#define vqrdmlshs_lane_s32(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \ + int32_t __s0_918 = __p0_918; \ + int32_t __s1_918 = __p1_918; \ + int32x2_t __s2_918 = __p2_918; \ + int32x2_t __rev2_918; __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, 1, 0); \ + int32_t __ret_918; \ + __ret_918 = vqsubs_s32(__s0_918, vqrdmulhs_s32(__s1_918, __noswap_vget_lane_s32(__rev2_918, __p3_918))); \ + __ret_918; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_lane_s16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \ + int16_t __s0_919 = __p0_919; \ + int16_t __s1_919 = __p1_919; \ + int16x4_t __s2_919 = __p2_919; \ + int16_t __ret_919; \ + __ret_919 = vqsubh_s16(__s0_919, vqrdmulhh_s16(__s1_919, vget_lane_s16(__s2_919, __p3_919))); \ + __ret_919; \ +}) +#else +#define vqrdmlshh_lane_s16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \ + int16_t __s0_920 = __p0_920; \ + int16_t __s1_920 = __p1_920; \ + int16x4_t __s2_920 = __p2_920; \ + int16x4_t __rev2_920; __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, 3, 2, 1, 0); \ + int16_t __ret_920; \ + __ret_920 = vqsubh_s16(__s0_920, vqrdmulhh_s16(__s1_920, __noswap_vget_lane_s16(__rev2_920, __p3_920))); \ + __ret_920; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_laneq_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ + int32_t __s0_921 = __p0_921; \ + int32_t __s1_921 = __p1_921; \ + int32x4_t __s2_921 = __p2_921; \ + int32_t __ret_921; \ + __ret_921 = vqsubs_s32(__s0_921, vqrdmulhs_s32(__s1_921, vgetq_lane_s32(__s2_921, __p3_921))); \ + __ret_921; \ +}) +#else +#define vqrdmlshs_laneq_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ + int32_t __s0_922 = __p0_922; \ + int32_t __s1_922 = __p1_922; \ + int32x4_t __s2_922 = __p2_922; \ + int32x4_t __rev2_922; __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 3, 2, 
1, 0); \ + int32_t __ret_922; \ + __ret_922 = vqsubs_s32(__s0_922, vqrdmulhs_s32(__s1_922, __noswap_vgetq_lane_s32(__rev2_922, __p3_922))); \ + __ret_922; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_laneq_s16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ + int16_t __s0_923 = __p0_923; \ + int16_t __s1_923 = __p1_923; \ + int16x8_t __s2_923 = __p2_923; \ + int16_t __ret_923; \ + __ret_923 = vqsubh_s16(__s0_923, vqrdmulhh_s16(__s1_923, vgetq_lane_s16(__s2_923, __p3_923))); \ + __ret_923; \ +}) +#else +#define vqrdmlshh_laneq_s16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ + int16_t __s0_924 = __p0_924; \ + int16_t __s1_924 = __p1_924; \ + int16x8_t __s2_924 = __p2_924; \ + int16x8_t __rev2_924; __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_924; \ + __ret_924 = vqsubh_s16(__s0_924, vqrdmulhh_s16(__s1_924, __noswap_vgetq_lane_s16(__rev2_924, __p3_924))); \ + __ret_924; \ +}) +#endif + +#endif +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + 
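/* the __noswap_ helpers assume little-endian lane order: the operands were reversed above and the result is reversed back before returning */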
__ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); + 
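/* vmovl_high_s8 widens the high eight lanes to 16 bits before the add, so the sum cannot wrap at the narrow element width */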
return __ret; +} +#else +__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t 
vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ + poly64x2_t __s0_925 = __p0_925; \ + poly64x1_t __s2_925 = __p2_925; \ + poly64x2_t __ret_925; \ + __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \ + __ret_925; \ +}) +#else +#define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ + poly64x2_t __s0_926 = __p0_926; \ + poly64x1_t __s2_926 = __p2_926; \ + poly64x2_t __rev0_926; __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \ + poly64x2_t __ret_926; \ + __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \ + __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \ + __ret_926; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ + float64x2_t __s0_927 = __p0_927; \ + float64x1_t __s2_927 = __p2_927; \ + float64x2_t __ret_927; \ + __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \ + __ret_927; \ +}) +#else +#define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ + float64x2_t __s0_928 = __p0_928; \ + float64x1_t __s2_928 = __p2_928; \ + float64x2_t __rev0_928; __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \ + float64x2_t __ret_928; \ + __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \ + __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \ + __ret_928; \ +}) +#endif + +#define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ + poly64x1_t __s0_929 = __p0_929; \ + poly64x1_t __s2_929 = __p2_929; \ + poly64x1_t __ret_929; \ + __ret_929 = vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \ + 
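/* single-lane 64-bit vectors have the same in-register layout on either endianness, so vcopy_lane_p64 and vcopy_lane_f64 below need no big-endian variant */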
__ret_929; \ +}) +#define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ + float64x1_t __s0_930 = __p0_930; \ + float64x1_t __s2_930 = __p2_930; \ + float64x1_t __ret_930; \ + __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \ + __ret_930; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ + poly64x2_t __s0_931 = __p0_931; \ + poly64x2_t __s2_931 = __p2_931; \ + poly64x2_t __ret_931; \ + __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \ + __ret_931; \ +}) +#else +#define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ + poly64x2_t __s0_932 = __p0_932; \ + poly64x2_t __s2_932 = __p2_932; \ + poly64x2_t __rev0_932; __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \ + poly64x2_t __rev2_932; __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \ + poly64x2_t __ret_932; \ + __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \ + __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \ + __ret_932; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ + float64x2_t __s0_933 = __p0_933; \ + float64x2_t __s2_933 = __p2_933; \ + float64x2_t __ret_933; \ + __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \ + __ret_933; \ +}) +#else +#define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \ + float64x2_t __s0_934 = __p0_934; \ + float64x2_t __s2_934 = __p2_934; \ + float64x2_t __rev0_934; __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \ + float64x2_t __rev2_934; __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \ + float64x2_t __ret_934; \ + __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \ + __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \ + __ret_934; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \ + poly64x1_t __s0_935 = __p0_935; \ + poly64x2_t __s2_935 = __p2_935; \ + poly64x1_t __ret_935; \ + __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \ + __ret_935; \ +}) +#else +#define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \ + poly64x1_t __s0_936 = __p0_936; \ + poly64x2_t __s2_936 = __p2_936; \ + poly64x2_t __rev2_936; __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \ + poly64x1_t __ret_936; \ + __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \ + __ret_936; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \ + float64x1_t __s0_937 = __p0_937; \ + float64x2_t __s2_937 = __p2_937; \ + float64x1_t __ret_937; \ + __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \ + __ret_937; \ +}) +#else +#define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \ + float64x1_t __s0_938 = __p0_938; \ + float64x2_t __s2_938 = __p2_938; \ + float64x2_t __rev2_938; __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \ + float64x1_t __ret_938; \ + __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \ + __ret_938; \ +}) +#endif + 
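+/* The widening multiply-accumulate intrinsics below follow the pattern used throughout this header: the little-endian variant forwards to the corresponding 64-bit intrinsic on the high halves of its inputs, while the big-endian variant reverses every vector operand, calls a __noswap_-prefixed helper that assumes little-endian lane order, and reverses the result back. For example, vmlal_high_u8(acc, a, b) computes acc[i] + (uint16_t)a[8+i] * b[8+i] for i in 0..7. */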
+#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 
0); + int32x4_t __ret; + __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t 
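/* vmlsl_high_s32 (like the other *_high_* widening ops in this block) folds
   the "take the high half, then widen" step into one call:
   vmlsl_high_s32(a, b, c) == vmlsl_s32(a, vget_high_s32(b), vget_high_s32(c)),
   i.e. a - (b.hi * c.hi) with the products widened to 64 bits. A short
   sketch (illustrative name, little-endian assumed):

     int64x2_t acc_sub_hi(int64x2_t acc, int32x4_t b, int32x4_t c) {
       return vmlsl_high_s32(acc, b, c);
     }
*/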
__ret; + __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, 
int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \ + float64x1_t __s0_939 = __p0_939; \ + float64x1_t __s1_939 = __p1_939; \ + float64x1_t __ret_939; \ + float64_t __x_939 = vget_lane_f64(__s0_939, 0); \ + float64_t __y_939 = vget_lane_f64(__s1_939, __p2_939); \ + float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \ + __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \ + __ret_939; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \ + float64x1_t __s0_940 = __p0_940; \ + float64x2_t __s1_940 = __p1_940; \ + float64x1_t __ret_940; \ + float64_t __x_940 = vget_lane_f64(__s0_940, 0); \ + float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \ + float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \ + __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \ + __ret_940; \ +}) +#else +#define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \ + float64x1_t __s0_941 = __p0_941; \ + float64x2_t __s1_941 = __p1_941; \ + float64x2_t __rev1_941; __rev1_941 = __builtin_shufflevector(__s1_941, __s1_941, 1, 0); \ + float64x1_t __ret_941; \ + float64_t __x_941 = vget_lane_f64(__s0_941, 0); \ + float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \ + float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \ + __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \ + __ret_941; \ +}) +#endif + +#endif +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdl_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vabdl_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t 
vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdl_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdl_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vabdl_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vabdl_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); + return __ret; +} +#endif + +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; 
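/* vabal_* accumulates a widened absolute difference: per lane,
   __p0 + |__p1 - __p2| with the difference widened to the next element
   size; the *_high_* forms such as this one use the top halves of the
   128-bit inputs. A sum-of-absolute-differences sketch (illustrative name):

     uint16x8_t sad_hi(uint16x8_t acc, uint8x16_t a, uint8x16_t b) {
       return vabal_high_u8(acc, a, b);  // acc += |a.hi - b.hi|, widened
     }
*/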
+} +#else +__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 
0); + int64x2_t __ret; + __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif + +#undef __ai + +#endif /* if !defined(__ARM_NEON) */ +#endif /* ifndef __ARM_FP */ +#endif /* __ARM_NEON_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_sve.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_sve.h new file mode 100644 index 0000000..909634a --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/arm_sve.h @@ -0,0 +1,24043 @@ +/*===---- arm_sve.h - ARM SVE intrinsics -----------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_SVE_H +#define __ARM_SVE_H + +#if !defined(__ARM_FEATURE_SVE) +#error "SVE support not enabled" +#else + +#if !defined(__LITTLE_ENDIAN__) +#error "Big endian is currently not supported for arm_sve.h" +#endif +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#else +#include <stdbool.h> +#endif + +typedef __fp16 float16_t; +typedef float float32_t; +typedef double float64_t; +typedef __SVInt8_t svint8_t; +typedef __SVInt16_t svint16_t; +typedef __SVInt32_t svint32_t; +typedef __SVInt64_t svint64_t; +typedef __SVUint8_t svuint8_t; +typedef __SVUint16_t svuint16_t; +typedef __SVUint32_t svuint32_t; +typedef __SVUint64_t svuint64_t; +typedef __SVFloat16_t svfloat16_t; + +#if defined(__ARM_FEATURE_SVE_BF16) && !defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC) +#error "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when __ARM_FEATURE_SVE_BF16 is defined" +#endif + +#if defined(__ARM_FEATURE_SVE_BF16) +typedef __SVBFloat16_t svbfloat16_t; +#endif + +#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC) +#include <arm_bf16.h> +typedef __bf16 bfloat16_t; +#endif + +typedef __SVFloat32_t svfloat32_t; +typedef __SVFloat64_t svfloat64_t; +typedef __clang_svint8x2_t svint8x2_t; +typedef __clang_svint16x2_t svint16x2_t; +typedef __clang_svint32x2_t svint32x2_t; +typedef __clang_svint64x2_t svint64x2_t; +typedef __clang_svuint8x2_t svuint8x2_t; +typedef __clang_svuint16x2_t svuint16x2_t; +typedef __clang_svuint32x2_t svuint32x2_t; +typedef __clang_svuint64x2_t svuint64x2_t; +typedef __clang_svfloat16x2_t svfloat16x2_t; +typedef __clang_svfloat32x2_t svfloat32x2_t; +typedef __clang_svfloat64x2_t svfloat64x2_t; +typedef __clang_svint8x3_t svint8x3_t; +typedef __clang_svint16x3_t svint16x3_t; +typedef __clang_svint32x3_t svint32x3_t; +typedef 
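/* These typedefs map the ACLE type names onto the compiler's built-in
   sizeless SVE types; the x2/x3/x4 aliases are tuple types used by the
   multi-vector load/store intrinsics. Sizeless types may be passed and
   returned by value but cannot be placed in structs or arrays or used with
   sizeof. A sketch, assuming an SVE-enabled build (e.g. -march=armv8-a+sve)
   and the svmul_n_f64_x intrinsic declared further down in this header:

     svfloat64_t scale(svbool_t pg, svfloat64_t x, float64_t k) {
       return svmul_n_f64_x(pg, x, k);  // x * k in lanes where pg is true
     }
*/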
__clang_svint64x3_t svint64x3_t; +typedef __clang_svuint8x3_t svuint8x3_t; +typedef __clang_svuint16x3_t svuint16x3_t; +typedef __clang_svuint32x3_t svuint32x3_t; +typedef __clang_svuint64x3_t svuint64x3_t; +typedef __clang_svfloat16x3_t svfloat16x3_t; +typedef __clang_svfloat32x3_t svfloat32x3_t; +typedef __clang_svfloat64x3_t svfloat64x3_t; +typedef __clang_svint8x4_t svint8x4_t; +typedef __clang_svint16x4_t svint16x4_t; +typedef __clang_svint32x4_t svint32x4_t; +typedef __clang_svint64x4_t svint64x4_t; +typedef __clang_svuint8x4_t svuint8x4_t; +typedef __clang_svuint16x4_t svuint16x4_t; +typedef __clang_svuint32x4_t svuint32x4_t; +typedef __clang_svuint64x4_t svuint64x4_t; +typedef __clang_svfloat16x4_t svfloat16x4_t; +typedef __clang_svfloat32x4_t svfloat32x4_t; +typedef __clang_svfloat64x4_t svfloat64x4_t; +typedef __SVBool_t svbool_t; + +#ifdef __ARM_FEATURE_SVE_BF16 +typedef __clang_svbfloat16x2_t svbfloat16x2_t; +typedef __clang_svbfloat16x3_t svbfloat16x3_t; +typedef __clang_svbfloat16x4_t svbfloat16x4_t; +#endif +enum svpattern +{ + SV_POW2 = 0, + SV_VL1 = 1, + SV_VL2 = 2, + SV_VL3 = 3, + SV_VL4 = 4, + SV_VL5 = 5, + SV_VL6 = 6, + SV_VL7 = 7, + SV_VL8 = 8, + SV_VL16 = 9, + SV_VL32 = 10, + SV_VL64 = 11, + SV_VL128 = 12, + SV_VL256 = 13, + SV_MUL4 = 29, + SV_MUL3 = 30, + SV_ALL = 31 +}; + +enum svprfop +{ + SV_PLDL1KEEP = 0, + SV_PLDL1STRM = 1, + SV_PLDL2KEEP = 2, + SV_PLDL2STRM = 3, + SV_PLDL3KEEP = 4, + SV_PLDL3STRM = 5, + SV_PSTL1KEEP = 8, + SV_PSTL1STRM = 9, + SV_PSTL2KEEP = 10, + SV_PSTL2STRM = 11, + SV_PSTL3KEEP = 12, + SV_PSTL3STRM = 13 +}; + +/* Function attributes */ +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__)) + +#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__) +#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__) +#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__) +#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__) +#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__) +#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__) +#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__) +#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__) +#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__) +#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__) +#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__) +#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__) +#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__) +#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__) +#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__) +#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__) +#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__) +#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__) +#define svreinterpret_s16_f16(...) 
__builtin_sve_reinterpret_s16_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__) +#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__) +#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__) +#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__) +#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__) +#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__) +#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__) +#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__) +#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__) +#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__) +#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__) +#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__) +#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__) +#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__) +#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__) +#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__) +#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__) +#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__) +#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__) +#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__) +#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__) +#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__) +#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__) +#define svreinterpret_u8_s16(...) __builtin_sve_reinterpret_u8_s16(__VA_ARGS__) +#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__) +#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__) +#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__) +#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__) +#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__) +#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__) +#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_u8_f32(...) 
__builtin_sve_reinterpret_u8_f32(__VA_ARGS__) +#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__) +#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__) +#define svreinterpret_u16_s16(...) __builtin_sve_reinterpret_u16_s16(__VA_ARGS__) +#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__) +#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__) +#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__) +#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__) +#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__) +#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__) +#define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__) +#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__) +#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__) +#define svreinterpret_u32_s16(...) __builtin_sve_reinterpret_u32_s16(__VA_ARGS__) +#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__) +#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__) +#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__) +#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__) +#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__) +#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__) +#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__) +#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__) +#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__) +#define svreinterpret_u64_s16(...) __builtin_sve_reinterpret_u64_s16(__VA_ARGS__) +#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__) +#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__) +#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__) +#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__) +#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__) +#define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__) +#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__) +#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__) +#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__) +#define svreinterpret_f16_s16(...) 
__builtin_sve_reinterpret_f16_s16(__VA_ARGS__) +#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__) +#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__) +#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__) +#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__) +#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__) +#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__) +#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__) +#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__) +#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__) +#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__) +#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__) +#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__) +#define svreinterpret_f32_u16(...) 
__builtin_sve_reinterpret_f32_u16(__VA_ARGS__) +#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__) +#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__) +#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__) +#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__) +#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__) +#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__) +#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__) +#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__) +#define svreinterpret_f64_u8(...) __builtin_sve_reinterpret_f64_u8(__VA_ARGS__) +#define svreinterpret_f64_u16(...) __builtin_sve_reinterpret_f64_u16(__VA_ARGS__) +#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__) +#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__) +#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__) +#if defined(__ARM_FEATURE_SVE_BF16) +#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__) +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__) +#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__) +__aio svint8_t svreinterpret_s8(svint8_t op) { + return __builtin_sve_reinterpret_s8_s8(op); +} + +__aio svint8_t svreinterpret_s8(svint16_t op) { + return __builtin_sve_reinterpret_s8_s16(op); +} + +__aio svint8_t svreinterpret_s8(svint32_t op) { + return __builtin_sve_reinterpret_s8_s32(op); +} + +__aio svint8_t svreinterpret_s8(svint64_t op) { + return __builtin_sve_reinterpret_s8_s64(op); +} + +__aio svint8_t svreinterpret_s8(svuint8_t op) { + return __builtin_sve_reinterpret_s8_u8(op); +} + +__aio svint8_t svreinterpret_s8(svuint16_t op) { + return __builtin_sve_reinterpret_s8_u16(op); +} + +__aio svint8_t svreinterpret_s8(svuint32_t op) { + return __builtin_sve_reinterpret_s8_u32(op); +} + +__aio svint8_t svreinterpret_s8(svuint64_t op) { + return __builtin_sve_reinterpret_s8_u64(op); +} + +__aio svint8_t svreinterpret_s8(svfloat16_t op) { + return __builtin_sve_reinterpret_s8_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svint8_t svreinterpret_s8(svbfloat16_t op) { + return __builtin_sve_reinterpret_s8_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svint8_t svreinterpret_s8(svfloat32_t op) { + return __builtin_sve_reinterpret_s8_f32(op); +} + +__aio svint8_t svreinterpret_s8(svfloat64_t op) { + return __builtin_sve_reinterpret_s8_f64(op); +} + +__aio svint16_t svreinterpret_s16(svint8_t op) { + return __builtin_sve_reinterpret_s16_s8(op); +} + +__aio svint16_t svreinterpret_s16(svint16_t op) { + return __builtin_sve_reinterpret_s16_s16(op); +} + +__aio svint16_t svreinterpret_s16(svint32_t op) { + return __builtin_sve_reinterpret_s16_s32(op); +} + +__aio svint16_t svreinterpret_s16(svint64_t op) { + return __builtin_sve_reinterpret_s16_s64(op); +} + +__aio svint16_t svreinterpret_s16(svuint8_t op) { + return __builtin_sve_reinterpret_s16_u8(op); +} + +__aio 
svint16_t svreinterpret_s16(svuint16_t op) { + return __builtin_sve_reinterpret_s16_u16(op); +} + +__aio svint16_t svreinterpret_s16(svuint32_t op) { + return __builtin_sve_reinterpret_s16_u32(op); +} + +__aio svint16_t svreinterpret_s16(svuint64_t op) { + return __builtin_sve_reinterpret_s16_u64(op); +} + +__aio svint16_t svreinterpret_s16(svfloat16_t op) { + return __builtin_sve_reinterpret_s16_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svint16_t svreinterpret_s16(svbfloat16_t op) { + return __builtin_sve_reinterpret_s16_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svint16_t svreinterpret_s16(svfloat32_t op) { + return __builtin_sve_reinterpret_s16_f32(op); +} + +__aio svint16_t svreinterpret_s16(svfloat64_t op) { + return __builtin_sve_reinterpret_s16_f64(op); +} + +__aio svint32_t svreinterpret_s32(svint8_t op) { + return __builtin_sve_reinterpret_s32_s8(op); +} + +__aio svint32_t svreinterpret_s32(svint16_t op) { + return __builtin_sve_reinterpret_s32_s16(op); +} + +__aio svint32_t svreinterpret_s32(svint32_t op) { + return __builtin_sve_reinterpret_s32_s32(op); +} + +__aio svint32_t svreinterpret_s32(svint64_t op) { + return __builtin_sve_reinterpret_s32_s64(op); +} + +__aio svint32_t svreinterpret_s32(svuint8_t op) { + return __builtin_sve_reinterpret_s32_u8(op); +} + +__aio svint32_t svreinterpret_s32(svuint16_t op) { + return __builtin_sve_reinterpret_s32_u16(op); +} + +__aio svint32_t svreinterpret_s32(svuint32_t op) { + return __builtin_sve_reinterpret_s32_u32(op); +} + +__aio svint32_t svreinterpret_s32(svuint64_t op) { + return __builtin_sve_reinterpret_s32_u64(op); +} + +__aio svint32_t svreinterpret_s32(svfloat16_t op) { + return __builtin_sve_reinterpret_s32_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svint32_t svreinterpret_s32(svbfloat16_t op) { + return __builtin_sve_reinterpret_s32_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svint32_t svreinterpret_s32(svfloat32_t op) { + return __builtin_sve_reinterpret_s32_f32(op); +} + +__aio svint32_t svreinterpret_s32(svfloat64_t op) { + return __builtin_sve_reinterpret_s32_f64(op); +} + +__aio svint64_t svreinterpret_s64(svint8_t op) { + return __builtin_sve_reinterpret_s64_s8(op); +} + +__aio svint64_t svreinterpret_s64(svint16_t op) { + return __builtin_sve_reinterpret_s64_s16(op); +} + +__aio svint64_t svreinterpret_s64(svint32_t op) { + return __builtin_sve_reinterpret_s64_s32(op); +} + +__aio svint64_t svreinterpret_s64(svint64_t op) { + return __builtin_sve_reinterpret_s64_s64(op); +} + +__aio svint64_t svreinterpret_s64(svuint8_t op) { + return __builtin_sve_reinterpret_s64_u8(op); +} + +__aio svint64_t svreinterpret_s64(svuint16_t op) { + return __builtin_sve_reinterpret_s64_u16(op); +} + +__aio svint64_t svreinterpret_s64(svuint32_t op) { + return __builtin_sve_reinterpret_s64_u32(op); +} + +__aio svint64_t svreinterpret_s64(svuint64_t op) { + return __builtin_sve_reinterpret_s64_u64(op); +} + +__aio svint64_t svreinterpret_s64(svfloat16_t op) { + return __builtin_sve_reinterpret_s64_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svint64_t svreinterpret_s64(svbfloat16_t op) { + return __builtin_sve_reinterpret_s64_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svint64_t svreinterpret_s64(svfloat32_t op) { + return __builtin_sve_reinterpret_s64_f32(op); +} + +__aio svint64_t svreinterpret_s64(svfloat64_t op) { + return __builtin_sve_reinterpret_s64_f64(op); +} + +__aio svuint8_t svreinterpret_u8(svint8_t 
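/* Each svreinterpret_* overload (the __aio functions in this block) is a
   pure bit-level cast: the register contents are unchanged and no
   instruction needs to be emitted. Sketch, assuming svdup_n_u8, which is
   declared further down in this header:

     svuint8_t u = svdup_n_u8(0xff);
     svint8_t  s = svreinterpret_s8(u);  // every lane now reads as -1
*/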
op) { + return __builtin_sve_reinterpret_u8_s8(op); +} + +__aio svuint8_t svreinterpret_u8(svint16_t op) { + return __builtin_sve_reinterpret_u8_s16(op); +} + +__aio svuint8_t svreinterpret_u8(svint32_t op) { + return __builtin_sve_reinterpret_u8_s32(op); +} + +__aio svuint8_t svreinterpret_u8(svint64_t op) { + return __builtin_sve_reinterpret_u8_s64(op); +} + +__aio svuint8_t svreinterpret_u8(svuint8_t op) { + return __builtin_sve_reinterpret_u8_u8(op); +} + +__aio svuint8_t svreinterpret_u8(svuint16_t op) { + return __builtin_sve_reinterpret_u8_u16(op); +} + +__aio svuint8_t svreinterpret_u8(svuint32_t op) { + return __builtin_sve_reinterpret_u8_u32(op); +} + +__aio svuint8_t svreinterpret_u8(svuint64_t op) { + return __builtin_sve_reinterpret_u8_u64(op); +} + +__aio svuint8_t svreinterpret_u8(svfloat16_t op) { + return __builtin_sve_reinterpret_u8_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svuint8_t svreinterpret_u8(svbfloat16_t op) { + return __builtin_sve_reinterpret_u8_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svuint8_t svreinterpret_u8(svfloat32_t op) { + return __builtin_sve_reinterpret_u8_f32(op); +} + +__aio svuint8_t svreinterpret_u8(svfloat64_t op) { + return __builtin_sve_reinterpret_u8_f64(op); +} + +__aio svuint16_t svreinterpret_u16(svint8_t op) { + return __builtin_sve_reinterpret_u16_s8(op); +} + +__aio svuint16_t svreinterpret_u16(svint16_t op) { + return __builtin_sve_reinterpret_u16_s16(op); +} + +__aio svuint16_t svreinterpret_u16(svint32_t op) { + return __builtin_sve_reinterpret_u16_s32(op); +} + +__aio svuint16_t svreinterpret_u16(svint64_t op) { + return __builtin_sve_reinterpret_u16_s64(op); +} + +__aio svuint16_t svreinterpret_u16(svuint8_t op) { + return __builtin_sve_reinterpret_u16_u8(op); +} + +__aio svuint16_t svreinterpret_u16(svuint16_t op) { + return __builtin_sve_reinterpret_u16_u16(op); +} + +__aio svuint16_t svreinterpret_u16(svuint32_t op) { + return __builtin_sve_reinterpret_u16_u32(op); +} + +__aio svuint16_t svreinterpret_u16(svuint64_t op) { + return __builtin_sve_reinterpret_u16_u64(op); +} + +__aio svuint16_t svreinterpret_u16(svfloat16_t op) { + return __builtin_sve_reinterpret_u16_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svuint16_t svreinterpret_u16(svbfloat16_t op) { + return __builtin_sve_reinterpret_u16_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svuint16_t svreinterpret_u16(svfloat32_t op) { + return __builtin_sve_reinterpret_u16_f32(op); +} + +__aio svuint16_t svreinterpret_u16(svfloat64_t op) { + return __builtin_sve_reinterpret_u16_f64(op); +} + +__aio svuint32_t svreinterpret_u32(svint8_t op) { + return __builtin_sve_reinterpret_u32_s8(op); +} + +__aio svuint32_t svreinterpret_u32(svint16_t op) { + return __builtin_sve_reinterpret_u32_s16(op); +} + +__aio svuint32_t svreinterpret_u32(svint32_t op) { + return __builtin_sve_reinterpret_u32_s32(op); +} + +__aio svuint32_t svreinterpret_u32(svint64_t op) { + return __builtin_sve_reinterpret_u32_s64(op); +} + +__aio svuint32_t svreinterpret_u32(svuint8_t op) { + return __builtin_sve_reinterpret_u32_u8(op); +} + +__aio svuint32_t svreinterpret_u32(svuint16_t op) { + return __builtin_sve_reinterpret_u32_u16(op); +} + +__aio svuint32_t svreinterpret_u32(svuint32_t op) { + return __builtin_sve_reinterpret_u32_u32(op); +} + +__aio svuint32_t svreinterpret_u32(svuint64_t op) { + return __builtin_sve_reinterpret_u32_u64(op); +} + +__aio svuint32_t svreinterpret_u32(svfloat16_t op) { + return 
__builtin_sve_reinterpret_u32_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svuint32_t svreinterpret_u32(svbfloat16_t op) { + return __builtin_sve_reinterpret_u32_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svuint32_t svreinterpret_u32(svfloat32_t op) { + return __builtin_sve_reinterpret_u32_f32(op); +} + +__aio svuint32_t svreinterpret_u32(svfloat64_t op) { + return __builtin_sve_reinterpret_u32_f64(op); +} + +__aio svuint64_t svreinterpret_u64(svint8_t op) { + return __builtin_sve_reinterpret_u64_s8(op); +} + +__aio svuint64_t svreinterpret_u64(svint16_t op) { + return __builtin_sve_reinterpret_u64_s16(op); +} + +__aio svuint64_t svreinterpret_u64(svint32_t op) { + return __builtin_sve_reinterpret_u64_s32(op); +} + +__aio svuint64_t svreinterpret_u64(svint64_t op) { + return __builtin_sve_reinterpret_u64_s64(op); +} + +__aio svuint64_t svreinterpret_u64(svuint8_t op) { + return __builtin_sve_reinterpret_u64_u8(op); +} + +__aio svuint64_t svreinterpret_u64(svuint16_t op) { + return __builtin_sve_reinterpret_u64_u16(op); +} + +__aio svuint64_t svreinterpret_u64(svuint32_t op) { + return __builtin_sve_reinterpret_u64_u32(op); +} + +__aio svuint64_t svreinterpret_u64(svuint64_t op) { + return __builtin_sve_reinterpret_u64_u64(op); +} + +__aio svuint64_t svreinterpret_u64(svfloat16_t op) { + return __builtin_sve_reinterpret_u64_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svuint64_t svreinterpret_u64(svbfloat16_t op) { + return __builtin_sve_reinterpret_u64_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svuint64_t svreinterpret_u64(svfloat32_t op) { + return __builtin_sve_reinterpret_u64_f32(op); +} + +__aio svuint64_t svreinterpret_u64(svfloat64_t op) { + return __builtin_sve_reinterpret_u64_f64(op); +} + +__aio svfloat16_t svreinterpret_f16(svint8_t op) { + return __builtin_sve_reinterpret_f16_s8(op); +} + +__aio svfloat16_t svreinterpret_f16(svint16_t op) { + return __builtin_sve_reinterpret_f16_s16(op); +} + +__aio svfloat16_t svreinterpret_f16(svint32_t op) { + return __builtin_sve_reinterpret_f16_s32(op); +} + +__aio svfloat16_t svreinterpret_f16(svint64_t op) { + return __builtin_sve_reinterpret_f16_s64(op); +} + +__aio svfloat16_t svreinterpret_f16(svuint8_t op) { + return __builtin_sve_reinterpret_f16_u8(op); +} + +__aio svfloat16_t svreinterpret_f16(svuint16_t op) { + return __builtin_sve_reinterpret_f16_u16(op); +} + +__aio svfloat16_t svreinterpret_f16(svuint32_t op) { + return __builtin_sve_reinterpret_f16_u32(op); +} + +__aio svfloat16_t svreinterpret_f16(svuint64_t op) { + return __builtin_sve_reinterpret_f16_u64(op); +} + +__aio svfloat16_t svreinterpret_f16(svfloat16_t op) { + return __builtin_sve_reinterpret_f16_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svfloat16_t svreinterpret_f16(svbfloat16_t op) { + return __builtin_sve_reinterpret_f16_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svfloat16_t svreinterpret_f16(svfloat32_t op) { + return __builtin_sve_reinterpret_f16_f32(op); +} + +__aio svfloat16_t svreinterpret_f16(svfloat64_t op) { + return __builtin_sve_reinterpret_f16_f64(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svint8_t op) { + return __builtin_sve_reinterpret_bf16_s8(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svint16_t op) { + return __builtin_sve_reinterpret_bf16_s16(op); +} + +#endif /* #if 
defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svint32_t op) { + return __builtin_sve_reinterpret_bf16_s32(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svint64_t op) { + return __builtin_sve_reinterpret_bf16_s64(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svuint8_t op) { + return __builtin_sve_reinterpret_bf16_u8(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svuint16_t op) { + return __builtin_sve_reinterpret_bf16_u16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svuint32_t op) { + return __builtin_sve_reinterpret_bf16_u32(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svuint64_t op) { + return __builtin_sve_reinterpret_bf16_u64(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svfloat16_t op) { + return __builtin_sve_reinterpret_bf16_f16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svbfloat16_t op) { + return __builtin_sve_reinterpret_bf16_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svfloat32_t op) { + return __builtin_sve_reinterpret_bf16_f32(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svbfloat16_t svreinterpret_bf16(svfloat64_t op) { + return __builtin_sve_reinterpret_bf16_f64(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svfloat32_t svreinterpret_f32(svint8_t op) { + return __builtin_sve_reinterpret_f32_s8(op); +} + +__aio svfloat32_t svreinterpret_f32(svint16_t op) { + return __builtin_sve_reinterpret_f32_s16(op); +} + +__aio svfloat32_t svreinterpret_f32(svint32_t op) { + return __builtin_sve_reinterpret_f32_s32(op); +} + +__aio svfloat32_t svreinterpret_f32(svint64_t op) { + return __builtin_sve_reinterpret_f32_s64(op); +} + +__aio svfloat32_t svreinterpret_f32(svuint8_t op) { + return __builtin_sve_reinterpret_f32_u8(op); +} + +__aio svfloat32_t svreinterpret_f32(svuint16_t op) { + return __builtin_sve_reinterpret_f32_u16(op); +} + +__aio svfloat32_t svreinterpret_f32(svuint32_t op) { + return __builtin_sve_reinterpret_f32_u32(op); +} + +__aio svfloat32_t svreinterpret_f32(svuint64_t op) { + return __builtin_sve_reinterpret_f32_u64(op); +} + +__aio svfloat32_t svreinterpret_f32(svfloat16_t op) { + return __builtin_sve_reinterpret_f32_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svfloat32_t svreinterpret_f32(svbfloat16_t op) { + return __builtin_sve_reinterpret_f32_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svfloat32_t svreinterpret_f32(svfloat32_t op) { + return __builtin_sve_reinterpret_f32_f32(op); +} + +__aio svfloat32_t svreinterpret_f32(svfloat64_t op) { + return __builtin_sve_reinterpret_f32_f64(op); +} + +__aio svfloat64_t svreinterpret_f64(svint8_t op) { + return __builtin_sve_reinterpret_f64_s8(op); +} + +__aio svfloat64_t svreinterpret_f64(svint16_t op) { + return 
__builtin_sve_reinterpret_f64_s16(op); +} + +__aio svfloat64_t svreinterpret_f64(svint32_t op) { + return __builtin_sve_reinterpret_f64_s32(op); +} + +__aio svfloat64_t svreinterpret_f64(svint64_t op) { + return __builtin_sve_reinterpret_f64_s64(op); +} + +__aio svfloat64_t svreinterpret_f64(svuint8_t op) { + return __builtin_sve_reinterpret_f64_u8(op); +} + +__aio svfloat64_t svreinterpret_f64(svuint16_t op) { + return __builtin_sve_reinterpret_f64_u16(op); +} + +__aio svfloat64_t svreinterpret_f64(svuint32_t op) { + return __builtin_sve_reinterpret_f64_u32(op); +} + +__aio svfloat64_t svreinterpret_f64(svuint64_t op) { + return __builtin_sve_reinterpret_f64_u64(op); +} + +__aio svfloat64_t svreinterpret_f64(svfloat16_t op) { + return __builtin_sve_reinterpret_f64_f16(op); +} + +#if defined(__ARM_FEATURE_SVE_BF16) +__aio svfloat64_t svreinterpret_f64(svbfloat16_t op) { + return __builtin_sve_reinterpret_f64_bf16(op); +} + +#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ +__aio svfloat64_t svreinterpret_f64(svfloat32_t op) { + return __builtin_sve_reinterpret_f64_f32(op); +} + +__aio svfloat64_t svreinterpret_f64(svfloat64_t op) { + return __builtin_sve_reinterpret_f64_f64(op); +} + +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) +svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) +svfloat32_t svabd_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) +svfloat16_t svabd_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) +svfloat64_t svabd_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) +svfloat32_t svabd_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) +svfloat16_t svabd_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) +svfloat64_t svabd_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) +svfloat32_t svabd_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) +svfloat16_t svabd_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) +svint8_t svabd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) +svint32_t svabd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) +svint64_t svabd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) +svint16_t svabd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) +svint8_t svabd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) +svint32_t svabd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) +svint64_t svabd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) +svint16_t svabd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) +svint8_t svabd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) +svint32_t svabd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) +svint64_t svabd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) +svint16_t svabd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) +svuint8_t svabd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) +svuint32_t svabd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) +svuint64_t svabd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) +svuint16_t svabd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) +svuint8_t svabd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) +svuint32_t svabd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) +svuint64_t svabd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) +svuint16_t svabd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) +svuint8_t svabd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) +svuint32_t svabd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) +svuint64_t svabd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) +svuint16_t svabd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) +svfloat64_t svabd_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) +svfloat32_t svabd_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) +svfloat16_t svabd_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) +svfloat64_t svabd_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) +svfloat32_t svabd_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) +svfloat16_t svabd_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) +svfloat64_t svabd_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) +svfloat32_t svabd_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) +svfloat16_t svabd_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) +svint8_t svabd_s8_m(svbool_t, svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) +svint32_t svabd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) +svint64_t svabd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) +svint16_t svabd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) +svint8_t svabd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) +svint32_t svabd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) +svint64_t svabd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) +svint16_t svabd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) +svint8_t svabd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) +svint32_t svabd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) +svint64_t svabd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) +svint16_t svabd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) +svuint8_t svabd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) +svuint32_t svabd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) +svuint64_t svabd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) +svuint16_t svabd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) +svuint8_t svabd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) +svuint32_t svabd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) +svuint64_t svabd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) +svuint16_t svabd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) +svuint8_t svabd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) +svuint32_t svabd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) +svuint64_t svabd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) +svuint16_t svabd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) +svfloat64_t svabs_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) +svfloat32_t svabs_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) +svfloat16_t svabs_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) +svfloat64_t 
svabs_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) +svfloat32_t svabs_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) +svfloat16_t svabs_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) +svfloat64_t svabs_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) +svfloat32_t svabs_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) +svfloat16_t svabs_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) +svint8_t svabs_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) +svint32_t svabs_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) +svint64_t svabs_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) +svint16_t svabs_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) +svint8_t svabs_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) +svint32_t svabs_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) +svint64_t svabs_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) +svint16_t svabs_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) +svint8_t svabs_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) +svint32_t svabs_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) +svint64_t svabs_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) +svint16_t svabs_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) +svbool_t svacge_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) +svbool_t svacge_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) +svbool_t svacge_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) +svbool_t svacge_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) +svbool_t svacge_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) +svbool_t svacge_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) +svbool_t svacgt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) +svbool_t svacgt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) +svbool_t svacgt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) +svbool_t svacgt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) +svbool_t svacgt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) +svbool_t svacgt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) +svbool_t svacle_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) +svbool_t svacle_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) +svbool_t svacle_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) +svbool_t svacle_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) +svbool_t svacle_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) +svbool_t svacle_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) +svbool_t svaclt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) +svbool_t svaclt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) +svbool_t svaclt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) +svbool_t svaclt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) +svbool_t svaclt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) +svbool_t svaclt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) +svfloat64_t svadd_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) +svfloat32_t svadd_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) +svfloat16_t svadd_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) +svfloat64_t svadd_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) +svfloat32_t svadd_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) +svfloat16_t svadd_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) +svfloat64_t svadd_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) +svfloat32_t svadd_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) +svfloat16_t svadd_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) +svuint8_t svadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) +svuint32_t svadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) +svuint64_t svadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) +svuint16_t svadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) +svint8_t svadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) +svint32_t svadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) +svint64_t svadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) +svint16_t svadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) +svuint8_t svadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) +svuint32_t svadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) +svuint64_t svadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) +svuint16_t svadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) +svint8_t svadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) +svint32_t svadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) +svint64_t svadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) +svint16_t svadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) +svuint8_t svadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) +svuint32_t svadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) +svuint64_t svadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) +svuint16_t svadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) +svint8_t svadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) +svint32_t svadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) +svint64_t svadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) +svint16_t svadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) +svfloat64_t svadd_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) +svfloat32_t svadd_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) +svfloat16_t svadd_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) +svfloat64_t svadd_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) +svfloat32_t svadd_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) +svfloat16_t svadd_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) +svfloat64_t svadd_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) +svfloat32_t svadd_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) +svfloat16_t svadd_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) +svuint8_t svadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) +svuint32_t svadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) +svuint64_t svadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) +svuint16_t svadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) +svint8_t svadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) +svint32_t svadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) +svint64_t svadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) +svint16_t svadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) +svuint8_t svadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) +svuint32_t svadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) +svuint64_t svadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) +svuint16_t svadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) +svint8_t svadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) +svint32_t svadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) +svint64_t svadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) +svint16_t svadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) +svuint8_t svadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) +svuint32_t svadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) +svuint64_t svadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) +svuint16_t svadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) +svint8_t svadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) +svint32_t svadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) +svint64_t 
svadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) +svint16_t svadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) +float64_t svadda_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) +float32_t svadda_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) +float16_t svadda_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) +int64_t svaddv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) +int64_t svaddv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) +int64_t svaddv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) +int64_t svaddv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) +uint64_t svaddv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) +uint64_t svaddv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) +uint64_t svaddv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) +uint64_t svaddv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) +float64_t svaddv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) +float32_t svaddv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) +float16_t svaddv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) +svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) +svbool_t svand_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) +svuint8_t svand_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) +svuint32_t svand_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) +svuint64_t svand_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) +svuint16_t svand_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) +svint8_t svand_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) +svint32_t svand_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) +svint64_t svand_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) +svint16_t svand_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) +svuint8_t svand_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) +svuint32_t svand_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) +svuint64_t svand_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) +svuint16_t svand_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) +svint8_t svand_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) +svint32_t svand_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) +svint64_t svand_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) +svint16_t svand_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) +svuint8_t svand_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) +svuint32_t svand_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) +svuint64_t svand_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) +svuint16_t svand_n_u16_z(svbool_t, svuint16_t, uint16_t); 
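(A minimal usage sketch, not part of the upstream header; the function names here are illustrative. It assumes clang targeting SVE, e.g. -march=armv8-a+sve, and uses only intrinsics declared in this file. It shows the two conventions the declarations above follow: svreinterpret_* is a pure bit-pattern cast between vector types, and the _m/_x/_z suffixes on predicated operations select merging, don't-care, and zeroing behaviour for inactive lanes, while _n variants take a scalar operand broadcast to every lane.)

#include <arm_sve.h>

/* Illustrative sketch only. Bit-pattern cast: no value conversion occurs. */
svfloat32_t bits_as_f32(svint32_t v) {
  return svreinterpret_f32(v);
}

/* Contrasts the predication policies, using the svadd_s32 forms declared
   earlier in this file. */
svint32_t predication_demo(svbool_t pg, svint32_t a, svint32_t b) {
  svint32_t m = svadd_s32_m(pg, a, b);   /* inactive lanes keep values from a */
  svint32_t z = svadd_s32_z(pg, a, b);   /* inactive lanes are set to zero */
  svint32_t x = svadd_s32_x(pg, a, b);   /* inactive lane values unspecified */
  (void)x;
  return svadd_n_s32_m(pg, svadd_s32_m(pg, m, z), 1); /* scalar 1 broadcast */
}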
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) +svint8_t svand_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) +svint32_t svand_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) +svint64_t svand_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) +svint16_t svand_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) +svuint8_t svand_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) +svuint32_t svand_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) +svuint64_t svand_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) +svuint16_t svand_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) +svint8_t svand_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) +svint32_t svand_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) +svint64_t svand_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) +svint16_t svand_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) +svuint8_t svand_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) +svuint32_t svand_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) +svuint64_t svand_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) +svuint16_t svand_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) +svint8_t svand_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) +svint32_t svand_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) +svint64_t svand_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) +svint16_t svand_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) +svuint8_t svand_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) +svuint32_t svand_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) +svuint64_t svand_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) +svuint16_t svand_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) +svint8_t svand_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) +svint32_t svand_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) +svint64_t svand_s64_z(svbool_t, 
svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) +svint16_t svand_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) +uint8_t svandv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) +uint32_t svandv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) +uint64_t svandv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) +uint16_t svandv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) +int8_t svandv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) +int32_t svandv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) +int64_t svandv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) +int16_t svandv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) +svint8_t svasr_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) +svint32_t svasr_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) +svint64_t svasr_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) +svint16_t svasr_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) +svint8_t svasr_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) +svint32_t svasr_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) +svint64_t svasr_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) +svint16_t svasr_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) +svint8_t svasr_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) +svint32_t svasr_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) +svint64_t svasr_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) +svint16_t svasr_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) +svint8_t svasr_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) +svint32_t svasr_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) +svint64_t svasr_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) +svint16_t svasr_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) +svint8_t svasr_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) +svint32_t svasr_s32_x(svbool_t, svint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) +svint64_t svasr_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) +svint16_t svasr_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) +svint8_t svasr_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) +svint32_t svasr_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) +svint64_t svasr_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) +svint16_t svasr_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) +svint8_t svasr_wide_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) +svint32_t svasr_wide_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) +svint16_t svasr_wide_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) +svint8_t svasr_wide_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) +svint32_t svasr_wide_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) +svint16_t svasr_wide_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) +svint8_t svasr_wide_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) +svint32_t svasr_wide_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) +svint16_t svasr_wide_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) +svint8_t svasr_wide_s8_m(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) +svint32_t svasr_wide_s32_m(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) +svint16_t svasr_wide_s16_m(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) +svint8_t svasr_wide_s8_x(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) +svint32_t svasr_wide_s32_x(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) +svint16_t svasr_wide_s16_x(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) +svint8_t svasr_wide_s8_z(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) +svint32_t svasr_wide_s32_z(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) +svint16_t svasr_wide_s16_z(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) +svint8_t svasrd_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) +svint32_t svasrd_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) +svint64_t svasrd_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) +svint16_t svasrd_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) +svint8_t svasrd_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) +svint32_t svasrd_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) +svint64_t svasrd_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) +svint16_t svasrd_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) +svint8_t svasrd_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) +svint32_t svasrd_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) +svint64_t svasrd_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) +svint16_t svasrd_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) +svbool_t svbic_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) +svuint8_t svbic_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) +svuint32_t svbic_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) +svuint64_t svbic_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) +svuint16_t svbic_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) +svint8_t svbic_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) +svint32_t svbic_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) +svint64_t svbic_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) +svint16_t svbic_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) +svuint8_t svbic_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) +svuint32_t svbic_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) +svuint64_t svbic_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) +svuint16_t svbic_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) +svint8_t svbic_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) +svint32_t svbic_n_s32_x(svbool_t, svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) +svint64_t svbic_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) +svint16_t svbic_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) +svuint8_t svbic_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) +svuint32_t svbic_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) +svuint64_t svbic_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) +svuint16_t svbic_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) +svint8_t svbic_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) +svint32_t svbic_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) +svint64_t svbic_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) +svint16_t svbic_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) +svuint8_t svbic_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) +svuint32_t svbic_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) +svuint64_t svbic_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) +svuint16_t svbic_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) +svint8_t svbic_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) +svint32_t svbic_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) +svint64_t svbic_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) +svint16_t svbic_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) +svuint8_t svbic_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) +svuint32_t svbic_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) +svuint64_t svbic_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) +svuint16_t svbic_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) +svint8_t svbic_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) +svint32_t svbic_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) +svint64_t svbic_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) +svint16_t svbic_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) +svuint8_t 
svbic_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) +svuint32_t svbic_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) +svuint64_t svbic_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) +svuint16_t svbic_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) +svint8_t svbic_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) +svint32_t svbic_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) +svint64_t svbic_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) +svint16_t svbic_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) +svbool_t svbrka_b_m(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) +svbool_t svbrka_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) +svbool_t svbrkb_b_m(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) +svbool_t svbrkb_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) +svbool_t svbrkn_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) +svbool_t svbrkpa_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) +svbool_t svbrkpb_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) +svfloat64_t svcadd_f64_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) +svfloat32_t svcadd_f32_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) +svfloat16_t svcadd_f16_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) +svfloat64_t svcadd_f64_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) +svfloat32_t svcadd_f32_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) +svfloat16_t svcadd_f16_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) +svfloat64_t svcadd_f64_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) +svfloat32_t svcadd_f32_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) +svfloat16_t svcadd_f16_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) +uint8_t svclasta_n_u8(svbool_t, uint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) +uint32_t svclasta_n_u32(svbool_t, uint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) +uint64_t 
svclasta_n_u64(svbool_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) +uint16_t svclasta_n_u16(svbool_t, uint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) +int8_t svclasta_n_s8(svbool_t, int8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) +float64_t svclasta_n_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) +float32_t svclasta_n_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) +float16_t svclasta_n_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) +int32_t svclasta_n_s32(svbool_t, int32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) +int64_t svclasta_n_s64(svbool_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) +int16_t svclasta_n_s16(svbool_t, int16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) +svuint8_t svclasta_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) +svuint32_t svclasta_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) +svuint64_t svclasta_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) +svuint16_t svclasta_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) +svint8_t svclasta_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) +svfloat64_t svclasta_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) +svfloat32_t svclasta_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) +svfloat16_t svclasta_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) +svint32_t svclasta_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) +svint64_t svclasta_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) +svint16_t svclasta_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) +uint8_t svclastb_n_u8(svbool_t, uint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) +uint32_t svclastb_n_u32(svbool_t, uint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) +uint64_t svclastb_n_u64(svbool_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) +uint16_t svclastb_n_u16(svbool_t, uint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) +int8_t svclastb_n_s8(svbool_t, int8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) +float64_t svclastb_n_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) +float32_t 
svclastb_n_f32(svbool_t, float32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))
+float16_t svclastb_n_f16(svbool_t, float16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))
+int32_t svclastb_n_s32(svbool_t, int32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))
+int64_t svclastb_n_s64(svbool_t, int64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))
+int16_t svclastb_n_s16(svbool_t, int16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))
+svuint8_t svclastb_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))
+svuint32_t svclastb_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))
+svuint64_t svclastb_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))
+svuint16_t svclastb_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))
+svint8_t svclastb_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))
+svfloat64_t svclastb_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))
+svfloat32_t svclastb_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))
+svfloat16_t svclastb_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))
+svint32_t svclastb_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))
+svint64_t svclastb_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))
+svint16_t svclastb_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))
+svuint8_t svcls_s8_m(svuint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))
+svuint32_t svcls_s32_m(svuint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))
+svuint64_t svcls_s64_m(svuint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))
+svuint16_t svcls_s16_m(svuint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))
+svuint8_t svcls_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))
+svuint32_t svcls_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))
+svuint64_t svcls_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))
+svuint16_t svcls_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))
+svuint8_t svcls_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))
+svuint32_t svcls_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))
+svuint64_t svcls_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))
+svuint16_t svcls_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))
+svuint8_t svclz_u8_m(svuint8_t, svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))
+svuint32_t svclz_u32_m(svuint32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))
+svuint64_t svclz_u64_m(svuint64_t, svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))
+svuint16_t svclz_u16_m(svuint16_t, svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))
+svuint8_t svclz_s8_m(svuint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))
+svuint32_t svclz_s32_m(svuint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))
+svuint64_t svclz_s64_m(svuint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))
+svuint16_t svclz_s16_m(svuint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))
+svuint8_t svclz_u8_x(svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))
+svuint32_t svclz_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))
+svuint64_t svclz_u64_x(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))
+svuint16_t svclz_u16_x(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))
+svuint8_t svclz_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))
+svuint32_t svclz_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))
+svuint64_t svclz_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))
+svuint16_t svclz_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))
+svuint8_t svclz_u8_z(svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))
+svuint32_t svclz_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))
+svuint64_t svclz_u64_z(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))
+svuint16_t svclz_u16_z(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))
+svuint8_t svclz_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))
+svuint32_t svclz_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))
+svuint64_t svclz_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))
+svuint16_t svclz_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))
+svfloat64_t svcmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))
+svfloat32_t svcmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))
+svfloat16_t svcmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))
+svfloat64_t svcmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))
+svfloat32_t svcmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))
+svfloat16_t svcmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))
+svfloat64_t svcmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))
+svfloat32_t svcmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))
+svfloat16_t svcmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))
+svfloat32_t svcmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))
+svfloat16_t svcmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))
+svbool_t svcmpeq_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))
+svbool_t svcmpeq_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))
+svbool_t svcmpeq_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))
+svbool_t svcmpeq_n_u8(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))
+svbool_t svcmpeq_n_u32(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))
+svbool_t svcmpeq_n_u64(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))
+svbool_t svcmpeq_n_u16(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))
+svbool_t svcmpeq_n_s8(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))
+svbool_t svcmpeq_n_s32(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))
+svbool_t svcmpeq_n_s64(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))
+svbool_t svcmpeq_n_s16(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))
+svbool_t svcmpeq_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))
+svbool_t svcmpeq_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))
+svbool_t svcmpeq_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))
+svbool_t svcmpeq_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))
+svbool_t svcmpeq_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))
+svbool_t svcmpeq_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))
+svbool_t svcmpeq_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))
+svbool_t svcmpeq_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))
+svbool_t svcmpeq_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))
+svbool_t svcmpeq_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))
+svbool_t svcmpeq_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))
+svbool_t svcmpeq_wide_n_s8(svbool_t, svint8_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))
+svbool_t svcmpeq_wide_n_s32(svbool_t, svint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))
+svbool_t svcmpeq_wide_n_s16(svbool_t, svint16_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))
+svbool_t svcmpeq_wide_s8(svbool_t, svint8_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))
+svbool_t svcmpeq_wide_s32(svbool_t, svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))
+svbool_t svcmpeq_wide_s16(svbool_t, svint16_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))
+svbool_t svcmpge_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))
+svbool_t svcmpge_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))
+svbool_t svcmpge_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))
+svbool_t svcmpge_n_s8(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))
+svbool_t svcmpge_n_s32(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))
+svbool_t svcmpge_n_s64(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))
+svbool_t svcmpge_n_s16(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))
+svbool_t svcmpge_n_u8(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))
+svbool_t svcmpge_n_u32(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))
+svbool_t svcmpge_n_u64(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))
+svbool_t svcmpge_n_u16(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))
+svbool_t svcmpge_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))
+svbool_t svcmpge_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))
+svbool_t svcmpge_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))
+svbool_t svcmpge_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))
+svbool_t svcmpge_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))
+svbool_t svcmpge_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))
+svbool_t svcmpge_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))
+svbool_t svcmpge_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))
+svbool_t svcmpge_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))
+svbool_t svcmpge_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))
+svbool_t svcmpge_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))
+svbool_t svcmpge_wide_n_s8(svbool_t, svint8_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))
+svbool_t svcmpge_wide_n_s32(svbool_t, svint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))
+svbool_t svcmpge_wide_n_s16(svbool_t, svint16_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))
+svbool_t svcmpge_wide_n_u8(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))
+svbool_t svcmpge_wide_n_u32(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))
+svbool_t svcmpge_wide_n_u16(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))
+svbool_t svcmpge_wide_s8(svbool_t, svint8_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))
+svbool_t svcmpge_wide_s32(svbool_t, svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))
+svbool_t svcmpge_wide_s16(svbool_t, svint16_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))
+svbool_t svcmpge_wide_u8(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))
+svbool_t svcmpge_wide_u32(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))
+svbool_t svcmpge_wide_u16(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))
+svbool_t svcmpgt_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))
+svbool_t svcmpgt_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))
+svbool_t svcmpgt_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))
+svbool_t svcmpgt_n_s8(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))
+svbool_t svcmpgt_n_s32(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))
+svbool_t svcmpgt_n_s64(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))
+svbool_t svcmpgt_n_s16(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))
+svbool_t svcmpgt_n_u8(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))
+svbool_t svcmpgt_n_u32(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))
+svbool_t svcmpgt_n_u64(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))
+svbool_t svcmpgt_n_u16(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))
+svbool_t svcmpgt_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))
+svbool_t svcmpgt_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))
+svbool_t svcmpgt_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))
+svbool_t svcmpgt_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))
+svbool_t svcmpgt_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))
+svbool_t svcmpgt_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))
+svbool_t svcmpgt_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))
+svbool_t svcmpgt_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))
+svbool_t svcmpgt_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))
+svbool_t svcmpgt_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))
+svbool_t svcmpgt_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))
+svbool_t svcmpgt_wide_n_s8(svbool_t, svint8_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))
+svbool_t svcmpgt_wide_n_s32(svbool_t, svint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))
+svbool_t svcmpgt_wide_n_s16(svbool_t, svint16_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))
+svbool_t svcmpgt_wide_n_u8(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))
+svbool_t svcmpgt_wide_n_u32(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))
+svbool_t svcmpgt_wide_n_u16(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))
+svbool_t svcmpgt_wide_s8(svbool_t, svint8_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))
+svbool_t svcmpgt_wide_s32(svbool_t, svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))
+svbool_t svcmpgt_wide_s16(svbool_t, svint16_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))
+svbool_t svcmpgt_wide_u8(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))
+svbool_t svcmpgt_wide_u32(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))
+svbool_t svcmpgt_wide_u16(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))
+svbool_t svcmple_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))
+svbool_t svcmple_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))
+svbool_t svcmple_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))
+svbool_t svcmple_n_s8(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))
+svbool_t svcmple_n_s32(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))
+svbool_t svcmple_n_s64(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))
+svbool_t svcmple_n_s16(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))
+svbool_t svcmple_n_u8(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))
+svbool_t svcmple_n_u32(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))
+svbool_t svcmple_n_u64(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))
+svbool_t svcmple_n_u16(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))
+svbool_t svcmple_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))
+svbool_t svcmple_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))
+svbool_t svcmple_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))
+svbool_t svcmple_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))
+svbool_t svcmple_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))
+svbool_t svcmple_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))
+svbool_t svcmple_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))
+svbool_t svcmple_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))
+svbool_t svcmple_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))
+svbool_t svcmple_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))
+svbool_t svcmple_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))
+svbool_t svcmple_wide_n_s8(svbool_t, svint8_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))
+svbool_t svcmple_wide_n_s32(svbool_t, svint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))
+svbool_t svcmple_wide_n_s16(svbool_t, svint16_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))
+svbool_t svcmple_wide_n_u8(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))
+svbool_t svcmple_wide_n_u32(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))
+svbool_t svcmple_wide_n_u16(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))
+svbool_t svcmple_wide_s8(svbool_t, svint8_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))
+svbool_t svcmple_wide_s32(svbool_t, svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))
+svbool_t svcmple_wide_s16(svbool_t, svint16_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))
+svbool_t svcmple_wide_u8(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))
+svbool_t svcmple_wide_u32(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))
+svbool_t svcmple_wide_u16(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))
+svbool_t svcmplt_n_u8(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))
+svbool_t svcmplt_n_u32(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))
+svbool_t svcmplt_n_u64(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))
+svbool_t svcmplt_n_u16(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))
+svbool_t svcmplt_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))
+svbool_t svcmplt_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))
+svbool_t svcmplt_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))
+svbool_t svcmplt_n_s8(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))
+svbool_t svcmplt_n_s32(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))
+svbool_t svcmplt_n_s64(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))
+svbool_t svcmplt_n_s16(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))
+svbool_t svcmplt_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))
+svbool_t svcmplt_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))
+svbool_t svcmplt_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))
+svbool_t svcmplt_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))
+svbool_t svcmplt_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))
+svbool_t svcmplt_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))
+svbool_t svcmplt_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))
+svbool_t svcmplt_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))
+svbool_t svcmplt_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))
+svbool_t svcmplt_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))
+svbool_t svcmplt_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))
+svbool_t svcmplt_wide_n_u8(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))
+svbool_t svcmplt_wide_n_u32(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))
+svbool_t svcmplt_wide_n_u16(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))
+svbool_t svcmplt_wide_n_s8(svbool_t, svint8_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))
+svbool_t svcmplt_wide_n_s32(svbool_t, svint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))
+svbool_t svcmplt_wide_n_s16(svbool_t, svint16_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))
+svbool_t svcmplt_wide_u8(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))
+svbool_t svcmplt_wide_u32(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))
+svbool_t svcmplt_wide_u16(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))
+svbool_t svcmplt_wide_s8(svbool_t, svint8_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))
+svbool_t svcmplt_wide_s32(svbool_t, svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))
+svbool_t svcmplt_wide_s16(svbool_t, svint16_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))
+svbool_t svcmpne_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))
+svbool_t svcmpne_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))
+svbool_t svcmpne_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))
+svbool_t svcmpne_n_u8(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))
+svbool_t svcmpne_n_u32(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))
+svbool_t svcmpne_n_u64(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))
+svbool_t svcmpne_n_u16(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))
+svbool_t svcmpne_n_s8(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))
+svbool_t svcmpne_n_s32(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))
+svbool_t svcmpne_n_s64(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))
+svbool_t svcmpne_n_s16(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))
+svbool_t svcmpne_u8(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))
+svbool_t svcmpne_u32(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))
+svbool_t svcmpne_u64(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))
+svbool_t svcmpne_u16(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))
+svbool_t svcmpne_s8(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))
+svbool_t svcmpne_s32(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))
+svbool_t svcmpne_s64(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))
+svbool_t svcmpne_s16(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))
+svbool_t svcmpne_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))
+svbool_t svcmpne_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))
+svbool_t svcmpne_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))
+svbool_t svcmpne_wide_n_s8(svbool_t, svint8_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))
+svbool_t svcmpne_wide_n_s32(svbool_t, svint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))
+svbool_t svcmpne_wide_n_s16(svbool_t, svint16_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))
+svbool_t svcmpne_wide_s8(svbool_t, svint8_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))
+svbool_t svcmpne_wide_s32(svbool_t, svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))
+svbool_t svcmpne_wide_s16(svbool_t, svint16_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))
+svbool_t svcmpuo_n_f64(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))
+svbool_t svcmpuo_n_f32(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))
+svbool_t svcmpuo_n_f16(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))
+svbool_t svcmpuo_f64(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))
+svbool_t svcmpuo_f32(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))
+svbool_t svcmpuo_f16(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))
+svuint8_t svcnot_u8_m(svuint8_t, svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))
+svuint32_t svcnot_u32_m(svuint32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))
+svuint64_t svcnot_u64_m(svuint64_t, svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))
+svuint16_t svcnot_u16_m(svuint16_t, svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))
+svint8_t svcnot_s8_m(svint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))
+svint32_t svcnot_s32_m(svint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))
+svint64_t svcnot_s64_m(svint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))
+svint16_t svcnot_s16_m(svint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))
+svuint8_t svcnot_u8_x(svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))
+svuint32_t svcnot_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))
+svuint64_t svcnot_u64_x(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))
+svuint16_t svcnot_u16_x(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))
+svint8_t svcnot_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))
+svint32_t svcnot_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))
+svint64_t svcnot_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))
+svint16_t svcnot_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))
+svuint8_t svcnot_u8_z(svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))
+svuint32_t svcnot_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))
+svuint64_t svcnot_u64_z(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))
+svuint16_t svcnot_u16_z(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))
+svint8_t svcnot_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))
+svint32_t svcnot_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))
+svint64_t svcnot_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))
+svint16_t svcnot_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))
+svuint8_t svcnt_u8_m(svuint8_t, svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))
+svuint32_t svcnt_u32_m(svuint32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))
+svuint64_t svcnt_u64_m(svuint64_t, svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))
+svuint16_t svcnt_u16_m(svuint16_t, svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))
+svuint8_t svcnt_s8_m(svuint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))
+svuint64_t svcnt_f64_m(svuint64_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))
+svuint32_t svcnt_f32_m(svuint32_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))
+svuint16_t svcnt_f16_m(svuint16_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))
+svuint32_t svcnt_s32_m(svuint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))
+svuint64_t svcnt_s64_m(svuint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))
+svuint16_t svcnt_s16_m(svuint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))
+svuint8_t svcnt_u8_x(svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))
+svuint32_t svcnt_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))
+svuint64_t svcnt_u64_x(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))
+svuint16_t svcnt_u16_x(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))
+svuint8_t svcnt_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))
+svuint64_t svcnt_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))
+svuint32_t svcnt_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))
+svuint16_t svcnt_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))
+svuint32_t svcnt_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))
+svuint64_t svcnt_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))
+svuint16_t svcnt_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))
+svuint8_t svcnt_u8_z(svbool_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))
+svuint32_t svcnt_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))
+svuint64_t svcnt_u64_z(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))
+svuint16_t svcnt_u16_z(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))
+svuint8_t svcnt_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))
+svuint64_t svcnt_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))
+svuint32_t svcnt_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))
+svuint16_t svcnt_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))
+svuint32_t svcnt_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
+svuint64_t svcnt_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
+svuint16_t svcnt_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb)))
+uint64_t svcntb();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat)))
+uint64_t svcntb_pat(enum svpattern);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd)))
+uint64_t svcntd();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat)))
+uint64_t svcntd_pat(enum svpattern);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth)))
+uint64_t svcnth();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat)))
+uint64_t svcnth_pat(enum svpattern);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8)))
+uint64_t svcntp_b8(svbool_t, svbool_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b32)))
+uint64_t svcntp_b32(svbool_t, svbool_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b64)))
+uint64_t svcntp_b64(svbool_t, svbool_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16)))
+uint64_t svcntp_b16(svbool_t, svbool_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw)))
+uint64_t svcntw();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat)))
+uint64_t svcntw_pat(enum svpattern);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
+svuint32_t svcompact_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
+svuint64_t svcompact_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
+svfloat64_t svcompact_f64(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
+svfloat32_t svcompact_f32(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
+svint32_t svcompact_s32(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
+svint64_t svcompact_s64(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
+svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
+svuint32x2_t svcreate2_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))
+svuint64x2_t svcreate2_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))
+svuint16x2_t svcreate2_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))
+svint8x2_t svcreate2_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))
+svfloat64x2_t svcreate2_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))
+svfloat32x2_t svcreate2_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))
+svfloat16x2_t svcreate2_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))
+svint32x2_t svcreate2_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))
+svint64x2_t svcreate2_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))
+svint16x2_t svcreate2_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))
+svuint8x3_t svcreate3_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))
+svuint32x3_t svcreate3_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))
+svuint64x3_t svcreate3_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))
+svuint16x3_t svcreate3_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))
+svint8x3_t svcreate3_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))
+svfloat64x3_t svcreate3_f64(svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))
+svfloat32x3_t svcreate3_f32(svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))
+svfloat16x3_t svcreate3_f16(svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))
+svint32x3_t svcreate3_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))
+svint64x3_t svcreate3_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))
+svint16x3_t svcreate3_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))
+svuint8x4_t svcreate4_u8(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))
+svuint32x4_t svcreate4_u32(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))
+svuint64x4_t svcreate4_u64(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))
+svuint16x4_t svcreate4_u16(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))
+svint8x4_t svcreate4_s8(svint8_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))
+svfloat64x4_t svcreate4_f64(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))
+svfloat32x4_t svcreate4_f32(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))
+svfloat16x4_t svcreate4_f16(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))
+svint32x4_t svcreate4_s32(svint32_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))
+svint64x4_t svcreate4_s64(svint64_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))
+svint16x4_t svcreate4_s16(svint16_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))
+svfloat16_t svcvt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))
+svfloat16_t svcvt_f16_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))
+svfloat16_t svcvt_f16_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))
+svfloat16_t svcvt_f16_f64_m(svfloat16_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))
+svfloat16_t svcvt_f16_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))
+svfloat16_t svcvt_f16_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))
+svfloat16_t svcvt_f16_s16_m(svfloat16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))
+svfloat16_t svcvt_f16_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))
+svfloat16_t svcvt_f16_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))
+svfloat16_t svcvt_f16_s32_m(svfloat16_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))
+svfloat16_t svcvt_f16_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))
+svfloat16_t svcvt_f16_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))
+svfloat16_t svcvt_f16_s64_m(svfloat16_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))
+svfloat16_t svcvt_f16_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))
+svfloat16_t svcvt_f16_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))
+svfloat16_t svcvt_f16_u16_m(svfloat16_t, svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))
+svfloat16_t svcvt_f16_u16_x(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))
+svfloat16_t svcvt_f16_u16_z(svbool_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))
+svfloat16_t svcvt_f16_u32_m(svfloat16_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))
+svfloat16_t svcvt_f16_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))
+svfloat16_t svcvt_f16_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))
+svfloat16_t svcvt_f16_u64_m(svfloat16_t, svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))
+svfloat16_t svcvt_f16_u64_x(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))
+svfloat16_t svcvt_f16_u64_z(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))
+svfloat32_t svcvt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))
+svfloat32_t svcvt_f32_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))
+svfloat32_t svcvt_f32_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))
+svfloat32_t svcvt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))
+svfloat32_t svcvt_f32_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))
+svfloat32_t svcvt_f32_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))
+svfloat32_t svcvt_f32_s32_m(svfloat32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))
+svfloat32_t svcvt_f32_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))
+svfloat32_t svcvt_f32_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))
+svfloat32_t svcvt_f32_s64_m(svfloat32_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))
+svfloat32_t svcvt_f32_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))
+svfloat32_t svcvt_f32_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))
+svfloat32_t svcvt_f32_u32_m(svfloat32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))
+svfloat32_t svcvt_f32_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))
+svfloat32_t svcvt_f32_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))
+svfloat32_t svcvt_f32_u64_m(svfloat32_t, svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))
+svfloat32_t svcvt_f32_u64_x(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))
+svfloat32_t svcvt_f32_u64_z(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))
+svfloat64_t svcvt_f64_f16_m(svfloat64_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))
+svfloat64_t svcvt_f64_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))
+svfloat64_t svcvt_f64_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))
+svfloat64_t svcvt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))
+svfloat64_t svcvt_f64_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))
+svfloat64_t svcvt_f64_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))
+svfloat64_t svcvt_f64_s32_m(svfloat64_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))
+svfloat64_t svcvt_f64_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))
+svfloat64_t svcvt_f64_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))
+svfloat64_t svcvt_f64_s64_m(svfloat64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))
+svfloat64_t svcvt_f64_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))
+svfloat64_t svcvt_f64_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))
+svfloat64_t svcvt_f64_u32_m(svfloat64_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))
+svfloat64_t svcvt_f64_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))
+svfloat64_t svcvt_f64_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))
+svfloat64_t svcvt_f64_u64_m(svfloat64_t, svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))
+svfloat64_t svcvt_f64_u64_x(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))
+svfloat64_t svcvt_f64_u64_z(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))
+svint16_t svcvt_s16_f16_m(svint16_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))
+svint16_t svcvt_s16_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))
+svint16_t svcvt_s16_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))
+svint32_t svcvt_s32_f16_m(svint32_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))
+svint32_t svcvt_s32_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))
+svint32_t svcvt_s32_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))
+svint32_t svcvt_s32_f32_m(svint32_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))
+svint32_t svcvt_s32_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))
+svint32_t svcvt_s32_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))
+svint32_t svcvt_s32_f64_m(svint32_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))
+svint32_t svcvt_s32_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))
+svint32_t svcvt_s32_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))
+svint64_t svcvt_s64_f16_m(svint64_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))
+svint64_t svcvt_s64_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))
+svint64_t svcvt_s64_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))
+svint64_t svcvt_s64_f32_m(svint64_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))
+svint64_t svcvt_s64_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))
+svint64_t svcvt_s64_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))
+svint64_t svcvt_s64_f64_m(svint64_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))
+svint64_t svcvt_s64_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))
+svint64_t svcvt_s64_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))
+svuint16_t svcvt_u16_f16_m(svuint16_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))
+svuint16_t svcvt_u16_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))
+svuint16_t svcvt_u16_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))
+svuint32_t svcvt_u32_f16_m(svuint32_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))
+svuint32_t svcvt_u32_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))
+svuint32_t svcvt_u32_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))
+svuint32_t svcvt_u32_f32_m(svuint32_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))
+svuint32_t svcvt_u32_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))
+svuint32_t svcvt_u32_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))
+svuint32_t svcvt_u32_f64_m(svuint32_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))
+svuint32_t svcvt_u32_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))
+svuint32_t svcvt_u32_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))
+svuint64_t svcvt_u64_f16_m(svuint64_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))
+svuint64_t svcvt_u64_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))
+svuint64_t svcvt_u64_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))
+svuint64_t svcvt_u64_f32_m(svuint64_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))
+svuint64_t svcvt_u64_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))
+svuint64_t svcvt_u64_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))
+svuint64_t svcvt_u64_f64_m(svuint64_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))
+svuint64_t svcvt_u64_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))
+svuint64_t svcvt_u64_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))
+svfloat64_t svdiv_n_f64_m(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))
+svfloat32_t svdiv_n_f32_m(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))
+svfloat16_t svdiv_n_f16_m(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))
+svfloat64_t svdiv_n_f64_x(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))
+svfloat32_t svdiv_n_f32_x(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))
+svfloat16_t svdiv_n_f16_x(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))
+svfloat64_t svdiv_n_f64_z(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))
+svfloat32_t svdiv_n_f32_z(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))
+svfloat16_t svdiv_n_f16_z(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))
+svint32_t svdiv_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))
+svint64_t svdiv_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))
+svint32_t svdiv_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))
+svint64_t svdiv_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))
+svint32_t svdiv_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))
+svint64_t svdiv_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))
+svuint32_t svdiv_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))
+svuint64_t svdiv_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))
+svuint32_t svdiv_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))
+svuint64_t svdiv_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))
+svuint32_t svdiv_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))
+svuint64_t svdiv_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))
+svfloat64_t svdiv_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))
+svfloat32_t svdiv_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))
+svfloat16_t svdiv_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))
+svfloat64_t svdiv_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))
+svfloat32_t svdiv_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))
+svfloat16_t svdiv_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))
+svfloat64_t svdiv_f64_z(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))
+svfloat32_t svdiv_f32_z(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))
+svfloat16_t svdiv_f16_z(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))
+svint32_t svdiv_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))
+svint64_t svdiv_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))
+svint32_t svdiv_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))
+svint64_t svdiv_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))
+svint32_t svdiv_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))
+svint64_t svdiv_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))
+svuint32_t svdiv_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))
+svuint64_t svdiv_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))
+svuint32_t svdiv_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))
+svuint64_t svdiv_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))
+svuint32_t svdiv_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))
+svuint64_t svdiv_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))
+svfloat64_t svdivr_n_f64_m(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))
+svfloat32_t svdivr_n_f32_m(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))
+svfloat16_t svdivr_n_f16_m(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))
+svfloat64_t svdivr_n_f64_x(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))
+svfloat32_t svdivr_n_f32_x(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))
+svfloat16_t svdivr_n_f16_x(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))
+svfloat64_t svdivr_n_f64_z(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))
+svfloat32_t svdivr_n_f32_z(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))
+svfloat16_t svdivr_n_f16_z(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))
+svint32_t svdivr_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))
+svint64_t svdivr_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))
+svint32_t svdivr_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))
+svint64_t svdivr_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))
+svint32_t svdivr_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))
+svint64_t svdivr_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))
+svuint32_t svdivr_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))
+svuint64_t svdivr_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))
+svuint32_t svdivr_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))
+svuint64_t svdivr_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))
+svuint32_t svdivr_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))
+svuint64_t svdivr_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))
+svfloat64_t svdivr_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))
+svfloat32_t svdivr_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))
+svfloat16_t svdivr_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))
+svfloat64_t svdivr_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))
+svfloat32_t svdivr_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))
+svfloat16_t svdivr_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))
+svfloat64_t svdivr_f64_z(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))
+svfloat32_t svdivr_f32_z(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))
+svfloat16_t svdivr_f16_z(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))
+svint32_t svdivr_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))
+svint64_t svdivr_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))
+svint32_t svdivr_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))
+svint64_t svdivr_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))
+svint32_t svdivr_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))
+svint64_t svdivr_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))
+svuint32_t svdivr_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))
+svuint64_t svdivr_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))
+svuint32_t svdivr_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))
+svuint64_t svdivr_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) +svuint32_t svdivr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) +svuint64_t svdivr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) +svint32_t svdot_n_s32(svint32_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) +svint64_t svdot_n_s64(svint64_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) +svuint32_t svdot_n_u32(svuint32_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) +svuint64_t svdot_n_u64(svuint64_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) +svint32_t svdot_s32(svint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) +svint64_t svdot_s64(svint64_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) +svuint32_t svdot_u32(svuint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) +svuint64_t svdot_u64(svuint64_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) +svint32_t svdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) +svint64_t svdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) +svuint32_t svdot_lane_u32(svuint32_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) +svuint64_t svdot_lane_u64(svuint64_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) +svuint8_t svdup_n_u8(uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) +svuint32_t svdup_n_u32(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) +svuint64_t svdup_n_u64(uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) +svuint16_t svdup_n_u16(uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) +svint8_t svdup_n_s8(int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) +svfloat64_t svdup_n_f64(float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) +svfloat32_t svdup_n_f32(float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) +svfloat16_t svdup_n_f16(float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) +svint32_t svdup_n_s32(int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) +svint64_t svdup_n_s64(int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) +svint16_t svdup_n_s16(int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) +svuint8_t svdup_n_u8_m(svuint8_t, svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) +svuint32_t svdup_n_u32_m(svuint32_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) +svuint64_t svdup_n_u64_m(svuint64_t, svbool_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) +svuint16_t svdup_n_u16_m(svuint16_t, svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) +svint8_t svdup_n_s8_m(svint8_t, svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) +svfloat64_t svdup_n_f64_m(svfloat64_t, svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) +svfloat32_t svdup_n_f32_m(svfloat32_t, svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) +svfloat16_t svdup_n_f16_m(svfloat16_t, svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) +svint32_t svdup_n_s32_m(svint32_t, svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) +svint64_t svdup_n_s64_m(svint64_t, svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) +svint16_t svdup_n_s16_m(svint16_t, svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) +svbool_t svdup_n_b8(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) +svbool_t svdup_n_b32(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) +svbool_t svdup_n_b64(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) +svbool_t svdup_n_b16(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) +svuint8_t svdup_n_u8_x(svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) +svuint32_t svdup_n_u32_x(svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) +svuint64_t svdup_n_u64_x(svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) +svuint16_t svdup_n_u16_x(svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) +svint8_t svdup_n_s8_x(svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) +svfloat64_t svdup_n_f64_x(svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) +svfloat32_t svdup_n_f32_x(svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) +svfloat16_t svdup_n_f16_x(svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) +svint32_t svdup_n_s32_x(svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) +svint64_t svdup_n_s64_x(svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) +svint16_t svdup_n_s16_x(svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) +svuint8_t svdup_n_u8_z(svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) +svuint32_t svdup_n_u32_z(svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) +svuint64_t svdup_n_u64_z(svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) +svuint16_t svdup_n_u16_z(svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) +svint8_t svdup_n_s8_z(svbool_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) +svfloat64_t svdup_n_f64_z(svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) +svfloat32_t svdup_n_f32_z(svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) +svfloat16_t svdup_n_f16_z(svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) +svint32_t svdup_n_s32_z(svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) +svint64_t svdup_n_s64_z(svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) +svint16_t svdup_n_s16_z(svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) +svuint8_t svdup_lane_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) +svuint32_t svdup_lane_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) +svuint64_t svdup_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) +svuint16_t svdup_lane_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) +svint8_t svdup_lane_s8(svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) +svfloat64_t svdup_lane_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) +svfloat32_t svdup_lane_f32(svfloat32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) +svfloat16_t svdup_lane_f16(svfloat16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) +svint32_t svdup_lane_s32(svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) +svint64_t svdup_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) +svint16_t svdup_lane_s16(svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) +svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) +svfloat16_t svdupq_n_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) +svint16_t svdupq_n_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) +svuint32_t svdupq_n_u32(uint32_t, uint32_t, uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) +svfloat32_t svdupq_n_f32(float32_t, float32_t, float32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) +svint32_t svdupq_n_s32(int32_t, int32_t, int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) +svuint64_t svdupq_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) +svfloat64_t svdupq_n_f64(float64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) +svint64_t svdupq_n_s64(int64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) +svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) +svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) +svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) +svbool_t svdupq_n_b32(bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) +svbool_t svdupq_n_b64(bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) +svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) +svuint8_t svdupq_lane_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) +svuint32_t svdupq_lane_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) +svuint64_t svdupq_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) +svuint16_t svdupq_lane_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) +svint8_t svdupq_lane_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) +svfloat64_t svdupq_lane_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) +svfloat32_t svdupq_lane_f32(svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) +svfloat16_t svdupq_lane_f16(svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) +svint32_t svdupq_lane_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) +svint64_t svdupq_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) +svint16_t svdupq_lane_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) +svbool_t sveor_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) +svuint8_t sveor_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) +svuint32_t sveor_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) +svuint64_t sveor_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) +svuint16_t sveor_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) +svint8_t sveor_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) +svint32_t sveor_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) +svint64_t sveor_n_s64_m(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) +svint16_t sveor_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) +svuint8_t sveor_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) +svuint32_t sveor_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) +svuint64_t sveor_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) +svuint16_t sveor_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) +svint8_t sveor_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) +svint32_t sveor_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) +svint64_t sveor_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) +svint16_t sveor_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) +svuint8_t sveor_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) +svuint32_t sveor_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) +svuint64_t sveor_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) +svuint16_t sveor_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) +svint8_t sveor_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) +svint32_t sveor_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) +svint64_t sveor_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) +svint16_t sveor_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) +svuint8_t sveor_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) +svuint32_t sveor_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) +svuint64_t sveor_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) +svuint16_t sveor_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) +svint8_t sveor_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) +svint32_t sveor_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) +svint64_t sveor_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) +svint16_t sveor_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) +svuint8_t sveor_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) +svuint32_t 
sveor_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) +svuint64_t sveor_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) +svuint16_t sveor_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) +svint8_t sveor_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) +svint32_t sveor_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) +svint64_t sveor_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) +svint16_t sveor_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) +svuint8_t sveor_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) +svuint32_t sveor_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) +svuint64_t sveor_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) +svuint16_t sveor_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) +svint8_t sveor_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) +svint32_t sveor_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) +svint64_t sveor_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) +svint16_t sveor_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) +uint8_t sveorv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) +uint32_t sveorv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) +uint64_t sveorv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) +uint16_t sveorv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) +int8_t sveorv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) +int32_t sveorv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) +int64_t sveorv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) +int16_t sveorv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t svexpa_f64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa_f32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa_f16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) +svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) +svuint32_t svext_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) +svuint64_t svext_u64(svuint64_t, svuint64_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) +svuint16_t svext_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) +svint8_t svext_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) +svfloat64_t svext_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) +svfloat32_t svext_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) +svfloat16_t svext_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) +svint32_t svext_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) +svint64_t svext_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) +svint16_t svext_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) +svint32_t svextb_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) +svint64_t svextb_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) +svint16_t svextb_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) +svint32_t svextb_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) +svint64_t svextb_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) +svint16_t svextb_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) +svint32_t svextb_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) +svint64_t svextb_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) +svint16_t svextb_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) +svuint32_t svextb_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) +svuint64_t svextb_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) +svuint16_t svextb_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) +svuint32_t svextb_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) +svuint64_t svextb_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) +svuint16_t svextb_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) +svuint32_t svextb_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) +svuint64_t svextb_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) +svuint16_t svextb_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) +svint32_t svexth_s32_m(svint32_t, svbool_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) +svint64_t svexth_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) +svint32_t svexth_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) +svint64_t svexth_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) +svint32_t svexth_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) +svint64_t svexth_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) +svuint32_t svexth_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) +svuint64_t svexth_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) +svuint32_t svexth_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) +svuint64_t svexth_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) +svuint32_t svexth_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) +svuint64_t svexth_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) +svint64_t svextw_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) +svint64_t svextw_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) +svint64_t svextw_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) +svuint64_t svextw_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) +svuint64_t svextw_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) +svuint64_t svextw_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) +svuint8_t svget2_u8(svuint8x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) +svuint32_t svget2_u32(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) +svuint64_t svget2_u64(svuint64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) +svuint16_t svget2_u16(svuint16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) +svint8_t svget2_s8(svint8x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) +svfloat64_t svget2_f64(svfloat64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) +svfloat32_t svget2_f32(svfloat32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) +svfloat16_t svget2_f16(svfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) +svint32_t svget2_s32(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) +svint64_t svget2_s64(svint64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) +svint16_t svget2_s16(svint16x2_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) +svuint8_t svget3_u8(svuint8x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) +svuint32_t svget3_u32(svuint32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) +svuint64_t svget3_u64(svuint64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) +svuint16_t svget3_u16(svuint16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) +svint8_t svget3_s8(svint8x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) +svfloat64_t svget3_f64(svfloat64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) +svfloat32_t svget3_f32(svfloat32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) +svfloat16_t svget3_f16(svfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) +svint32_t svget3_s32(svint32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) +svint64_t svget3_s64(svint64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) +svint16_t svget3_s16(svint16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) +svuint8_t svget4_u8(svuint8x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) +svuint32_t svget4_u32(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) +svuint64_t svget4_u64(svuint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) +svuint16_t svget4_u16(svuint16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) +svint8_t svget4_s8(svint8x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) +svfloat64_t svget4_f64(svfloat64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) +svfloat32_t svget4_f32(svfloat32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) +svfloat16_t svget4_f16(svfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) +svint32_t svget4_s32(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) +svint64_t svget4_s64(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) +svint16_t svget4_s16(svint16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u8))) +svuint8_t svindex_u8(uint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u32))) +svuint32_t svindex_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u64))) +svuint64_t svindex_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u16))) +svuint16_t svindex_u16(uint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s8))) +svint8_t svindex_s8(int8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s32))) +svint32_t svindex_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s64))) +svint64_t svindex_s64(int64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s16))) +svint16_t svindex_s16(int16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) +svuint8_t svinsr_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) +svuint32_t svinsr_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) +svuint64_t svinsr_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) +svuint16_t svinsr_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) +svint8_t svinsr_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) +svfloat64_t svinsr_n_f64(svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) +svfloat32_t svinsr_n_f32(svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) +svfloat16_t svinsr_n_f16(svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) +svint32_t svinsr_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) +svint64_t svinsr_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) +svint16_t svinsr_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) +uint8_t svlasta_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) +uint32_t svlasta_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) +uint64_t svlasta_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) +uint16_t svlasta_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) +int8_t svlasta_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) +float64_t svlasta_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) +float32_t svlasta_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) +float16_t svlasta_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) +int32_t svlasta_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) +int64_t svlasta_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) +int16_t svlasta_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) +uint8_t svlastb_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) +uint32_t svlastb_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) +uint64_t svlastb_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) +uint16_t svlastb_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) +int8_t svlastb_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) +float64_t svlastb_f64(svbool_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) +float32_t svlastb_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) +float16_t svlastb_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) +int32_t svlastb_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) +int64_t svlastb_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) +int16_t svlastb_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) +svuint8_t svld1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) +svuint32_t svld1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) +svuint64_t svld1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) +svuint16_t svld1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) +svint8_t svld1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) +svfloat64_t svld1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) +svfloat32_t svld1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) +svfloat16_t svld1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) +svint32_t svld1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) +svint64_t svld1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) +svint16_t svld1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) +svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t 
svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) +svuint8_t svld1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) +svuint32_t svld1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) +svuint64_t svld1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) +svuint16_t svld1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) +svint8_t svld1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) +svfloat64_t svld1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) +svfloat32_t svld1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) +svfloat16_t svld1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) +svint32_t svld1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) +svint64_t svld1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) +svint16_t svld1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) +svuint8_t svld1rq_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) +svuint32_t svld1rq_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) +svuint64_t svld1rq_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) +svuint16_t svld1rq_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) +svint8_t svld1rq_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) +svfloat64_t svld1rq_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) +svfloat32_t svld1rq_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) +svfloat16_t svld1rq_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) +svint32_t svld1rq_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) +svint64_t svld1rq_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) +svint16_t svld1rq_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) +svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t 
const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
+svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
+svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
+svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32)))
+svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64)))
+svuint64_t svld1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u16)))
+svuint16_t svld1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s32)))
+svint32_t svld1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s64)))
+svint64_t svld1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s16)))
+svint16_t svld1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u32)))
+svuint32_t svld1sb_u32(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u64)))
+svuint64_t svld1sb_u64(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u16)))
+svuint16_t svld1sb_u16(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s32)))
+svint32_t svld1sb_s32(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64)))
+svint64_t svld1sb_s64(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16)))
+svint16_t svld1sb_s16(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
+svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
+svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
+svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
+svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
+svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
+svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
+svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
+svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
+svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
+svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
+svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
+svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
+svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
+svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
+svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
+svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
+svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
+svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
+svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
+svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
+svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
+svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
+svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
+svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
+svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
+svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))
+svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))
+svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32)))
+svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64)))
+svuint64_t svld1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s32)))
+svint32_t svld1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s64)))
+svint64_t svld1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u32)))
+svuint32_t svld1sh_u32(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u64)))
+svuint64_t svld1sh_u64(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32)))
+svint32_t svld1sh_s32(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64)))
+svint64_t svld1sh_s64(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))
+svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))
+svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))
+svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))
+svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))
+svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))
+svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))
+svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))
+svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))
+svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))
+svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))
+svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))
+svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))
+svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))
+svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64)))
+svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64)))
+svint64_t svld1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64)))
+svuint64_t svld1sw_u64(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64)))
+svint64_t svld1sw_s64(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))
+svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))
+svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))
+svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))
+svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))
+svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))
+svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))
+svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))
+svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))
+svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))
+svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))
+svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))
+svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))
+svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))
+svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))
+svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))
+svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32)))
+svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64)))
+svuint64_t svld1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u16)))
+svuint16_t svld1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s32)))
+svint32_t svld1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s64)))
+svint64_t svld1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s16)))
+svint16_t svld1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u32)))
+svuint32_t svld1ub_u32(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u64)))
+svuint64_t svld1ub_u64(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u16)))
+svuint16_t svld1ub_u16(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s32)))
+svint32_t svld1ub_s32(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64)))
+svint64_t svld1ub_s64(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16)))
+svint16_t svld1ub_s16(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))
+svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))
+svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))
+svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))
+svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))
+svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))
+svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))
+svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))
+svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))
+svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))
+svuint64_t svld1uh_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))
+svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))
+svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))
+svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))
+svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))
+svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))
+svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))
+svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))
+svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))
+svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))
+svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))
+svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))
+svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))
+svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))
+svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))
+svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))
+svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))
+svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))
+svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32)))
+svuint32_t svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64)))
+svuint64_t svld1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s32)))
+svint32_t svld1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s64)))
+svint64_t svld1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u32)))
+svuint32_t svld1uh_u32(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u64)))
+svuint64_t svld1uh_u64(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32)))
+svint32_t svld1uh_s32(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64)))
+svint64_t svld1uh_s64(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))
+svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))
+svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))
+svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))
+svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))
+svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))
+svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))
+svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))
+svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))
+svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))
+svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))
+svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))
+svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))
+svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))
+svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64)))
+svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64)))
+svint64_t svld1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_u64)))
+svuint64_t svld1uw_u64(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_s64)))
+svint64_t svld1uw_s64(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))
+svuint8x2_t svld2_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))
+svuint32x2_t svld2_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))
+svuint64x2_t svld2_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))
+svuint16x2_t svld2_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))
+svint8x2_t svld2_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))
+svfloat64x2_t svld2_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))
+svfloat32x2_t svld2_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))
+svfloat16x2_t svld2_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))
+svint32x2_t svld2_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))
+svint64x2_t svld2_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))
+svint16x2_t svld2_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))
+svuint8x2_t svld2_vnum_u8(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))
+svuint32x2_t svld2_vnum_u32(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))
+svuint64x2_t svld2_vnum_u64(svbool_t, uint64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))
+svuint16x2_t svld2_vnum_u16(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))
+svint8x2_t svld2_vnum_s8(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))
+svfloat64x2_t svld2_vnum_f64(svbool_t, float64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))
+svfloat32x2_t svld2_vnum_f32(svbool_t, float32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))
+svfloat16x2_t svld2_vnum_f16(svbool_t, float16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))
+svint32x2_t svld2_vnum_s32(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))
+svint64x2_t svld2_vnum_s64(svbool_t, int64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))
+svint16x2_t svld2_vnum_s16(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))
+svuint8x3_t svld3_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))
+svuint32x3_t svld3_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))
+svuint64x3_t svld3_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))
+svuint16x3_t svld3_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))
+svint8x3_t svld3_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))
+svfloat64x3_t svld3_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))
+svfloat32x3_t svld3_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))
+svfloat16x3_t svld3_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))
+svint32x3_t svld3_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))
+svint64x3_t svld3_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))
+svint16x3_t svld3_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))
+svuint8x3_t svld3_vnum_u8(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))
+svuint32x3_t svld3_vnum_u32(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))
+svuint64x3_t svld3_vnum_u64(svbool_t, uint64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))
+svuint16x3_t svld3_vnum_u16(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))
+svint8x3_t svld3_vnum_s8(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))
+svfloat64x3_t svld3_vnum_f64(svbool_t, float64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))
+svfloat32x3_t svld3_vnum_f32(svbool_t, float32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))
+svfloat16x3_t svld3_vnum_f16(svbool_t, float16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))
+svint32x3_t svld3_vnum_s32(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))
+svint64x3_t svld3_vnum_s64(svbool_t, int64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))
+svint16x3_t svld3_vnum_s16(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))
+svuint8x4_t svld4_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))
+svuint32x4_t svld4_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))
+svuint64x4_t svld4_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))
+svuint16x4_t svld4_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))
+svint8x4_t svld4_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))
+svfloat64x4_t svld4_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))
+svfloat32x4_t svld4_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))
+svfloat16x4_t svld4_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))
+svint32x4_t svld4_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))
+svint64x4_t svld4_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))
+svint16x4_t svld4_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))
+svuint8x4_t svld4_vnum_u8(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))
+svuint32x4_t svld4_vnum_u32(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))
+svuint64x4_t svld4_vnum_u64(svbool_t, uint64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))
+svuint16x4_t svld4_vnum_u16(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))
+svint8x4_t svld4_vnum_s8(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))
+svfloat64x4_t svld4_vnum_f64(svbool_t, float64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))
+svfloat32x4_t svld4_vnum_f32(svbool_t, float32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))
+svfloat16x4_t svld4_vnum_f16(svbool_t, float16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))
+svint32x4_t svld4_vnum_s32(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))
+svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))
+svint16x4_t svld4_vnum_s16(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))
+svuint8_t svldff1_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))
+svuint32_t svldff1_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))
+svuint64_t svldff1_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))
+svuint16_t svldff1_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))
+svint8_t svldff1_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))
+svfloat64_t svldff1_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))
+svfloat32_t svldff1_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))
+svfloat16_t svldff1_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))
+svint32_t svldff1_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))
+svint64_t svldff1_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))
+svint16_t svldff1_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))
+svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))
+svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))
+svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))
+svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))
+svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))
+svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))
+svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))
+svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))
+svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))
+svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))
+svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))
+svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))
+svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))
+svuint64_t svldff1_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))
+svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))
+svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))
+svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))
+svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))
+svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))
+svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))
+svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))
+svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))
+svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))
+svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))
+svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))
+svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))
+svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))
+svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))
+svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))
+svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))
+svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))
+svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))
+svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))
+svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))
+svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))
+svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))
+svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))
+svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))
+svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))
+svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))
+svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))
+svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))
+svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))
+svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))
+svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))
+svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))
+svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))
+svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))
+svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))
+svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))
+svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))
+svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))
+svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))
+svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))
+svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))
+svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))
+svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))
+svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))
+svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))
+svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))
+svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))
+svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))
+svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))
+svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))
+svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))
+svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))
+svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))
+svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))
+svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32)))
+svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64)))
+svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16)))
+svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32)))
+svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64)))
+svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16)))
+svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32)))
+svuint32_t svldff1sb_u32(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64)))
+svuint64_t svldff1sb_u64(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16)))
+svuint16_t svldff1sb_u16(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32)))
+svint32_t svldff1sb_s32(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64)))
+svint64_t svldff1sb_s64(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16)))
+svint16_t svldff1sb_s16(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))
+svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))
+svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))
+svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))
+svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))
+svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))
+svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))
+svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))
+svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))
+svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))
+svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))
+svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))
+svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))
+svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))
+svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))
+svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))
+svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))
+svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))
+svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))
+svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))
+svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))
+svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))
+svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))
+svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))
+svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))
+svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))
+svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))
+svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))
+svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32)))
+svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64)))
+svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32)))
+svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64)))
+svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32)))
+svuint32_t svldff1sh_u32(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64)))
+svuint64_t svldff1sh_u64(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32)))
+svint32_t svldff1sh_s32(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64)))
+svint64_t svldff1sh_s64(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))
+svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))
+svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))
+svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))
+svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))
+svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))
+svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
+svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
+svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
+svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
+svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
+svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
+svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
+svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
+svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64)))
+svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64)))
+svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64)))
+svuint64_t svldff1sw_u64(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64)))
+svint64_t svldff1sw_s64(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
+svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
+svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
+svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
+svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
+svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
+svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
+svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
+svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
+svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
+svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
+svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
+svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
+svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
+svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
+svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
+svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32)))
+svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64)))
+svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16)))
+svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32)))
+svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64)))
+svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16)))
+svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32)))
+svuint32_t svldff1ub_u32(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64)))
+svuint64_t svldff1ub_u64(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16)))
+svuint16_t svldff1ub_u16(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32)))
+svint32_t svldff1ub_s32(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64)))
+svint64_t svldff1ub_s64(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16)))
+svint16_t svldff1ub_s16(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
+svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
+svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
+svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
+svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
+svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
+svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
+svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
+svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
+svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
+svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
+svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
+svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
+svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
+svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
+svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
+svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
+svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
+svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
+svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
+svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
+svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
+svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
+svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
+svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
+svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
+svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
+svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
+svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32)))
+svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64)))
+svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32)))
+svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64)))
+svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32)))
+svuint32_t svldff1uh_u32(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64)))
+svuint64_t svldff1uh_u64(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32)))
+svint32_t svldff1uh_s32(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64)))
+svint64_t svldff1uh_s64(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
+svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
+svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
+svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
+svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
+svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
+svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
+svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
+svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
+svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
+svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
+svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
+svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
+svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
+svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64)))
+svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64)))
+svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64)))
+svuint64_t svldff1uw_u64(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64)))
+svint64_t svldff1uw_s64(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
+svuint8_t svldnf1_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
+svuint32_t svldnf1_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
+svuint64_t svldnf1_u64(svbool_t, uint64_t const *);
+__ai
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
+svuint16_t svldnf1_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
+svint8_t svldnf1_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
+svfloat64_t svldnf1_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
+svfloat32_t svldnf1_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
+svfloat16_t svldnf1_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
+svint32_t svldnf1_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))
+svint64_t svldnf1_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))
+svint16_t svldnf1_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))
+svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))
+svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))
+svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))
+svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))
+svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))
+svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))
+svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))
+svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))
+svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))
+svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))
+svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32)))
+svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64)))
+svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16)))
+svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32)))
+svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64)))
+svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16)))
+svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32)))
+svuint32_t svldnf1sb_u32(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64)))
+svuint64_t svldnf1sb_u64(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16)))
+svuint16_t svldnf1sb_u16(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32)))
+svint32_t svldnf1sb_s32(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64)))
+svint64_t svldnf1sb_s64(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16)))
+svint16_t svldnf1sb_s16(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32)))
+svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64)))
+svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32)))
+svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64)))
+svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32)))
+svuint32_t svldnf1sh_u32(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64)))
+svuint64_t svldnf1sh_u64(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32)))
+svint32_t svldnf1sh_s32(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64)))
+svint64_t svldnf1sh_s64(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64)))
+svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64)))
+svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64)))
+svuint64_t svldnf1sw_u64(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64)))
+svint64_t svldnf1sw_s64(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32)))
+svuint32_t svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64)))
+svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16)))
+svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32)))
+svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64)))
+svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16)))
+svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32)))
+svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64)))
+svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16)))
+svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32)))
+svint32_t svldnf1ub_s32(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64)))
+svint64_t svldnf1ub_s64(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16)))
+svint16_t svldnf1ub_s16(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32)))
+svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64)))
+svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32)))
+svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64)))
+svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32)))
+svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64)))
+svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32)))
+svint32_t svldnf1uh_s32(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64)))
+svint64_t svldnf1uh_s64(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64)))
+svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64)))
+svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64)))
+svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64)))
+svint64_t svldnf1uw_s64(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))
+svuint8_t svldnt1_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))
+svuint32_t svldnt1_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))
+svuint64_t svldnt1_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))
+svuint16_t svldnt1_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))
+svint8_t svldnt1_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))
+svfloat64_t svldnt1_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))
+svfloat32_t svldnt1_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))
+svfloat16_t svldnt1_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))
+svint32_t svldnt1_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))
+svint64_t svldnt1_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))
+svint16_t svldnt1_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))
+svuint8_t svldnt1_vnum_u8(svbool_t, uint8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))
+svuint32_t svldnt1_vnum_u32(svbool_t, uint32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))
+svuint64_t svldnt1_vnum_u64(svbool_t, uint64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))
+svuint16_t svldnt1_vnum_u16(svbool_t, uint16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))
+svint8_t svldnt1_vnum_s8(svbool_t, int8_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))
+svfloat64_t svldnt1_vnum_f64(svbool_t, float64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))
+svfloat32_t svldnt1_vnum_f32(svbool_t, float32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))
+svfloat16_t svldnt1_vnum_f16(svbool_t, float16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))
+svint32_t svldnt1_vnum_s32(svbool_t, int32_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))
+svint64_t svldnt1_vnum_s64(svbool_t, int64_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))
+svint16_t svldnt1_vnum_s16(svbool_t, int16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))
+uint64_t svlen_u8(svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))
+uint64_t svlen_u32(svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))
+uint64_t svlen_u64(svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))
+uint64_t svlen_u16(svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))
+uint64_t svlen_s8(svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))
+uint64_t svlen_f64(svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))
+uint64_t svlen_f32(svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))
+uint64_t svlen_f16(svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))
+uint64_t svlen_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))
+uint64_t svlen_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))
+uint64_t svlen_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))
+svuint8_t svlsl_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))
+svuint32_t svlsl_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))
+svuint64_t svlsl_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))
+svuint16_t svlsl_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))
+svint8_t svlsl_n_s8_m(svbool_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))
+svint32_t svlsl_n_s32_m(svbool_t, svint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))
+svint64_t svlsl_n_s64_m(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))
+svint16_t svlsl_n_s16_m(svbool_t, svint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))
+svuint8_t svlsl_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))
+svuint32_t svlsl_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))
+svuint64_t svlsl_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))
+svuint16_t svlsl_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))
+svint8_t svlsl_n_s8_x(svbool_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))
+svint32_t svlsl_n_s32_x(svbool_t, svint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))
+svint64_t svlsl_n_s64_x(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))
+svint16_t svlsl_n_s16_x(svbool_t, svint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))
+svuint8_t svlsl_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))
+svuint32_t svlsl_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))
+svuint64_t svlsl_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))
+svuint16_t svlsl_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))
+svint8_t svlsl_n_s8_z(svbool_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))
+svint32_t svlsl_n_s32_z(svbool_t, svint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))
+svint64_t svlsl_n_s64_z(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))
+svint16_t svlsl_n_s16_z(svbool_t, svint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))
+svuint8_t svlsl_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))
+svuint32_t svlsl_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))
+svuint64_t svlsl_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))
+svuint16_t svlsl_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))
+svint8_t svlsl_s8_m(svbool_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))
+svint32_t svlsl_s32_m(svbool_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))
+svint64_t svlsl_s64_m(svbool_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))
+svint16_t svlsl_s16_m(svbool_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))
+svuint8_t svlsl_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))
+svuint32_t svlsl_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))
+svuint64_t svlsl_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))
+svuint16_t svlsl_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))
+svint8_t svlsl_s8_x(svbool_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))
+svint32_t svlsl_s32_x(svbool_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))
+svint64_t svlsl_s64_x(svbool_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))
+svint16_t svlsl_s16_x(svbool_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))
+svuint8_t svlsl_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))
+svuint32_t svlsl_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))
+svuint64_t svlsl_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))
+svuint16_t svlsl_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))
+svint8_t svlsl_s8_z(svbool_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))
+svint32_t svlsl_s32_z(svbool_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))
+svint64_t svlsl_s64_z(svbool_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))
+svint16_t svlsl_s16_z(svbool_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))
+svuint8_t svlsl_wide_n_u8_m(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))
+svuint32_t svlsl_wide_n_u32_m(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))
+svuint16_t svlsl_wide_n_u16_m(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))
+svint8_t svlsl_wide_n_s8_m(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))
+svint32_t svlsl_wide_n_s32_m(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))
+svint16_t svlsl_wide_n_s16_m(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))
+svuint8_t svlsl_wide_n_u8_x(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))
+svuint32_t svlsl_wide_n_u32_x(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))
+svuint16_t svlsl_wide_n_u16_x(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))
+svint8_t svlsl_wide_n_s8_x(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))
+svint32_t svlsl_wide_n_s32_x(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))
+svint16_t svlsl_wide_n_s16_x(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))
+svuint8_t svlsl_wide_n_u8_z(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))
+svuint32_t svlsl_wide_n_u32_z(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))
+svuint16_t svlsl_wide_n_u16_z(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))
+svint8_t svlsl_wide_n_s8_z(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))
+svint32_t svlsl_wide_n_s32_z(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))
+svint16_t svlsl_wide_n_s16_z(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))
+svuint8_t svlsl_wide_u8_m(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))
+svuint32_t svlsl_wide_u32_m(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))
+svuint16_t svlsl_wide_u16_m(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))
+svint8_t svlsl_wide_s8_m(svbool_t, svint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))
+svint32_t svlsl_wide_s32_m(svbool_t, svint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))
+svint16_t svlsl_wide_s16_m(svbool_t, svint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))
+svuint8_t svlsl_wide_u8_x(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))
+svuint32_t svlsl_wide_u32_x(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))
+svuint16_t svlsl_wide_u16_x(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))
+svint8_t svlsl_wide_s8_x(svbool_t, svint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))
+svint32_t svlsl_wide_s32_x(svbool_t, svint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))
+svint16_t svlsl_wide_s16_x(svbool_t, svint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))
+svuint8_t svlsl_wide_u8_z(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))
+svuint32_t svlsl_wide_u32_z(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))
+svuint16_t svlsl_wide_u16_z(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))
+svint8_t svlsl_wide_s8_z(svbool_t, svint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))
+svint32_t svlsl_wide_s32_z(svbool_t, svint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))
+svint16_t svlsl_wide_s16_z(svbool_t, svint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))
+svuint8_t svlsr_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))
+svuint32_t svlsr_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))
+svuint64_t svlsr_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))
+svuint16_t svlsr_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))
+svuint8_t svlsr_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))
+svuint32_t svlsr_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))
+svuint64_t svlsr_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))
+svuint16_t svlsr_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))
+svuint8_t svlsr_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))
+svuint32_t svlsr_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))
+svuint64_t svlsr_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))
+svuint16_t svlsr_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))
+svuint8_t svlsr_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))
+svuint32_t svlsr_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))
+svuint64_t svlsr_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))
+svuint16_t svlsr_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))
+svuint8_t svlsr_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))
+svuint32_t svlsr_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))
+svuint64_t svlsr_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))
+svuint16_t svlsr_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))
+svuint8_t svlsr_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))
+svuint32_t svlsr_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))
+svuint64_t svlsr_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))
+svuint16_t svlsr_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))
+svuint8_t svlsr_wide_n_u8_m(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))
+svuint32_t svlsr_wide_n_u32_m(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))
+svuint16_t svlsr_wide_n_u16_m(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))
+svuint8_t svlsr_wide_n_u8_x(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))
+svuint32_t svlsr_wide_n_u32_x(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))
+svuint16_t svlsr_wide_n_u16_x(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))
+svuint8_t svlsr_wide_n_u8_z(svbool_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))
+svuint32_t svlsr_wide_n_u32_z(svbool_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))
+svuint16_t svlsr_wide_n_u16_z(svbool_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))
+svuint8_t svlsr_wide_u8_m(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))
+svuint32_t svlsr_wide_u32_m(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))
+svuint16_t svlsr_wide_u16_m(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))
+svuint8_t svlsr_wide_u8_x(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))
+svuint32_t svlsr_wide_u32_x(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))
+svuint16_t svlsr_wide_u16_x(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))
+svuint8_t svlsr_wide_u8_z(svbool_t, svuint8_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))
+svuint32_t svlsr_wide_u32_z(svbool_t, svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))
+svuint16_t svlsr_wide_u16_z(svbool_t, svuint16_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))
+svfloat64_t svmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))
+svfloat32_t svmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))
+svfloat16_t svmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))
+svfloat64_t svmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))
+svfloat32_t svmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))
+svfloat16_t svmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))
+svfloat64_t svmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))
+svfloat32_t svmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))
+svfloat16_t svmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))
+svuint8_t svmad_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))
+svuint32_t svmad_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))
+svuint64_t svmad_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))
+svuint16_t svmad_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))
+svint8_t svmad_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))
+svint32_t svmad_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))
+svint64_t svmad_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))
+svint16_t svmad_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))
+svuint8_t svmad_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))
+svuint32_t svmad_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))
+svuint64_t svmad_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))
+svuint16_t svmad_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))
+svint8_t svmad_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))
+svint32_t svmad_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))
+svint64_t svmad_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))
+svint16_t svmad_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))
+svuint8_t svmad_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))
+svuint32_t svmad_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))
+svuint64_t svmad_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))
+svuint16_t svmad_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))
+svint8_t svmad_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))
+svint32_t svmad_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))
+svint64_t svmad_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))
+svint16_t svmad_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))
+svfloat64_t svmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))
+svfloat32_t svmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))
+svfloat16_t svmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))
+svfloat64_t svmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))
+svfloat32_t svmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))
+svfloat16_t svmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))
+svfloat64_t svmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))
+svfloat32_t svmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))
+svfloat16_t svmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))
+svuint8_t svmad_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))
+svuint32_t svmad_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))
+svuint64_t svmad_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))
+svuint16_t svmad_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))
+svint8_t svmad_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))
+svint32_t svmad_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))
+svint64_t svmad_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))
+svint16_t svmad_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))
+svuint8_t svmad_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))
+svuint32_t svmad_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))
+svuint64_t svmad_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))
+svuint16_t svmad_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))
+svint8_t svmad_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))
+svint32_t svmad_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))
+svint64_t svmad_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))
+svint16_t svmad_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))
+svuint8_t svmad_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))
+svuint32_t svmad_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))
+svuint64_t svmad_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))
+svuint16_t svmad_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))
+svint8_t svmad_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))
+svint32_t svmad_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))
+svint64_t svmad_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))
+svint16_t svmad_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))
+svfloat64_t svmax_n_f64_m(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))
+svfloat32_t svmax_n_f32_m(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))
+svfloat16_t svmax_n_f16_m(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))
+svfloat64_t svmax_n_f64_x(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))
+svfloat32_t svmax_n_f32_x(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))
+svfloat16_t svmax_n_f16_x(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))
+svfloat64_t svmax_n_f64_z(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))
+svfloat32_t svmax_n_f32_z(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))
+svfloat16_t svmax_n_f16_z(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))
+svint8_t svmax_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))
+svint32_t svmax_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))
+svint64_t svmax_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))
+svint16_t svmax_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))
+svint8_t svmax_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))
+svint32_t svmax_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))
+svint64_t svmax_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))
+svint16_t svmax_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))
+svint8_t svmax_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))
+svint32_t svmax_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))
+svint64_t svmax_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))
+svint16_t svmax_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))
+svuint8_t svmax_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))
+svuint32_t svmax_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))
+svuint64_t svmax_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))
+svuint16_t svmax_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))
+svuint8_t svmax_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))
+svuint32_t svmax_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))
+svuint64_t svmax_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))
+svuint16_t svmax_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))
+svuint8_t svmax_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))
+svuint32_t svmax_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))
+svuint64_t svmax_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))
+svuint16_t svmax_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))
+svfloat64_t svmax_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))
+svfloat32_t svmax_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))
+svfloat16_t svmax_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))
+svfloat64_t svmax_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))
+svfloat32_t svmax_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))
+svfloat16_t svmax_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))
+svfloat64_t svmax_f64_z(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))
+svfloat32_t svmax_f32_z(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))
+svfloat16_t svmax_f16_z(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))
+svint8_t svmax_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))
+svint32_t svmax_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))
+svint64_t svmax_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))
+svint16_t svmax_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))
+svint8_t svmax_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))
+svint32_t svmax_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))
+svint64_t svmax_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))
+svint16_t svmax_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))
+svint8_t svmax_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))
+svint32_t svmax_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))
+svint64_t svmax_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))
+svint16_t svmax_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))
+svuint8_t svmax_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))
+svuint32_t svmax_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))
+svuint64_t svmax_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))
+svuint16_t svmax_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))
+svuint8_t svmax_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))
+svuint32_t svmax_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))
+svuint64_t svmax_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))
+svuint16_t svmax_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))
+svuint8_t svmax_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))
+svuint32_t svmax_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))
+svuint64_t svmax_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))
+svuint16_t svmax_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))
+svfloat64_t svmaxnm_n_f64_m(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))
+svfloat32_t svmaxnm_n_f32_m(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))
+svfloat16_t svmaxnm_n_f16_m(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))
+svfloat64_t svmaxnm_n_f64_x(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))
+svfloat32_t svmaxnm_n_f32_x(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))
+svfloat16_t svmaxnm_n_f16_x(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))
+svfloat64_t svmaxnm_n_f64_z(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))
+svfloat32_t svmaxnm_n_f32_z(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))
+svfloat16_t svmaxnm_n_f16_z(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))
+svfloat64_t svmaxnm_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))
+svfloat32_t svmaxnm_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))
+svfloat16_t svmaxnm_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))
+svfloat64_t svmaxnm_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))
+svfloat32_t svmaxnm_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))
+svfloat16_t svmaxnm_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))
+svfloat64_t svmaxnm_f64_z(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))
+svfloat32_t svmaxnm_f32_z(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))
+svfloat16_t svmaxnm_f16_z(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))
+float64_t svmaxnmv_f64(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))
+float32_t svmaxnmv_f32(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))
+float16_t svmaxnmv_f16(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))
+float64_t svmaxv_f64(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))
+float32_t svmaxv_f32(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))
+float16_t svmaxv_f16(svbool_t, svfloat16_t);
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) +int8_t svmaxv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) +int32_t svmaxv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) +int64_t svmaxv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) +int16_t svmaxv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) +uint8_t svmaxv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) +uint32_t svmaxv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) +uint64_t svmaxv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) +uint16_t svmaxv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) +svfloat64_t svmin_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) +svfloat32_t svmin_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) +svfloat16_t svmin_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) +svfloat64_t svmin_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) +svfloat32_t svmin_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) +svfloat16_t svmin_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) +svfloat64_t svmin_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) +svfloat32_t svmin_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) +svfloat16_t svmin_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) +svint8_t svmin_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) +svint32_t svmin_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) +svint64_t svmin_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) +svint16_t svmin_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) +svint8_t svmin_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) +svint32_t svmin_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) +svint64_t svmin_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) +svint16_t svmin_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) +svint8_t svmin_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) +svint32_t svmin_n_s32_z(svbool_t, svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) +svint64_t svmin_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) +svint16_t svmin_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) +svuint8_t svmin_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) +svuint32_t svmin_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) +svuint64_t svmin_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) +svuint16_t svmin_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) +svuint8_t svmin_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) +svuint32_t svmin_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) +svuint64_t svmin_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) +svuint16_t svmin_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) +svuint8_t svmin_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) +svuint32_t svmin_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) +svuint64_t svmin_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) +svuint16_t svmin_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) +svfloat64_t svmin_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) +svfloat32_t svmin_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) +svfloat16_t svmin_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) +svfloat64_t svmin_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) +svfloat32_t svmin_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) +svfloat16_t svmin_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) +svfloat64_t svmin_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) +svfloat32_t svmin_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) +svfloat16_t svmin_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) +svint8_t svmin_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) +svint32_t svmin_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) +svint64_t svmin_s64_m(svbool_t, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) +svint16_t svmin_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) +svint8_t svmin_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) +svint32_t svmin_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) +svint64_t svmin_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) +svint16_t svmin_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) +svint8_t svmin_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) +svint32_t svmin_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) +svint64_t svmin_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) +svint16_t svmin_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) +svuint8_t svmin_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) +svuint32_t svmin_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) +svuint64_t svmin_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) +svuint16_t svmin_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) +svuint8_t svmin_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) +svuint32_t svmin_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) +svuint64_t svmin_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) +svuint16_t svmin_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) +svuint8_t svmin_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) +svuint32_t svmin_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) +svuint64_t svmin_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) +svuint16_t svmin_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) +svfloat64_t svminnm_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) +svfloat32_t svminnm_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) +svfloat16_t svminnm_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) +svfloat64_t svminnm_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) +svfloat32_t svminnm_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) +svfloat16_t svminnm_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) +svfloat64_t svminnm_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) +svfloat32_t svminnm_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) +svfloat16_t svminnm_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) +svfloat64_t svminnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) +svfloat32_t svminnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) +svfloat16_t svminnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) +svfloat64_t svminnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) +svfloat32_t svminnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) +svfloat16_t svminnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) +svfloat64_t svminnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) +svfloat32_t svminnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) +svfloat16_t svminnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) +float64_t svminnmv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) +float32_t svminnmv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) +float16_t svminnmv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) +float64_t svminv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) +float32_t svminv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) +float16_t svminv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) +int8_t svminv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) +int32_t svminv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) +int64_t svminv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) +int16_t svminv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) +uint8_t svminv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) +uint32_t svminv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) +uint64_t svminv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) +uint16_t svminv_u16(svbool_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) +svfloat64_t svmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) +svfloat32_t svmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) +svfloat16_t svmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) +svfloat64_t svmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) +svfloat32_t svmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) +svfloat16_t svmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) +svfloat64_t svmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) +svfloat32_t svmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) +svfloat16_t svmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) +svuint8_t svmla_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) +svuint32_t svmla_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) +svuint64_t svmla_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) +svuint16_t svmla_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) +svint8_t svmla_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) +svint32_t svmla_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) +svint64_t svmla_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) +svint16_t svmla_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) +svuint8_t svmla_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) +svuint32_t svmla_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) +svuint64_t svmla_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) +svuint16_t svmla_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) +svint8_t svmla_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) +svint32_t svmla_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) +svint64_t svmla_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); 
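/* Illustrative aside (editorial sketch, not part of the generated arm_sve.h
 * diff): the declarations in this hunk follow the ACLE SVE naming scheme.
 * The trailing suffix selects the predication mode -- _m merges inactive
 * lanes from the first data operand, _z zeroes them, _x leaves them
 * unspecified -- and an _n_ infix marks a variant whose final operand is a
 * scalar. A minimal usage sketch, assuming SVE is enabled (e.g. compiling
 * with -march=armv8-a+sve) and using a hypothetical helper name:
 *
 *   #include <arm_sve.h>
 *   // Clamp every active lane of v to at most `limit`; inactive lanes
 *   // are left unspecified because the _x form is used.
 *   svfloat32_t clamp_upper(svbool_t pg, svfloat32_t v, float32_t limit) {
 *     return svmin_n_f32_x(pg, v, limit);
 *   }
 */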
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))
+svint16_t svmla_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))
+svuint8_t svmla_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))
+svuint32_t svmla_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))
+svuint64_t svmla_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))
+svuint16_t svmla_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))
+svint8_t svmla_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))
+svint32_t svmla_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))
+svint64_t svmla_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))
+svint16_t svmla_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))
+svfloat64_t svmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))
+svfloat32_t svmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))
+svfloat16_t svmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))
+svfloat64_t svmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))
+svfloat32_t svmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))
+svfloat16_t svmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))
+svfloat64_t svmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))
+svfloat32_t svmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))
+svfloat16_t svmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))
+svuint8_t svmla_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))
+svuint32_t svmla_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))
+svuint64_t svmla_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))
+svuint16_t svmla_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))
+svint8_t svmla_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))
+svint32_t svmla_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))
+svint64_t svmla_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))
+svint16_t svmla_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))
+svuint8_t svmla_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))
+svuint32_t svmla_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))
+svuint64_t svmla_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))
+svuint16_t svmla_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))
+svint8_t svmla_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))
+svint32_t svmla_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))
+svint64_t svmla_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))
+svint16_t svmla_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))
+svuint8_t svmla_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))
+svuint32_t svmla_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))
+svuint64_t svmla_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))
+svuint16_t svmla_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))
+svint8_t svmla_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))
+svint32_t svmla_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))
+svint64_t svmla_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))
+svint16_t svmla_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))
+svfloat64_t svmla_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))
+svfloat32_t svmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))
+svfloat16_t svmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))
+svfloat64_t svmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))
+svfloat32_t svmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))
+svfloat16_t svmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))
+svfloat64_t svmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))
+svfloat32_t svmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))
+svfloat16_t svmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))
+svfloat64_t svmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))
+svfloat32_t svmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))
+svfloat16_t svmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))
+svuint8_t svmls_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))
+svuint32_t svmls_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))
+svuint64_t svmls_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))
+svuint16_t svmls_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))
+svint8_t svmls_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))
+svint32_t svmls_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))
+svint64_t svmls_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))
+svint16_t svmls_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))
+svuint8_t svmls_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))
+svuint32_t svmls_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))
+svuint64_t svmls_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))
+svuint16_t svmls_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))
+svint8_t svmls_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))
+svint32_t svmls_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))
+svint64_t svmls_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))
+svint16_t svmls_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))
+svuint8_t svmls_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))
+svuint32_t svmls_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))
+svuint64_t svmls_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))
+svuint16_t svmls_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))
+svint8_t svmls_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))
+svint32_t svmls_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))
+svint64_t svmls_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))
+svint16_t svmls_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))
+svfloat64_t svmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))
+svfloat32_t svmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))
+svfloat16_t svmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))
+svfloat64_t svmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))
+svfloat32_t svmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))
+svfloat16_t svmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))
+svfloat64_t svmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))
+svfloat32_t svmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))
+svfloat16_t svmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))
+svuint8_t svmls_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))
+svuint32_t svmls_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))
+svuint64_t svmls_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))
+svuint16_t svmls_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))
+svint8_t svmls_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))
+svint32_t svmls_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))
+svint64_t svmls_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))
+svint16_t svmls_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))
+svuint8_t svmls_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))
+svuint32_t svmls_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))
+svuint64_t svmls_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))
+svuint16_t svmls_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))
+svint8_t svmls_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))
+svint32_t svmls_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))
+svint64_t svmls_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))
+svint16_t svmls_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))
+svuint8_t svmls_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))
+svuint32_t svmls_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))
+svuint64_t svmls_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))
+svuint16_t svmls_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))
+svint8_t svmls_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))
+svint32_t svmls_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))
+svint64_t svmls_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))
+svint16_t svmls_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))
+svfloat64_t svmls_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))
+svfloat32_t svmls_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))
+svfloat16_t svmls_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))
+svbool_t svmov_b_z(svbool_t, svbool_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))
+svfloat64_t svmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))
+svfloat32_t svmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))
+svfloat16_t svmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))
+svfloat64_t svmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))
+svfloat32_t svmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))
+svfloat16_t svmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))
+svfloat64_t svmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))
+svfloat32_t svmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))
+svfloat16_t svmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))
+svuint8_t svmsb_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))
+svuint32_t svmsb_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))
+svuint64_t svmsb_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))
+svuint16_t svmsb_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))
+svint8_t svmsb_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))
+svint32_t svmsb_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))
+svint64_t svmsb_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))
+svint16_t svmsb_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))
+svuint8_t svmsb_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))
+svuint32_t svmsb_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))
+svuint64_t svmsb_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))
+svuint16_t svmsb_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))
+svint8_t svmsb_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))
+svint32_t svmsb_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))
+svint64_t svmsb_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))
+svint16_t svmsb_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))
+svuint8_t svmsb_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))
+svuint32_t svmsb_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))
+svuint64_t svmsb_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))
+svuint16_t svmsb_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))
+svint8_t svmsb_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))
+svint32_t svmsb_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))
+svint64_t svmsb_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))
+svint16_t svmsb_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))
+svfloat64_t svmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))
+svfloat32_t svmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))
+svfloat16_t svmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))
+svfloat64_t svmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))
+svfloat32_t svmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))
+svfloat16_t svmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))
+svfloat64_t svmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))
+svfloat32_t svmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))
+svfloat16_t svmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))
+svuint8_t svmsb_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))
+svuint32_t svmsb_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))
+svuint64_t svmsb_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))
+svuint16_t svmsb_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))
+svint8_t svmsb_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))
+svint32_t svmsb_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))
+svint64_t svmsb_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))
+svint16_t svmsb_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))
+svuint8_t svmsb_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))
+svuint32_t svmsb_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))
+svuint64_t svmsb_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))
+svuint16_t svmsb_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))
+svint8_t svmsb_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))
+svint32_t svmsb_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))
+svint64_t svmsb_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))
+svint16_t svmsb_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))
+svuint8_t svmsb_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))
+svuint32_t svmsb_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))
+svuint64_t svmsb_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))
+svuint16_t svmsb_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))
+svint8_t svmsb_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))
+svint32_t svmsb_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))
+svint64_t svmsb_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))
+svint16_t svmsb_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))
+svfloat64_t svmul_n_f64_m(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))
+svfloat32_t svmul_n_f32_m(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))
+svfloat16_t svmul_n_f16_m(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))
+svfloat64_t svmul_n_f64_x(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))
+svfloat32_t svmul_n_f32_x(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))
+svfloat16_t svmul_n_f16_x(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))
+svfloat64_t svmul_n_f64_z(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))
+svfloat32_t svmul_n_f32_z(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))
+svfloat16_t svmul_n_f16_z(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))
+svuint8_t svmul_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))
+svuint32_t svmul_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))
+svuint64_t svmul_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))
+svuint16_t svmul_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))
+svint8_t svmul_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))
+svint32_t svmul_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))
+svint64_t svmul_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))
+svint16_t svmul_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))
+svuint8_t svmul_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))
+svuint32_t svmul_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))
+svuint64_t svmul_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))
+svuint16_t svmul_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))
+svint8_t svmul_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))
+svint32_t svmul_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))
+svint64_t svmul_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))
+svint16_t svmul_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))
+svuint8_t svmul_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))
+svuint32_t svmul_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))
+svuint64_t svmul_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))
+svuint16_t svmul_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))
+svint8_t svmul_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))
+svint32_t svmul_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))
+svint64_t svmul_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))
+svint16_t svmul_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))
+svfloat64_t svmul_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))
+svfloat32_t svmul_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))
+svfloat16_t svmul_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))
+svfloat64_t svmul_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))
+svfloat32_t svmul_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))
+svfloat16_t svmul_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))
+svfloat64_t svmul_f64_z(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))
+svfloat32_t svmul_f32_z(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))
+svfloat16_t svmul_f16_z(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))
+svuint8_t svmul_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))
+svuint32_t svmul_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))
+svuint64_t svmul_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))
+svuint16_t svmul_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))
+svint8_t svmul_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))
+svint32_t svmul_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))
+svint64_t svmul_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))
+svint16_t svmul_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))
+svuint8_t svmul_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))
+svuint32_t svmul_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))
+svuint64_t svmul_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))
+svuint16_t svmul_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))
+svint8_t svmul_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))
+svint32_t svmul_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))
+svint64_t svmul_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))
+svint16_t svmul_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))
+svuint8_t svmul_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))
+svuint32_t svmul_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))
+svuint64_t svmul_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))
+svuint16_t svmul_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))
+svint8_t svmul_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))
+svint32_t svmul_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))
+svint64_t svmul_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))
+svint16_t svmul_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))
+svfloat64_t svmul_lane_f64(svfloat64_t, svfloat64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))
+svfloat32_t svmul_lane_f32(svfloat32_t, svfloat32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))
+svfloat16_t svmul_lane_f16(svfloat16_t, svfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))
+svint8_t svmulh_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))
+svint32_t svmulh_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))
+svint64_t svmulh_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))
+svint16_t svmulh_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))
+svint8_t svmulh_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))
+svint32_t svmulh_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))
+svint64_t svmulh_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))
+svint16_t svmulh_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))
+svint8_t svmulh_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))
+svint32_t svmulh_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))
+svint64_t svmulh_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))
+svint16_t svmulh_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))
+svuint8_t svmulh_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))
+svuint32_t svmulh_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))
+svuint64_t svmulh_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))
+svuint16_t svmulh_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))
+svuint8_t svmulh_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))
+svuint32_t svmulh_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))
+svuint64_t svmulh_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))
+svuint16_t svmulh_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))
+svuint8_t svmulh_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))
+svuint32_t svmulh_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))
+svuint64_t svmulh_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))
+svuint16_t svmulh_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))
+svint8_t svmulh_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))
+svint32_t svmulh_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))
+svint64_t svmulh_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))
+svint16_t svmulh_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))
+svint8_t svmulh_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))
+svint32_t svmulh_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))
+svint64_t svmulh_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))
+svint16_t svmulh_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))
+svint8_t svmulh_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))
+svint32_t svmulh_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))
+svint64_t svmulh_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))
+svint16_t svmulh_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))
+svuint8_t svmulh_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))
+svuint32_t svmulh_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))
+svuint64_t svmulh_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))
+svuint16_t svmulh_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))
+svuint8_t svmulh_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))
+svuint32_t svmulh_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))
+svuint64_t svmulh_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))
+svuint16_t svmulh_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))
+svuint8_t svmulh_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))
+svuint32_t svmulh_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))
+svuint64_t svmulh_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))
+svuint16_t svmulh_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))
+svfloat64_t svmulx_n_f64_m(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))
+svfloat32_t svmulx_n_f32_m(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))
+svfloat16_t svmulx_n_f16_m(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))
+svfloat64_t svmulx_n_f64_x(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))
+svfloat32_t svmulx_n_f32_x(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))
+svfloat16_t svmulx_n_f16_x(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))
+svfloat64_t svmulx_n_f64_z(svbool_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))
+svfloat32_t svmulx_n_f32_z(svbool_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))
+svfloat16_t svmulx_n_f16_z(svbool_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))
+svfloat64_t svmulx_f64_m(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))
+svfloat32_t svmulx_f32_m(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))
+svfloat16_t svmulx_f16_m(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))
+svfloat64_t svmulx_f64_x(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))
+svfloat32_t svmulx_f32_x(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))
+svfloat16_t svmulx_f16_x(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))
+svfloat64_t svmulx_f64_z(svbool_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))
+svfloat32_t svmulx_f32_z(svbool_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))
+svfloat16_t svmulx_f16_z(svbool_t, svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))
+svbool_t svnand_b_z(svbool_t, svbool_t, svbool_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))
+svfloat64_t svneg_f64_m(svfloat64_t, svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))
+svfloat32_t svneg_f32_m(svfloat32_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))
+svfloat16_t svneg_f16_m(svfloat16_t, svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))
+svfloat64_t svneg_f64_x(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))
+svfloat32_t svneg_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))
+svfloat16_t svneg_f16_x(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))
+svfloat64_t svneg_f64_z(svbool_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))
+svfloat32_t svneg_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))
+svfloat16_t svneg_f16_z(svbool_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))
+svint8_t svneg_s8_m(svint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))
+svint32_t svneg_s32_m(svint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))
+svint64_t svneg_s64_m(svint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))
+svint16_t svneg_s16_m(svint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))
+svint8_t svneg_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))
+svint32_t svneg_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))
+svint64_t svneg_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))
+svint16_t svneg_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))
+svint8_t svneg_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))
+svint32_t svneg_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))
+svint64_t svneg_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))
+svint16_t svneg_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))
+svfloat64_t svnmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))
+svfloat32_t svnmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))
+svfloat16_t svnmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))
+svfloat64_t svnmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))
+svfloat32_t svnmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))
+svfloat16_t svnmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))
+svfloat64_t svnmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))
+svfloat32_t svnmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))
+svfloat16_t svnmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))
+svfloat64_t svnmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))
+svfloat32_t svnmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))
+svfloat16_t svnmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) +svfloat64_t svnmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) +svfloat32_t svnmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) +svfloat16_t svnmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) +svfloat64_t svnmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) +svfloat32_t svnmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) +svfloat16_t svnmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) +svfloat64_t svnmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) +svfloat32_t svnmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) +svfloat16_t svnmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) +svfloat64_t svnmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) +svfloat32_t svnmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) +svfloat16_t svnmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) +svfloat64_t svnmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) +svfloat32_t svnmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) +svfloat16_t svnmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) +svfloat64_t svnmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) +svfloat32_t svnmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) +svfloat16_t svnmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) +svfloat64_t svnmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) +svfloat32_t svnmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) +svfloat16_t svnmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) +svfloat64_t svnmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) +svfloat32_t 
svnmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) +svfloat16_t svnmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) +svfloat64_t svnmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) +svfloat32_t svnmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) +svfloat16_t svnmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) +svfloat64_t svnmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) +svfloat32_t svnmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) +svfloat16_t svnmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) +svfloat64_t svnmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) +svfloat32_t svnmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) +svfloat16_t svnmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) +svfloat64_t svnmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) +svfloat32_t svnmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) +svfloat16_t svnmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) +svfloat64_t svnmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) +svfloat32_t svnmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) +svfloat16_t svnmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) +svfloat64_t svnmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) +svfloat32_t svnmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) +svfloat16_t svnmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) +svfloat64_t svnmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) +svfloat32_t svnmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) +svfloat16_t svnmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) +svfloat64_t 
svnmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) +svfloat32_t svnmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) +svfloat16_t svnmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) +svfloat64_t svnmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) +svfloat32_t svnmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) +svfloat16_t svnmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) +svfloat64_t svnmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) +svfloat32_t svnmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) +svfloat16_t svnmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) +svfloat64_t svnmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) +svfloat32_t svnmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) +svfloat16_t svnmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) +svfloat64_t svnmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) +svfloat32_t svnmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) +svfloat16_t svnmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) +svbool_t svnor_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) +svbool_t svnot_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) +svuint8_t svnot_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) +svuint32_t svnot_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) +svuint64_t svnot_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) +svuint16_t svnot_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) +svint8_t svnot_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) +svint32_t svnot_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) +svint64_t svnot_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) +svint16_t svnot_s16_m(svint16_t, svbool_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) +svuint8_t svnot_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) +svuint32_t svnot_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) +svuint64_t svnot_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) +svuint16_t svnot_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) +svint8_t svnot_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) +svint32_t svnot_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) +svint64_t svnot_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) +svint16_t svnot_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) +svuint8_t svnot_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) +svuint32_t svnot_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) +svuint64_t svnot_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) +svuint16_t svnot_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) +svint8_t svnot_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) +svint32_t svnot_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) +svint64_t svnot_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) +svint16_t svnot_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) +svbool_t svorn_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) +svbool_t svorr_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) +svuint8_t svorr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) +svuint32_t svorr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) +svuint64_t svorr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) +svuint16_t svorr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) +svint8_t svorr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) +svint32_t svorr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) +svint64_t svorr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) +svint16_t svorr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) +svuint8_t svorr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) +svuint32_t svorr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) +svuint64_t svorr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) +svuint16_t svorr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) +svint8_t svorr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) +svint32_t svorr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) +svint64_t svorr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) +svint16_t svorr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) +svuint8_t svorr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) +svuint32_t svorr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) +svuint64_t svorr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) +svuint16_t svorr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) +svint8_t svorr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) +svint32_t svorr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) +svint64_t svorr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) +svint16_t svorr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) +svuint8_t svorr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) +svuint32_t svorr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) +svuint64_t svorr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) +svuint16_t svorr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) +svint8_t svorr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) +svint32_t svorr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) +svint64_t svorr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) +svint16_t svorr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) +svuint8_t svorr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) +svuint32_t svorr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) +svuint64_t svorr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) +svuint16_t svorr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) +svint8_t 
svorr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) +svint32_t svorr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) +svint64_t svorr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) +svint16_t svorr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) +svuint8_t svorr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) +svuint32_t svorr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) +svuint64_t svorr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) +svuint16_t svorr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) +svint8_t svorr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) +svint32_t svorr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) +svint64_t svorr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) +svint16_t svorr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) +uint8_t svorv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) +uint32_t svorv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) +uint64_t svorv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) +uint16_t svorv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) +int8_t svorv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) +int32_t svorv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) +int64_t svorv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) +int16_t svorv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) +svbool_t svpfalse_b(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) +svbool_t svpfirst_b(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8))) +svbool_t svpnext_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b32))) +svbool_t svpnext_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b64))) +svbool_t svpnext_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16))) +svbool_t svpnext_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb))) +void svprfb(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum))) +void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd))) +void svprfd(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum))) +void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh))) +void svprfh(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum))) +void svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw))) +void svprfw(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum))) +void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any))) +bool svptest_any(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_first))) +bool svptest_first(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_last))) +bool svptest_last(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b8))) +svbool_t svptrue_pat_b8(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b32))) +svbool_t svptrue_pat_b32(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b64))) +svbool_t svptrue_pat_b64(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16))) +svbool_t svptrue_pat_b16(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8))) +svbool_t svptrue_b8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32))) +svbool_t svptrue_b32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64))) +svbool_t svptrue_b64(); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16))) +svbool_t svptrue_b16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) +svint8_t svqadd_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) +svint32_t svqadd_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) +svint64_t svqadd_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) +svint16_t svqadd_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) +svuint8_t svqadd_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) +svuint32_t svqadd_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) +svuint64_t svqadd_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) +svuint16_t svqadd_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) +svint8_t svqadd_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) +svint32_t svqadd_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) +svint64_t svqadd_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) +svint16_t svqadd_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) +svuint8_t svqadd_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) +svuint32_t svqadd_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) +svuint64_t svqadd_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) +svuint16_t svqadd_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) +int32_t svqdecb_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) +int64_t svqdecb_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) +uint32_t svqdecb_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) +uint64_t svqdecb_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) +int32_t svqdecb_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) +int64_t svqdecb_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) +uint32_t svqdecb_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) +uint64_t svqdecb_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) +int32_t svqdecd_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) +int64_t svqdecd_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) +uint32_t svqdecd_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) 
+uint64_t svqdecd_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) +svint64_t svqdecd_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) +svuint64_t svqdecd_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) +int32_t svqdecd_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) +int64_t svqdecd_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) +uint32_t svqdecd_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) +uint64_t svqdecd_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) +svint64_t svqdecd_pat_s64(svint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) +svuint64_t svqdecd_pat_u64(svuint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) +int32_t svqdech_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) +int64_t svqdech_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) +uint32_t svqdech_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) +uint64_t svqdech_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) +svint16_t svqdech_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) +svuint16_t svqdech_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) +int32_t svqdech_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) +int64_t svqdech_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) +uint32_t svqdech_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) +uint64_t svqdech_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) +svint16_t svqdech_pat_s16(svint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) +svuint16_t svqdech_pat_u16(svuint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) +int32_t svqdecp_n_s32_b8(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) +int32_t svqdecp_n_s32_b32(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) +int32_t svqdecp_n_s32_b64(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) +int32_t svqdecp_n_s32_b16(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) +int64_t svqdecp_n_s64_b8(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) +int64_t svqdecp_n_s64_b32(int64_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) +int64_t svqdecp_n_s64_b64(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) +int64_t svqdecp_n_s64_b16(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) +uint32_t svqdecp_n_u32_b8(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) +uint32_t svqdecp_n_u32_b32(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) +uint32_t svqdecp_n_u32_b64(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) +uint32_t svqdecp_n_u32_b16(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) +uint64_t svqdecp_n_u64_b8(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) +uint64_t svqdecp_n_u64_b32(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) +uint64_t svqdecp_n_u64_b64(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) +uint64_t svqdecp_n_u64_b16(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) +svint32_t svqdecp_s32(svint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) +svint64_t svqdecp_s64(svint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) +svint16_t svqdecp_s16(svint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) +svuint32_t svqdecp_u32(svuint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) +svuint64_t svqdecp_u64(svuint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) +svuint16_t svqdecp_u16(svuint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) +int32_t svqdecw_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) +int64_t svqdecw_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) +uint32_t svqdecw_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) +uint64_t svqdecw_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) +svint32_t svqdecw_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) +svuint32_t svqdecw_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) +int32_t svqdecw_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) +int64_t svqdecw_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) +uint32_t svqdecw_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) +uint64_t svqdecw_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) +svint32_t svqdecw_pat_s32(svint32_t, enum svpattern, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) +svuint32_t svqdecw_pat_u32(svuint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) +int32_t svqincb_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) +int64_t svqincb_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) +uint32_t svqincb_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) +uint64_t svqincb_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) +int32_t svqincb_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) +int64_t svqincb_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) +uint32_t svqincb_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) +uint64_t svqincb_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) +int32_t svqincd_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) +int64_t svqincd_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) +uint32_t svqincd_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) +uint64_t svqincd_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) +svint64_t svqincd_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) +svuint64_t svqincd_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) +int32_t svqincd_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) +int64_t svqincd_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) +uint32_t svqincd_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) +uint64_t svqincd_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) +svint64_t svqincd_pat_s64(svint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) +svuint64_t svqincd_pat_u64(svuint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) +int32_t svqinch_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) +int64_t svqinch_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) +uint32_t svqinch_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) +uint64_t svqinch_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) +svint16_t svqinch_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) +svuint16_t svqinch_u16(svuint16_t, uint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) +int32_t svqinch_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) +int64_t svqinch_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) +uint32_t svqinch_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) +uint64_t svqinch_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) +svint16_t svqinch_pat_s16(svint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) +svuint16_t svqinch_pat_u16(svuint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) +int32_t svqincp_n_s32_b8(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) +int32_t svqincp_n_s32_b32(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) +int32_t svqincp_n_s32_b64(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) +int32_t svqincp_n_s32_b16(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) +int64_t svqincp_n_s64_b8(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) +int64_t svqincp_n_s64_b32(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) +int64_t svqincp_n_s64_b64(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) +int64_t svqincp_n_s64_b16(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) +uint32_t svqincp_n_u32_b8(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) +uint32_t svqincp_n_u32_b32(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) +uint32_t svqincp_n_u32_b64(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) +uint32_t svqincp_n_u32_b16(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) +uint64_t svqincp_n_u64_b8(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) +uint64_t svqincp_n_u64_b32(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) +uint64_t svqincp_n_u64_b64(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) +uint64_t svqincp_n_u64_b16(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) +svint32_t svqincp_s32(svint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) +svint64_t svqincp_s64(svint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) +svint16_t svqincp_s16(svint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) +svuint32_t svqincp_u32(svuint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) +svuint64_t svqincp_u64(svuint64_t, 
svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) +svuint16_t svqincp_u16(svuint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) +int32_t svqincw_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) +int64_t svqincw_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) +uint32_t svqincw_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) +uint64_t svqincw_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) +svint32_t svqincw_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) +svuint32_t svqincw_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) +int32_t svqincw_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) +int64_t svqincw_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) +uint32_t svqincw_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) +uint64_t svqincw_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) +svint32_t svqincw_pat_s32(svint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) +svuint32_t svqincw_pat_u32(svuint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) +svint8_t svqsub_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) +svint32_t svqsub_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) +svint64_t svqsub_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) +svint16_t svqsub_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) +svuint8_t svqsub_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) +svuint32_t svqsub_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) +svuint64_t svqsub_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) +svuint16_t svqsub_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) +svint8_t svqsub_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) +svint32_t svqsub_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) +svint64_t svqsub_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) +svint16_t svqsub_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) +svuint8_t svqsub_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) +svuint32_t svqsub_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) +svuint64_t svqsub_u64(svuint64_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) +svuint16_t svqsub_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) +svuint8_t svrbit_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) +svuint32_t svrbit_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) +svuint64_t svrbit_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) +svuint16_t svrbit_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) +svint8_t svrbit_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) +svint32_t svrbit_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) +svint64_t svrbit_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) +svint16_t svrbit_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) +svuint8_t svrbit_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) +svuint32_t svrbit_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) +svuint64_t svrbit_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) +svuint16_t svrbit_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) +svint8_t svrbit_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) +svint32_t svrbit_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) +svint64_t svrbit_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) +svint16_t svrbit_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) +svuint8_t svrbit_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) +svuint32_t svrbit_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) +svuint64_t svrbit_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) +svuint16_t svrbit_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) +svint8_t svrbit_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) +svint32_t svrbit_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) +svint64_t svrbit_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) +svint16_t svrbit_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr))) +svbool_t svrdffr(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z))) +svbool_t svrdffr_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) +svfloat64_t svrecpe_f64(svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) +svfloat32_t svrecpe_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) +svfloat16_t svrecpe_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) +svfloat64_t svrecps_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) +svfloat32_t svrecps_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) +svfloat16_t svrecps_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) +svfloat64_t svrecpx_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) +svfloat32_t svrecpx_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) +svfloat16_t svrecpx_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) +svfloat64_t svrecpx_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) +svfloat32_t svrecpx_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) +svfloat16_t svrecpx_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) +svfloat64_t svrecpx_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) +svfloat32_t svrecpx_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) +svfloat16_t svrecpx_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) +svuint8_t svrev_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) +svuint32_t svrev_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) +svuint64_t svrev_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) +svuint16_t svrev_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) +svint8_t svrev_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) +svfloat64_t svrev_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) +svfloat32_t svrev_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) +svfloat16_t svrev_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) +svint32_t svrev_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) +svint64_t svrev_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) +svint16_t svrev_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b8))) +svbool_t svrev_b8(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b32))) +svbool_t svrev_b32(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b64))) +svbool_t svrev_b64(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b16))) +svbool_t svrev_b16(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) +svuint32_t svrevb_u32_m(svuint32_t, svbool_t, svuint32_t); 
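(Illustrative usage, not part of the vendored header.) The svrecpe_*/svrecps_* pairs declared just above follow the usual ACLE estimate-and-refine pattern: FRECPE returns a coarse per-lane reciprocal estimate, and each FRECPS step computes (2 - a*x), so multiplying the current estimate by it performs one Newton-Raphson iteration. A minimal sketch, assuming svptrue_b32() and svmul_f32_x() from elsewhere in this same arm_sve.h (compile with e.g. -march=armv8-a+sve):

  #include <arm_sve.h>

  /* Per-lane reciprocal via FRECPE plus two FRECPS Newton steps.
     svrecpe_f32/svrecps_f32 are declared in the hunk above;
     svptrue_b32 and svmul_f32_x come from elsewhere in arm_sve.h. */
  static svfloat32_t approx_recip_f32(svfloat32_t a) {
    svbool_t pg = svptrue_b32();               /* all-true predicate */
    svfloat32_t x = svrecpe_f32(a);            /* coarse initial estimate */
    x = svmul_f32_x(pg, x, svrecps_f32(a, x)); /* refine: x *= (2 - a*x) */
    x = svmul_f32_x(pg, x, svrecps_f32(a, x)); /* second refinement step */
    return x;
  }

Two refinement steps are typically enough to approach single-precision accuracy; the svrsqrte_*/svrsqrts_* declarations further down support the same shape for reciprocal square root.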
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) +svuint64_t svrevb_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) +svuint16_t svrevb_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) +svint32_t svrevb_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) +svint64_t svrevb_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) +svint16_t svrevb_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) +svuint32_t svrevb_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) +svuint64_t svrevb_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) +svuint16_t svrevb_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) +svint32_t svrevb_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) +svint64_t svrevb_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) +svint16_t svrevb_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) +svuint32_t svrevb_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) +svuint64_t svrevb_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) +svuint16_t svrevb_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) +svint32_t svrevb_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) +svint64_t svrevb_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) +svint16_t svrevb_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) +svuint32_t svrevh_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) +svuint64_t svrevh_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) +svint32_t svrevh_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) +svint64_t svrevh_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) +svuint32_t svrevh_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) +svuint64_t svrevh_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) +svint32_t svrevh_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) +svint64_t svrevh_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) +svuint32_t svrevh_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) +svuint64_t svrevh_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) +svint32_t 
svrevh_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) +svint64_t svrevh_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) +svuint64_t svrevw_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) +svint64_t svrevw_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) +svuint64_t svrevw_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) +svint64_t svrevw_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) +svuint64_t svrevw_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) +svint64_t svrevw_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) +svfloat64_t svrinta_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) +svfloat32_t svrinta_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) +svfloat16_t svrinta_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) +svfloat64_t svrinta_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) +svfloat32_t svrinta_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) +svfloat16_t svrinta_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) +svfloat64_t svrinta_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) +svfloat32_t svrinta_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) +svfloat16_t svrinta_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) +svfloat64_t svrinti_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) +svfloat32_t svrinti_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) +svfloat16_t svrinti_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) +svfloat64_t svrinti_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) +svfloat32_t svrinti_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) +svfloat16_t svrinti_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) +svfloat64_t svrinti_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) +svfloat32_t svrinti_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) +svfloat16_t svrinti_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) +svfloat64_t svrintm_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) 
+svfloat32_t svrintm_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) +svfloat16_t svrintm_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) +svfloat64_t svrintm_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) +svfloat32_t svrintm_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) +svfloat16_t svrintm_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) +svfloat64_t svrintm_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) +svfloat32_t svrintm_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) +svfloat16_t svrintm_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) +svfloat64_t svrintn_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) +svfloat32_t svrintn_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) +svfloat16_t svrintn_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) +svfloat64_t svrintn_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) +svfloat32_t svrintn_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) +svfloat16_t svrintn_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) +svfloat64_t svrintn_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) +svfloat32_t svrintn_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) +svfloat16_t svrintn_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) +svfloat64_t svrintp_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) +svfloat32_t svrintp_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) +svfloat16_t svrintp_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) +svfloat64_t svrintp_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) +svfloat32_t svrintp_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) +svfloat16_t svrintp_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) +svfloat64_t svrintp_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) +svfloat32_t svrintp_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) +svfloat16_t svrintp_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) +svfloat64_t svrintx_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) +svfloat32_t svrintx_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) +svfloat16_t svrintx_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) +svfloat64_t svrintx_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) +svfloat32_t svrintx_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) +svfloat16_t svrintx_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) +svfloat64_t svrintx_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) +svfloat32_t svrintx_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) +svfloat16_t svrintx_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) +svfloat64_t svrintz_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) +svfloat32_t svrintz_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) +svfloat16_t svrintz_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) +svfloat64_t svrintz_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) +svfloat32_t svrintz_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) +svfloat16_t svrintz_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) +svfloat64_t svrintz_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) +svfloat32_t svrintz_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) +svfloat16_t svrintz_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) +svfloat64_t svrsqrte_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) +svfloat32_t svrsqrte_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) +svfloat16_t svrsqrte_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) +svfloat64_t svrsqrts_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) +svfloat32_t svrsqrts_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) +svfloat16_t svrsqrts_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) +svfloat64_t svscale_n_f64_m(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) +svfloat32_t svscale_n_f32_m(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) +svfloat16_t svscale_n_f16_m(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) +svfloat64_t svscale_n_f64_x(svbool_t, svfloat64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) +svfloat32_t svscale_n_f32_x(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) +svfloat16_t svscale_n_f16_x(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) +svfloat64_t svscale_n_f64_z(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) +svfloat32_t svscale_n_f32_z(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) +svfloat16_t svscale_n_f16_z(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) +svfloat64_t svscale_f64_m(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) +svfloat32_t svscale_f32_m(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) +svfloat16_t svscale_f16_m(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) +svfloat64_t svscale_f64_x(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) +svfloat32_t svscale_f32_x(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) +svfloat16_t svscale_f16_x(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) +svfloat64_t svscale_f64_z(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) +svfloat32_t svscale_f32_z(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) +svfloat16_t svscale_f16_z(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) +svbool_t svsel_b(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) +svuint8_t svsel_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) +svuint32_t svsel_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) +svuint64_t svsel_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) +svuint16_t svsel_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) +svint8_t svsel_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) +svfloat64_t svsel_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) +svfloat32_t svsel_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) +svfloat16_t svsel_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) +svint32_t svsel_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) +svint64_t svsel_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) +svint16_t svsel_s16(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) +svuint8x2_t svset2_u8(svuint8x2_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) +svuint32x2_t svset2_u32(svuint32x2_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) +svuint64x2_t svset2_u64(svuint64x2_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) +svuint16x2_t svset2_u16(svuint16x2_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) +svint8x2_t svset2_s8(svint8x2_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) +svfloat64x2_t svset2_f64(svfloat64x2_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) +svfloat32x2_t svset2_f32(svfloat32x2_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) +svfloat16x2_t svset2_f16(svfloat16x2_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) +svint32x2_t svset2_s32(svint32x2_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) +svint64x2_t svset2_s64(svint64x2_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) +svint16x2_t svset2_s16(svint16x2_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) +svuint8x3_t svset3_u8(svuint8x3_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) +svuint32x3_t svset3_u32(svuint32x3_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) +svuint64x3_t svset3_u64(svuint64x3_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) +svuint16x3_t svset3_u16(svuint16x3_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) +svint8x3_t svset3_s8(svint8x3_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) +svfloat64x3_t svset3_f64(svfloat64x3_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) +svfloat32x3_t svset3_f32(svfloat32x3_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) +svfloat16x3_t svset3_f16(svfloat16x3_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) +svint32x3_t svset3_s32(svint32x3_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) +svint64x3_t svset3_s64(svint64x3_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) +svint16x3_t svset3_s16(svint16x3_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) +svuint8x4_t svset4_u8(svuint8x4_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) +svuint32x4_t svset4_u32(svuint32x4_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) +svuint64x4_t svset4_u64(svuint64x4_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) +svuint16x4_t svset4_u16(svuint16x4_t, uint64_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) +svint8x4_t svset4_s8(svint8x4_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) +svfloat64x4_t svset4_f64(svfloat64x4_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) +svfloat32x4_t svset4_f32(svfloat32x4_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) +svfloat16x4_t svset4_f16(svfloat16x4_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) +svint32x4_t svset4_s32(svint32x4_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) +svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) +svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr))) +void svsetffr(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) +svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) +svuint32_t svsplice_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) +svuint64_t svsplice_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) +svuint16_t svsplice_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) +svint8_t svsplice_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) +svfloat64_t svsplice_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) +svfloat32_t svsplice_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) +svfloat16_t svsplice_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) +svint32_t svsplice_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) +svint64_t svsplice_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) +svint16_t svsplice_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) +svfloat64_t svsqrt_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) +svfloat32_t svsqrt_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) +svfloat16_t svsqrt_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) +svfloat64_t svsqrt_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) +svfloat32_t svsqrt_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) +svfloat16_t svsqrt_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) +svfloat64_t svsqrt_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) +svfloat32_t 
svsqrt_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) +svfloat16_t svsqrt_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) +void svst1_u8(svbool_t, uint8_t *, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) +void svst1_u32(svbool_t, uint32_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) +void svst1_u64(svbool_t, uint64_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) +void svst1_u16(svbool_t, uint16_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) +void svst1_s8(svbool_t, int8_t *, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) +void svst1_f64(svbool_t, float64_t *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) +void svst1_f32(svbool_t, float32_t *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) +void svst1_f16(svbool_t, float16_t *, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) +void svst1_s32(svbool_t, int32_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) +void svst1_s64(svbool_t, int64_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) +void svst1_s16(svbool_t, int16_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) +void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) +void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) +void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) +void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) +void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) +void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) +void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) +void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) +void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) +void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) +void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) +void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) +void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) +void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) +void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) +void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) +void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) +void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) +void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) +void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) +void svst1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) +void svst1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) +void svst1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) +void svst1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) +void svst1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) +void svst1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) +void svst1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) +void svst1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) +void svst1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) +void svst1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) +void svst1b_s32(svbool_t, int8_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) +void svst1b_s64(svbool_t, int8_t *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) +void svst1b_s16(svbool_t, int8_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) +void svst1b_u32(svbool_t, uint8_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) +void svst1b_u64(svbool_t, uint8_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) +void svst1b_u16(svbool_t, uint8_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) +void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) +void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) +void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) +void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) +void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) +void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) +void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) +void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) +void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) +void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) +void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) +void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) +void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) +void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) +void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) +void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) +void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) +void svst1b_vnum_s64(svbool_t, int8_t *, int64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) +void svst1b_vnum_s16(svbool_t, int8_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) +void svst1b_vnum_u32(svbool_t, uint8_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) +void svst1b_vnum_u64(svbool_t, uint8_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) +void svst1b_vnum_u16(svbool_t, uint8_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) +void svst1h_s32(svbool_t, int16_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) +void svst1h_s64(svbool_t, int16_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) +void svst1h_u32(svbool_t, uint16_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) +void svst1h_u64(svbool_t, uint16_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) +void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) +void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) +void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) +void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) +void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) +void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) +void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) +void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) +void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) +void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) +void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) +void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) +void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) +void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) +void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) +void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) +void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) +void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) +void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) +void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) +void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) +void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) +void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) +void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) +void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) +void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) +void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) +void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) +void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) +void svst1h_vnum_s64(svbool_t, int16_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) +void svst1h_vnum_u32(svbool_t, uint16_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) +void svst1h_vnum_u64(svbool_t, uint16_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) +void svst1w_s64(svbool_t, int32_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) +void svst1w_u64(svbool_t, uint32_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) +void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) +void 
svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) +void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) +void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) +void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) +void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) +void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) +void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) +void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) +void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) +void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) +void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) +void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) +void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) +void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) +void svst1w_vnum_u64(svbool_t, uint32_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) +void svst2_u8(svbool_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) +void svst2_u32(svbool_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) +void svst2_u64(svbool_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) +void svst2_u16(svbool_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) +void svst2_s8(svbool_t, int8_t *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) +void svst2_f64(svbool_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) +void svst2_f32(svbool_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) +void svst2_f16(svbool_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) +void svst2_s32(svbool_t, int32_t *, 
svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) +void svst2_s64(svbool_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) +void svst2_s16(svbool_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) +void svst2_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) +void svst2_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) +void svst2_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) +void svst2_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) +void svst2_vnum_s8(svbool_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) +void svst2_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) +void svst2_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) +void svst2_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) +void svst2_vnum_s32(svbool_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) +void svst2_vnum_s64(svbool_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) +void svst2_vnum_s16(svbool_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) +void svst3_u8(svbool_t, uint8_t *, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) +void svst3_u32(svbool_t, uint32_t *, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) +void svst3_u64(svbool_t, uint64_t *, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) +void svst3_u16(svbool_t, uint16_t *, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) +void svst3_s8(svbool_t, int8_t *, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) +void svst3_f64(svbool_t, float64_t *, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) +void svst3_f32(svbool_t, float32_t *, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) +void svst3_f16(svbool_t, float16_t *, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) +void svst3_s32(svbool_t, int32_t *, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) +void svst3_s64(svbool_t, int64_t *, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) +void svst3_s16(svbool_t, int16_t *, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) +void svst3_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) +void svst3_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x3_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) +void svst3_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) +void svst3_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) +void svst3_vnum_s8(svbool_t, int8_t *, int64_t, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) +void svst3_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) +void svst3_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) +void svst3_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) +void svst3_vnum_s32(svbool_t, int32_t *, int64_t, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) +void svst3_vnum_s64(svbool_t, int64_t *, int64_t, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) +void svst3_vnum_s16(svbool_t, int16_t *, int64_t, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) +void svst4_u8(svbool_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) +void svst4_u32(svbool_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) +void svst4_u64(svbool_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) +void svst4_u16(svbool_t, uint16_t *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) +void svst4_s8(svbool_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) +void svst4_f64(svbool_t, float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) +void svst4_f32(svbool_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) +void svst4_f16(svbool_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) +void svst4_s32(svbool_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) +void svst4_s64(svbool_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) +void svst4_s16(svbool_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) +void svst4_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) +void svst4_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) +void svst4_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) +void svst4_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) +void svst4_vnum_s8(svbool_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) +void svst4_vnum_f64(svbool_t, float64_t *, int64_t, 
svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) +void svst4_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) +void svst4_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) +void svst4_vnum_s32(svbool_t, int32_t *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) +void svst4_vnum_s64(svbool_t, int64_t *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) +void svst4_vnum_s16(svbool_t, int16_t *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) +void svstnt1_u8(svbool_t, uint8_t *, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) +void svstnt1_u32(svbool_t, uint32_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) +void svstnt1_u64(svbool_t, uint64_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) +void svstnt1_u16(svbool_t, uint16_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) +void svstnt1_s8(svbool_t, int8_t *, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) +void svstnt1_f64(svbool_t, float64_t *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) +void svstnt1_f32(svbool_t, float32_t *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) +void svstnt1_f16(svbool_t, float16_t *, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) +void svstnt1_s32(svbool_t, int32_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) +void svstnt1_s64(svbool_t, int64_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) +void svstnt1_s16(svbool_t, int16_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) +void svstnt1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) +void svstnt1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) +void svstnt1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) +void svstnt1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) +void svstnt1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) +void svstnt1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) +void svstnt1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) +void svstnt1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) +void svstnt1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) +void svstnt1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) +void svstnt1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) +svfloat64_t svsub_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) +svfloat32_t svsub_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) +svfloat16_t svsub_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) +svfloat64_t svsub_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) +svfloat32_t svsub_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) +svfloat16_t svsub_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) +svfloat64_t svsub_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) +svfloat32_t svsub_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) +svfloat16_t svsub_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) +svuint8_t svsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) +svuint32_t svsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) +svuint64_t svsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) +svuint16_t svsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) +svint8_t svsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) +svint32_t svsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) +svint64_t svsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) +svint16_t svsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) +svuint8_t svsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) +svuint32_t svsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) +svuint64_t svsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) +svuint16_t svsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) +svint8_t svsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) +svint32_t svsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) +svint64_t svsub_n_s64_x(svbool_t, svint64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) +svint16_t svsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) +svuint8_t svsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) +svuint32_t svsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) +svuint64_t svsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) +svuint16_t svsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) +svint8_t svsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) +svint32_t svsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) +svint64_t svsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) +svint16_t svsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) +svfloat64_t svsub_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) +svfloat32_t svsub_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) +svfloat16_t svsub_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) +svfloat64_t svsub_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) +svfloat32_t svsub_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) +svfloat16_t svsub_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) +svfloat64_t svsub_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) +svfloat32_t svsub_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) +svfloat16_t svsub_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) +svuint8_t svsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) +svuint32_t svsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) +svuint64_t svsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) +svuint16_t svsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) +svint8_t svsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) +svint32_t svsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) +svint64_t svsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) +svint16_t svsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) +svuint8_t svsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) +svuint32_t svsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) +svuint64_t svsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) +svuint16_t svsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) +svint8_t svsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) +svint32_t svsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) +svint64_t svsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) +svint16_t svsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) +svuint8_t svsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) +svuint32_t svsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) +svuint64_t svsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) +svuint16_t svsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) +svint8_t svsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) +svint32_t svsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) +svint64_t svsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) +svint16_t svsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) +svfloat64_t svsubr_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) +svfloat32_t svsubr_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) +svfloat16_t svsubr_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) +svfloat64_t svsubr_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) +svfloat32_t svsubr_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) +svfloat16_t svsubr_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) +svfloat64_t svsubr_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) +svfloat32_t svsubr_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) +svfloat16_t svsubr_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) +svuint8_t svsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) +svuint32_t svsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) +svuint64_t svsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) +svuint16_t svsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) +svint8_t svsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) +svint32_t svsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) +svint64_t svsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) +svint16_t svsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) +svuint8_t svsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) +svuint32_t svsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) +svuint64_t svsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) +svuint16_t svsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) +svint8_t svsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) +svint32_t svsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) +svint64_t svsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) +svint16_t svsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) +svuint8_t svsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) +svuint32_t svsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) +svuint64_t svsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) +svuint16_t svsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) +svint8_t svsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) +svint32_t svsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) +svint64_t svsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) +svint16_t svsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) +svfloat64_t svsubr_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) +svfloat32_t svsubr_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) +svfloat16_t svsubr_f16_m(svbool_t, svfloat16_t, svfloat16_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) +svfloat64_t svsubr_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) +svfloat32_t svsubr_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) +svfloat16_t svsubr_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) +svfloat64_t svsubr_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) +svfloat32_t svsubr_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) +svfloat16_t svsubr_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) +svuint8_t svsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) +svuint32_t svsubr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) +svuint64_t svsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) +svuint16_t svsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) +svint8_t svsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) +svint32_t svsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) +svint64_t svsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) +svint16_t svsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) +svuint8_t svsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) +svuint32_t svsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) +svuint64_t svsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) +svuint16_t svsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) +svint8_t svsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) +svint32_t svsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) +svint64_t svsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) +svint16_t svsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) +svuint8_t svsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) +svuint32_t svsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) +svuint64_t svsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) +svuint16_t svsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) +svint8_t svsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) +svint32_t svsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) +svint64_t svsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) +svint16_t svsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) +svuint8_t svtbl_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) +svuint32_t svtbl_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) +svuint64_t svtbl_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) +svuint16_t svtbl_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) +svint8_t svtbl_s8(svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) +svfloat64_t svtbl_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) +svfloat32_t svtbl_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) +svfloat16_t svtbl_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) +svint32_t svtbl_s32(svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) +svint64_t svtbl_s64(svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) +svint16_t svtbl_s16(svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) +svuint8_t svtrn1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) +svuint32_t svtrn1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) +svuint64_t svtrn1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) +svuint16_t svtrn1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) +svint8_t svtrn1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) +svfloat64_t svtrn1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) +svfloat32_t svtrn1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) +svfloat16_t svtrn1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) +svint32_t svtrn1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) +svint64_t svtrn1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) 
+svint16_t svtrn1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b8))) +svbool_t svtrn1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b32))) +svbool_t svtrn1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b64))) +svbool_t svtrn1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b16))) +svbool_t svtrn1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) +svuint8_t svtrn2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) +svuint32_t svtrn2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) +svuint64_t svtrn2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) +svuint16_t svtrn2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) +svint8_t svtrn2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) +svfloat64_t svtrn2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) +svfloat32_t svtrn2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) +svfloat16_t svtrn2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) +svint32_t svtrn2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) +svint64_t svtrn2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) +svint16_t svtrn2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8))) +svbool_t svtrn2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b32))) +svbool_t svtrn2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64))) +svbool_t svtrn2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b16))) +svbool_t svtrn2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) +svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) +svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) +svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) +svfloat64_t svtssel_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) +svfloat32_t svtssel_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) +svfloat16_t svtssel_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8))) +svuint8x2_t svundef2_u8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32))) +svuint32x2_t svundef2_u32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64))) +svuint64x2_t svundef2_u64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16))) +svuint16x2_t svundef2_u16(); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8))) +svint8x2_t svundef2_s8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64))) +svfloat64x2_t svundef2_f64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32))) +svfloat32x2_t svundef2_f32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16))) +svfloat16x2_t svundef2_f16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32))) +svint32x2_t svundef2_s32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64))) +svint64x2_t svundef2_s64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16))) +svint16x2_t svundef2_s16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8))) +svuint8x3_t svundef3_u8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32))) +svuint32x3_t svundef3_u32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64))) +svuint64x3_t svundef3_u64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16))) +svuint16x3_t svundef3_u16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8))) +svint8x3_t svundef3_s8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64))) +svfloat64x3_t svundef3_f64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32))) +svfloat32x3_t svundef3_f32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16))) +svfloat16x3_t svundef3_f16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32))) +svint32x3_t svundef3_s32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64))) +svint64x3_t svundef3_s64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16))) +svint16x3_t svundef3_s16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8))) +svuint8x4_t svundef4_u8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32))) +svuint32x4_t svundef4_u32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64))) +svuint64x4_t svundef4_u64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16))) +svuint16x4_t svundef4_u16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8))) +svint8x4_t svundef4_s8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64))) +svfloat64x4_t svundef4_f64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32))) +svfloat32x4_t svundef4_f32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16))) +svfloat16x4_t svundef4_f16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32))) +svint32x4_t svundef4_s32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64))) +svint64x4_t svundef4_s64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16))) +svint16x4_t svundef4_s16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8))) +svuint8_t svundef_u8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32))) +svuint32_t svundef_u32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64))) +svuint64_t svundef_u64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16))) +svuint16_t svundef_u16(); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8))) +svint8_t svundef_s8(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64))) +svfloat64_t svundef_f64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32))) +svfloat32_t svundef_f32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16))) +svfloat16_t svundef_f16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32))) +svint32_t svundef_s32(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64))) +svint64_t svundef_s64(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16))) +svint16_t svundef_s16(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) +svbool_t svunpkhi_b(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) +svint32_t svunpkhi_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) +svint64_t svunpkhi_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) +svint16_t svunpkhi_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) +svuint32_t svunpkhi_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) +svuint64_t svunpkhi_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) +svuint16_t svunpkhi_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) +svbool_t svunpklo_b(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) +svint32_t svunpklo_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) +svint64_t svunpklo_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) +svint16_t svunpklo_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) +svuint32_t svunpklo_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) +svuint64_t svunpklo_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) +svuint16_t svunpklo_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) +svuint8_t svuzp1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) +svuint32_t svuzp1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) +svuint64_t svuzp1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) +svuint16_t svuzp1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) +svint8_t svuzp1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) +svfloat64_t svuzp1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) +svfloat32_t svuzp1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) +svfloat16_t svuzp1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) +svint32_t svuzp1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) +svint64_t svuzp1_s64(svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) +svint16_t svuzp1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b8))) +svbool_t svuzp1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b32))) +svbool_t svuzp1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b64))) +svbool_t svuzp1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b16))) +svbool_t svuzp1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) +svuint8_t svuzp2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) +svuint32_t svuzp2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) +svuint64_t svuzp2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) +svuint16_t svuzp2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) +svint8_t svuzp2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) +svfloat64_t svuzp2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) +svfloat32_t svuzp2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) +svfloat16_t svuzp2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) +svint32_t svuzp2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) +svint64_t svuzp2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) +svint16_t svuzp2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b8))) +svbool_t svuzp2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b32))) +svbool_t svuzp2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b64))) +svbool_t svuzp2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b16))) +svbool_t svuzp2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) +svbool_t svwhilele_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) +svbool_t svwhilele_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) +svbool_t svwhilele_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) +svbool_t svwhilele_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) +svbool_t svwhilele_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) +svbool_t svwhilele_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) +svbool_t svwhilele_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) +svbool_t svwhilele_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) +svbool_t svwhilele_b8_u32(uint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) +svbool_t svwhilele_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) +svbool_t svwhilele_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) +svbool_t svwhilele_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) +svbool_t svwhilele_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) +svbool_t svwhilele_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) +svbool_t svwhilele_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) +svbool_t svwhilele_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) +svbool_t svwhilelt_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) +svbool_t svwhilelt_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) +svbool_t svwhilelt_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) +svbool_t svwhilelt_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) +svbool_t svwhilelt_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) +svbool_t svwhilelt_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) +svbool_t svwhilelt_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) +svbool_t svwhilelt_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) +svbool_t svwhilelt_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) +svbool_t svwhilelt_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) +svbool_t svwhilelt_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) +svbool_t svwhilelt_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) +svbool_t svwhilelt_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) +svbool_t svwhilelt_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) +svbool_t svwhilelt_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) +svbool_t svwhilelt_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr))) +void svwrffr(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) +svuint8_t svzip1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) +svuint32_t svzip1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) +svuint64_t svzip1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) +svuint16_t svzip1_u16(svuint16_t, 
svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) +svint8_t svzip1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) +svfloat64_t svzip1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) +svfloat32_t svzip1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) +svfloat16_t svzip1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) +svint32_t svzip1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) +svint64_t svzip1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) +svint16_t svzip1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b8))) +svbool_t svzip1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b32))) +svbool_t svzip1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b64))) +svbool_t svzip1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b16))) +svbool_t svzip1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) +svuint8_t svzip2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) +svuint32_t svzip2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) +svuint64_t svzip2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) +svuint16_t svzip2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) +svint8_t svzip2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) +svfloat64_t svzip2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) +svfloat32_t svzip2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) +svfloat16_t svzip2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) +svint32_t svzip2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) +svint64_t svzip2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) +svint16_t svzip2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b8))) +svbool_t svzip2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b32))) +svbool_t svzip2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b64))) +svbool_t svzip2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b16))) +svbool_t svzip2_b16(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) +svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) +svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) +svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) +svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) +svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) +svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) +svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) +svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) +svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) +svint8_t svabd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) +svint32_t svabd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) +svint64_t svabd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) +svint16_t svabd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) +svint8_t svabd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) +svint32_t svabd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) +svint64_t svabd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) +svint16_t svabd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) +svint8_t svabd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) +svint32_t svabd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) +svint64_t svabd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) +svint16_t svabd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) +svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) +svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) +svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) +svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) +svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) +svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) +svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) +svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) +svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) +svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) +svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) +svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) +svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) +svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) +svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) +svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) +svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) +svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) +svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) +svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) +svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) +svint8_t svabd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) +svint32_t svabd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) +svint64_t svabd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) +svint16_t svabd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) +svint8_t svabd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) +svint32_t svabd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) +svint64_t svabd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) +svint16_t svabd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) +svint8_t svabd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) +svint32_t svabd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) +svint64_t svabd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) +svint16_t svabd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) +svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) +svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) +svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) +svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) +svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) +svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) +svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) +svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) +svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) +svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) +svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) +svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) +svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) +svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) +svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) +svfloat64_t svabs_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) +svfloat32_t svabs_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) +svfloat16_t svabs_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) +svfloat64_t svabs_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) +svfloat32_t svabs_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) +svfloat16_t svabs_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) +svint8_t svabs_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) +svint32_t svabs_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) +svint64_t svabs_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) +svint16_t svabs_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) +svint8_t svabs_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) +svint32_t svabs_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) +svint64_t svabs_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) +svint16_t svabs_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) +svint8_t svabs_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) +svint32_t svabs_z(svbool_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) +svint64_t svabs_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) +svint16_t svabs_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) +svbool_t svacge(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) +svbool_t svacge(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) +svbool_t svacge(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) +svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) +svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) +svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) +svbool_t svacgt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) +svbool_t svacgt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) +svbool_t svacgt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) +svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) +svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) +svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) +svbool_t svacle(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) +svbool_t svacle(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) +svbool_t svacle(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) +svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) +svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) +svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) +svbool_t svaclt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) +svbool_t svaclt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) +svbool_t svaclt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) +svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) +svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) +svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) +svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) +svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) +svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) +svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) +svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) +svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) +svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) +svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) +svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) +svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) +svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) +svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) +svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) +svint8_t svadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) +svint32_t svadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) +svint64_t svadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) +svint16_t svadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) +svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) +svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) +svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) +svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) +svint8_t svadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) +svint32_t svadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) +svint64_t svadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) +svint16_t svadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) +svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) +svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) +svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) +svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) +svint8_t svadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) +svint32_t svadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) +svint64_t svadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) +svint16_t svadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) +svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) +svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) +svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) +svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) +svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) +svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) +svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) +svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) +svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) +svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) +svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) +svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) +svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) +svint8_t svadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) +svint32_t svadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) +svint64_t svadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) +svint16_t svadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) +svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) +svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) +svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) +svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) +svint8_t svadd_x(svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) +svint32_t svadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) +svint64_t svadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) +svint16_t svadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) +svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) +svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) +svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) +svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) +svint8_t svadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) +svint32_t svadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) +svint64_t svadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) +svint16_t svadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) +float64_t svadda(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) +float32_t svadda(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) +float16_t svadda(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) +int64_t svaddv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) +int64_t svaddv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) +int64_t svaddv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) +int64_t svaddv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) +uint64_t svaddv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) +uint64_t svaddv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) +uint64_t svaddv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) +uint64_t svaddv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) +float64_t svaddv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) +float32_t svaddv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) +float16_t svaddv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_offset(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_offset(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_offset(svuint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_offset(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) +svuint32_t svadrd_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t svadrd_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) +svbool_t svand_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) +svuint8_t svand_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) +svuint32_t svand_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) +svuint64_t svand_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) +svuint16_t svand_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) +svint8_t svand_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) +svint32_t svand_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) +svint64_t svand_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) +svint16_t svand_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) +svuint8_t svand_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) +svuint32_t svand_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) +svuint64_t svand_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) +svuint16_t svand_x(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) +svint8_t svand_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) +svint32_t svand_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) +svint64_t svand_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) +svint16_t svand_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) +svuint8_t svand_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) +svuint32_t svand_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) +svuint64_t svand_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) +svuint16_t svand_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) +svint8_t svand_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) +svint32_t svand_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) +svint64_t svand_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) +svint16_t svand_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) +svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) +svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) +svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) +svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) +svint8_t svand_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) +svint32_t svand_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) +svint64_t svand_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) +svint16_t svand_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) +svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) +svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) +svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) +svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) +svint8_t svand_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) +svint32_t svand_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) +svint64_t svand_x(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) +svint16_t svand_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) +svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) +svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) +svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) +svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) +svint8_t svand_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) +svint32_t svand_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) +svint64_t svand_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) +svint16_t svand_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) +uint8_t svandv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) +uint32_t svandv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) +uint64_t svandv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) +uint16_t svandv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) +int8_t svandv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) +int32_t svandv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) +int64_t svandv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) +int16_t svandv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) +svint8_t svasr_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) +svint32_t svasr_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) +svint64_t svasr_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) +svint16_t svasr_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) +svint8_t svasr_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) +svint32_t svasr_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) +svint64_t svasr_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) +svint16_t svasr_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) +svint8_t svasr_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) +svint32_t svasr_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) +svint64_t svasr_z(svbool_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) +svint16_t svasr_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) +svint8_t svasr_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) +svint32_t svasr_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) +svint64_t svasr_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) +svint16_t svasr_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) +svint8_t svasr_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) +svint32_t svasr_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) +svint64_t svasr_x(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) +svint16_t svasr_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) +svint8_t svasr_z(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) +svint32_t svasr_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) +svint64_t svasr_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) +svint16_t svasr_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) +svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) +svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) +svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) +svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) +svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) +svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) +svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) +svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) +svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) +svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) +svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) +svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) +svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) +svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) +svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) +svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) +svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) +svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) +svint8_t svasrd_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) +svint32_t svasrd_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) +svint64_t svasrd_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) +svint16_t svasrd_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) +svint8_t svasrd_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) +svint32_t svasrd_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) +svint64_t svasrd_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) +svint16_t svasrd_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) +svint8_t svasrd_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) +svint32_t svasrd_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) +svint64_t svasrd_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) +svint16_t svasrd_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) +svbool_t svbic_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) +svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) +svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) +svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) +svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) +svint8_t svbic_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) +svint32_t svbic_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) +svint64_t svbic_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) +svint16_t svbic_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) +svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) +svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) +svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) +svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) +svint8_t svbic_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) +svint32_t svbic_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) +svint64_t svbic_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) +svint16_t svbic_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) +svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) +svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) +svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) +svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) +svint8_t svbic_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) +svint32_t svbic_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) +svint64_t svbic_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) +svint16_t svbic_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) +svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) +svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) +svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) +svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) +svint8_t svbic_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) +svint32_t svbic_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) +svint64_t svbic_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) +svint16_t svbic_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) +svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) +svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) +svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) +svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) +svint8_t svbic_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) +svint32_t svbic_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) +svint64_t svbic_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) +svint16_t svbic_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) +svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) +svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) +svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) +svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) +svint8_t svbic_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) +svint32_t svbic_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) +svint64_t svbic_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) +svint16_t svbic_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) +svbool_t svbrka_m(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) +svbool_t svbrka_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) +svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) +svbool_t svbrkb_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) +svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) +svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) +svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) +svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) +svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) +svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) +svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) +svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) +svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) +svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) +svfloat32_t svcadd_z(svbool_t, 
svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) +svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) +uint8_t svclasta(svbool_t, uint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) +uint32_t svclasta(svbool_t, uint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) +uint64_t svclasta(svbool_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) +uint16_t svclasta(svbool_t, uint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) +int8_t svclasta(svbool_t, int8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) +float64_t svclasta(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) +float32_t svclasta(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) +float16_t svclasta(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) +int32_t svclasta(svbool_t, int32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) +int64_t svclasta(svbool_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) +int16_t svclasta(svbool_t, int16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) +svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) +svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) +svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) +svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) +svint8_t svclasta(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) +svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) +svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) +svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) +svint32_t svclasta(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) +svint64_t svclasta(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) +svint16_t svclasta(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) +uint8_t svclastb(svbool_t, uint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) +uint32_t svclastb(svbool_t, uint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) +uint64_t svclastb(svbool_t, uint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) +uint16_t svclastb(svbool_t, uint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) +int8_t svclastb(svbool_t, int8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) +float64_t svclastb(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) +float32_t svclastb(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) +float16_t svclastb(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) +int32_t svclastb(svbool_t, int32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) +int64_t svclastb(svbool_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) +int16_t svclastb(svbool_t, int16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) +svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) +svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) +svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) +svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) +svint8_t svclastb(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) +svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) +svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) +svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) +svint32_t svclastb(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) +svint64_t svclastb(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) +svint16_t svclastb(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) +svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) +svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) +svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) +svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) +svuint8_t svcls_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) +svuint32_t svcls_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) +svuint64_t svcls_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) +svuint16_t svcls_x(svbool_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) +svuint8_t svcls_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) +svuint32_t svcls_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) +svuint64_t svcls_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) +svuint16_t svcls_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) +svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) +svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) +svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) +svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) +svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) +svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) +svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) +svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) +svuint8_t svclz_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) +svuint32_t svclz_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) +svuint64_t svclz_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) +svuint16_t svclz_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) +svuint8_t svclz_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) +svuint32_t svclz_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) +svuint64_t svclz_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) +svuint16_t svclz_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) +svuint8_t svclz_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) +svuint32_t svclz_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) +svuint64_t svclz_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) +svuint16_t svclz_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) +svuint8_t svclz_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) +svuint32_t svclz_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) +svuint64_t svclz_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) +svuint16_t svclz_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) +svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, 
svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) +svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) +svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) +svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) +svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) +svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) +svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) +svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) +svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) +svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) +svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) +svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) +svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) +svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) +svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) +svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) +svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) +svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) +svbool_t svcmpeq(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) +svbool_t svcmpeq(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) +svbool_t svcmpeq(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) +svbool_t svcmpeq(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) +svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) +svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) +svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) +svbool_t svcmpeq(svbool_t, 
svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) +svbool_t svcmpeq(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) +svbool_t svcmpeq(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) +svbool_t svcmpeq(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) +svbool_t svcmpeq(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) +svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) +svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) +svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) +svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) +svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) +svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) +svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) +svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) +svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) +svbool_t svcmpge(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) +svbool_t svcmpge(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) +svbool_t svcmpge(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) +svbool_t svcmpge(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) +svbool_t svcmpge(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) +svbool_t svcmpge(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) +svbool_t svcmpge(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) +svbool_t svcmpge(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) +svbool_t svcmpge(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) +svbool_t svcmpge(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) +svbool_t svcmpge(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) +svbool_t svcmpge(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) +svbool_t svcmpge(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) +svbool_t svcmpge(svbool_t, svint64_t, 
svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) +svbool_t svcmpge(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) +svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) +svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) +svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) +svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) +svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) +svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) +svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) +svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) +svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) +svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) +svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) +svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) +svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) +svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) +svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) +svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) +svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) +svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) +svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) +svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) +svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) +svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) +svbool_t svcmpgt(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) +svbool_t svcmpgt(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) +svbool_t svcmpgt(svbool_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))
+svbool_t svcmpgt(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))
+svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))
+svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))
+svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))
+svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))
+svbool_t svcmpgt(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))
+svbool_t svcmpgt(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))
+svbool_t svcmpgt(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))
+svbool_t svcmpgt(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))
+svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))
+svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))
+svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))
+svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))
+svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))
+svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))
+svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))
+svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))
+svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))
+svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))
+svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))
+svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))
+svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))
+svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))
+svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))
+svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))
+svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))
+svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))
+svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))
+svbool_t svcmple(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))
+svbool_t svcmple(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))
+svbool_t svcmple(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))
+svbool_t svcmple(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))
+svbool_t svcmple(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))
+svbool_t svcmple(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))
+svbool_t svcmple(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))
+svbool_t svcmple(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))
+svbool_t svcmple(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))
+svbool_t svcmple(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))
+svbool_t svcmple(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))
+svbool_t svcmple(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))
+svbool_t svcmple(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))
+svbool_t svcmple(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))
+svbool_t svcmple(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))
+svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))
+svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))
+svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))
+svbool_t svcmple(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))
+svbool_t svcmple(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))
+svbool_t svcmple(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))
+svbool_t svcmple(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))
+svbool_t svcmple_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))
+svbool_t svcmple_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))
+svbool_t svcmple_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))
+svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))
+svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))
+svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))
+svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))
+svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))
+svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))
+svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))
+svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))
+svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))
+svbool_t svcmplt(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))
+svbool_t svcmplt(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))
+svbool_t svcmplt(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))
+svbool_t svcmplt(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))
+svbool_t svcmplt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))
+svbool_t svcmplt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))
+svbool_t svcmplt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))
+svbool_t svcmplt(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))
+svbool_t svcmplt(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))
+svbool_t svcmplt(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))
+svbool_t svcmplt(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))
+svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))
+svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))
+svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))
+svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))
+svbool_t svcmplt(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))
+svbool_t svcmplt(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))
+svbool_t svcmplt(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))
+svbool_t svcmplt(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))
+svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))
+svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))
+svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))
+svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))
+svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))
+svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))
+svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))
+svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))
+svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))
+svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))
+svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))
+svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))
+svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))
+svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))
+svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))
+svbool_t svcmpne(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))
+svbool_t svcmpne(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))
+svbool_t svcmpne(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))
+svbool_t svcmpne(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))
+svbool_t svcmpne(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))
+svbool_t svcmpne(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))
+svbool_t svcmpne(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))
+svbool_t svcmpne(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))
+svbool_t svcmpne(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))
+svbool_t svcmpne(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))
+svbool_t svcmpne(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))
+svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))
+svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))
+svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))
+svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))
+svbool_t svcmpne(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))
+svbool_t svcmpne(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))
+svbool_t svcmpne(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))
+svbool_t svcmpne(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))
+svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))
+svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))
+svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))
+svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))
+svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))
+svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))
+svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))
+svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))
+svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))
+svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))
+svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))
+svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))
+svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))
+svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))
+svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))
+svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))
+svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))
+svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))
+svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))
+svint8_t svcnot_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))
+svint32_t svcnot_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))
+svint64_t svcnot_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))
+svint16_t svcnot_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))
+svuint8_t svcnot_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))
+svuint32_t svcnot_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))
+svuint64_t svcnot_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))
+svuint16_t svcnot_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))
+svint8_t svcnot_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))
+svint32_t svcnot_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))
+svint64_t svcnot_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))
+svint16_t svcnot_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))
+svuint8_t svcnot_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))
+svuint32_t svcnot_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))
+svuint64_t svcnot_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))
+svuint16_t svcnot_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))
+svint8_t svcnot_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))
+svint32_t svcnot_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))
+svint64_t svcnot_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))
+svint16_t svcnot_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))
+svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))
+svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))
+svuint8_t svcnt_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))
+svuint32_t svcnt_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))
+svuint64_t svcnt_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))
+svuint16_t svcnt_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))
+svuint8_t svcnt_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))
+svuint64_t svcnt_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))
+svuint32_t svcnt_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))
+svuint16_t svcnt_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))
+svuint32_t svcnt_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))
+svuint64_t svcnt_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))
+svuint16_t svcnt_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))
+svuint8_t svcnt_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))
+svuint32_t svcnt_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))
+svuint64_t svcnt_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))
+svuint16_t svcnt_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))
+svuint8_t svcnt_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))
+svuint64_t svcnt_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))
+svuint32_t svcnt_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))
+svuint16_t svcnt_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))
+svuint32_t svcnt_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
+svuint64_t svcnt_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
+svuint16_t svcnt_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
+svuint32_t svcompact(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
+svuint64_t svcompact(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
+svfloat64_t svcompact(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
+svfloat32_t svcompact(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
+svint32_t svcompact(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
+svint64_t svcompact(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
+svuint8x2_t svcreate2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
+svuint32x2_t svcreate2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))
+svuint64x2_t svcreate2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))
+svuint16x2_t svcreate2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))
+svint8x2_t svcreate2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))
+svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))
+svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))
+svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))
+svint32x2_t svcreate2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))
+svint64x2_t svcreate2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))
+svint16x2_t svcreate2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))
+svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))
+svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))
+svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))
+svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))
+svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))
+svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))
+svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))
+svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))
+svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))
+svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))
+svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))
+svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))
+svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))
+svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))
+svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))
+svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))
+svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))
+svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))
+svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))
+svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))
+svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))
+svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))
+svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))
+svint16_t svcvt_s16_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))
+svint16_t svcvt_s16_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))
+svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))
+svuint16_t svcvt_u16_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))
+svuint16_t svcvt_u16_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))
+svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))
+svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))
+svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))
+svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))
+svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))
+svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))
+svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))
+svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))
+svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))
+svint32_t svdiv_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))
+svint64_t svdiv_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))
+svint32_t svdiv_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))
+svint64_t svdiv_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))
+svint32_t svdiv_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))
+svint64_t svdiv_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))
+svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))
+svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))
+svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))
+svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))
+svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))
+svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))
+svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))
+svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))
+svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))
+svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))
+svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))
+svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))
+svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))
+svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))
+svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))
+svint32_t svdiv_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))
+svint64_t svdiv_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))
+svint32_t svdiv_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))
+svint64_t svdiv_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))
+svint32_t svdiv_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))
+svint64_t svdiv_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))
+svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))
+svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))
+svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))
+svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))
+svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))
+svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))
+svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))
+svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))
+svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))
+svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))
+svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))
+svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))
+svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))
+svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))
+svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))
+svint32_t svdivr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))
+svint64_t svdivr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))
+svint32_t svdivr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))
+svint64_t svdivr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))
+svint32_t svdivr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))
+svint64_t svdivr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))
+svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))
+svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))
+svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))
+svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))
+svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))
+svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))
+svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))
+svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))
+svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))
+svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))
+svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))
+svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))
+svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))
+svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))
+svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))
+svint32_t svdivr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))
+svint64_t svdivr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))
+svint32_t svdivr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))
+svint64_t svdivr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))
+svint32_t svdivr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))
+svint64_t svdivr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))
+svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))
+svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))
+svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))
+svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))
+svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))
+svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))
+svint32_t svdot(svint32_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))
+svint64_t svdot(svint64_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))
+svuint32_t svdot(svuint32_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))
+svuint64_t svdot(svuint64_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))
+svint32_t svdot(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))
+svint64_t svdot(svint64_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))
+svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))
+svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))
+svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))
+svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))
+svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))
+svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))
+svuint8_t svdup_u8(uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))
+svuint32_t svdup_u32(uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))
+svuint64_t svdup_u64(uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))
+svuint16_t svdup_u16(uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))
+svint8_t svdup_s8(int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))
+svfloat64_t svdup_f64(float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))
+svfloat32_t svdup_f32(float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))
+svfloat16_t svdup_f16(float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))
+svint32_t svdup_s32(int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))
+svint64_t svdup_s64(int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))
+svint16_t svdup_s16(int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))
+svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))
+svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))
+svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))
+svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))
+svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))
+svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))
+svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))
+svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))
+svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))
+svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))
+svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))
+svbool_t svdup_b8(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))
+svbool_t svdup_b32(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))
+svbool_t svdup_b64(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))
+svbool_t svdup_b16(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))
+svuint8_t svdup_u8_x(svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))
+svuint32_t svdup_u32_x(svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))
+svuint64_t svdup_u64_x(svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))
+svuint16_t svdup_u16_x(svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))
+svint8_t svdup_s8_x(svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))
+svfloat64_t svdup_f64_x(svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))
+svfloat32_t svdup_f32_x(svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))
+svfloat16_t svdup_f16_x(svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))
+svint32_t svdup_s32_x(svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))
+svint64_t svdup_s64_x(svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))
+svint16_t svdup_s16_x(svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))
+svuint8_t svdup_u8_z(svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))
+svuint32_t svdup_u32_z(svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))
+svuint64_t svdup_u64_z(svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))
+svuint16_t svdup_u16_z(svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))
+svint8_t svdup_s8_z(svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))
+svfloat64_t svdup_f64_z(svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))
+svfloat32_t svdup_f32_z(svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))
+svfloat16_t svdup_f16_z(svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))
+svint32_t svdup_s32_z(svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))
+svint64_t svdup_s64_z(svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))
+svint16_t svdup_s16_z(svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))
+svuint8_t svdup_lane(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))
+svuint32_t svdup_lane(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))
+svuint64_t svdup_lane(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))
+svuint16_t svdup_lane(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))
+svint8_t svdup_lane(svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))
+svfloat64_t svdup_lane(svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))
+svfloat32_t svdup_lane(svfloat32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))
+svfloat16_t svdup_lane(svfloat16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))
+svint32_t svdup_lane(svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
+svint64_t svdup_lane(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
+svint16_t svdup_lane(svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
+svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
+svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))
+svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);
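+/* Usage sketch (illustrative only, not part of the generated header): with
+ * SVE enabled (e.g. -march=armv8-a+sve), the __aio declarations above make
+ * the intrinsics overload on argument types, so a scalar operand selects the
+ * corresponding _n_ builtin. Variable names below are hypothetical:
+ *
+ *   svbool_t    pg = svptrue_b32();          // all-true 32-bit predicate
+ *   svfloat32_t v  = svdup_f32(2.0f);        // broadcast a scalar
+ *   svbool_t    gt = svcmpgt(pg, v, 1.0f);   // resolves to svcmpgt_n_f32
+ */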
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) +svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) +svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) +svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) +svuint64_t svdupq_u64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) +svfloat64_t svdupq_f64(float64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) +svint64_t svdupq_s64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) +svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) +svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) +svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) +svbool_t svdupq_b32(bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) +svbool_t svdupq_b64(bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) +svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) +svuint8_t svdupq_lane(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) +svuint32_t svdupq_lane(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) +svuint64_t svdupq_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) +svuint16_t svdupq_lane(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) +svint8_t svdupq_lane(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) +svfloat64_t svdupq_lane(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) +svfloat32_t svdupq_lane(svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) +svfloat16_t svdupq_lane(svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) +svint32_t svdupq_lane(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) +svint64_t svdupq_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) +svint16_t svdupq_lane(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) +svbool_t sveor_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) +svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) +svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) +svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) +svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) +svint8_t sveor_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) +svint32_t sveor_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) +svint64_t sveor_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) +svint16_t sveor_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) +svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) +svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) +svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) +svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) +svint8_t sveor_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) +svint32_t sveor_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) +svint64_t sveor_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) +svint16_t sveor_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) +svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) +svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) +svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) +svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) +svint8_t sveor_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) +svint32_t sveor_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) +svint64_t sveor_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) +svint16_t sveor_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) +svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) +svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) +svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) +svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) +svint8_t sveor_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) +svint32_t sveor_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) +svint64_t sveor_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) +svint16_t sveor_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) +svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) +svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) +svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) +svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) +svint8_t sveor_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) +svint32_t sveor_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) +svint64_t sveor_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) +svint16_t sveor_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) +svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) +svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) +svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) +svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) +svint8_t sveor_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) +svint32_t sveor_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) +svint64_t sveor_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) +svint16_t sveor_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) +uint8_t sveorv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) +uint32_t sveorv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) +uint64_t sveorv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) +uint16_t sveorv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) +int8_t sveorv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) +int32_t sveorv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) +int64_t sveorv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) +int16_t sveorv(svbool_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t svexpa(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) +svuint8_t svext(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) +svuint32_t svext(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) +svuint64_t svext(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) +svuint16_t svext(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) +svint8_t svext(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) +svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) +svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) +svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) +svint32_t svext(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) +svint64_t svext(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) +svint16_t svext(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) +svint32_t svextb_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) +svint64_t svextb_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) +svint16_t svextb_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) +svint32_t svextb_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) +svint64_t svextb_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) +svint16_t svextb_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) +svint32_t svextb_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) +svint64_t svextb_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) +svint16_t svextb_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) +svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) +svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) +svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) +svuint32_t svextb_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) +svuint64_t svextb_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) 
+svuint16_t svextb_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) +svuint32_t svextb_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) +svuint64_t svextb_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) +svuint16_t svextb_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) +svint32_t svexth_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) +svint64_t svexth_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) +svint32_t svexth_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) +svint64_t svexth_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) +svint32_t svexth_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) +svint64_t svexth_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) +svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) +svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) +svuint32_t svexth_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) +svuint64_t svexth_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) +svuint32_t svexth_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) +svuint64_t svexth_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) +svint64_t svextw_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) +svint64_t svextw_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) +svint64_t svextw_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) +svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) +svuint64_t svextw_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) +svuint64_t svextw_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) +svuint8_t svget2(svuint8x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) +svuint32_t svget2(svuint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) +svuint64_t svget2(svuint64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) +svuint16_t svget2(svuint16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) +svint8_t svget2(svint8x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) +svfloat64_t svget2(svfloat64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) +svfloat32_t svget2(svfloat32x2_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) +svfloat16_t svget2(svfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) +svint32_t svget2(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) +svint64_t svget2(svint64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) +svint16_t svget2(svint16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) +svuint8_t svget3(svuint8x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) +svuint32_t svget3(svuint32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) +svuint64_t svget3(svuint64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) +svuint16_t svget3(svuint16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) +svint8_t svget3(svint8x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) +svfloat64_t svget3(svfloat64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) +svfloat32_t svget3(svfloat32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) +svfloat16_t svget3(svfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) +svint32_t svget3(svint32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) +svint64_t svget3(svint64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) +svint16_t svget3(svint16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) +svuint8_t svget4(svuint8x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) +svuint32_t svget4(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) +svuint64_t svget4(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) +svuint16_t svget4(svuint16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) +svint8_t svget4(svint8x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) +svfloat64_t svget4(svfloat64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) +svfloat32_t svget4(svfloat32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) +svfloat16_t svget4(svfloat16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) +svint32_t svget4(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) +svint64_t svget4(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) +svint16_t svget4(svint16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) +svuint8_t svinsr(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) +svuint32_t svinsr(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) +svuint64_t svinsr(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) +svuint16_t 
svinsr(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) +svint8_t svinsr(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) +svfloat64_t svinsr(svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) +svfloat32_t svinsr(svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) +svfloat16_t svinsr(svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) +svint32_t svinsr(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) +svint64_t svinsr(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) +svint16_t svinsr(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) +uint8_t svlasta(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) +uint32_t svlasta(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) +uint64_t svlasta(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) +uint16_t svlasta(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) +int8_t svlasta(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) +float64_t svlasta(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) +float32_t svlasta(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) +float16_t svlasta(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) +int32_t svlasta(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) +int64_t svlasta(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) +int16_t svlasta(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) +uint8_t svlastb(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) +uint32_t svlastb(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) +uint64_t svlastb(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) +uint16_t svlastb(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) +int8_t svlastb(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) +float64_t svlastb(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) +float32_t svlastb(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) +float16_t svlastb(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) +int32_t svlastb(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) +int64_t svlastb(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) +int16_t svlastb(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) +svuint8_t svld1(svbool_t, uint8_t 
const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) +svuint32_t svld1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) +svuint64_t svld1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) +svuint16_t svld1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) +svint8_t svld1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) +svfloat64_t svld1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) +svfloat32_t svld1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) +svfloat16_t svld1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) +svint32_t svld1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) +svint64_t svld1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) +svint16_t svld1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) +svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t svld1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) +svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) +svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) +svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) +svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) +svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) +svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) +svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) +svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) +svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) +svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) +svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) +svuint8_t svld1rq(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) +svuint32_t svld1rq(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) +svuint64_t svld1rq(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) +svuint16_t svld1rq(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) +svint8_t svld1rq(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) +svfloat64_t svld1rq(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) +svfloat32_t svld1rq(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) +svfloat16_t svld1rq(svbool_t, float16_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) +svint32_t svld1rq(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) +svint64_t svld1rq(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) +svint16_t svld1rq(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) +svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) +svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) +svint32_t svld1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) +svint64_t svld1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) 
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) +svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) +svint64_t svld1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) +svint64_t 
svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) +svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) +svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) +svint32_t svld1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) +svint64_t svld1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) +svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) +svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) +svint32_t svld1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) +svint64_t svld1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) +svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) +svint64_t svld1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) +svuint8x2_t svld2(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) +svuint32x2_t svld2(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) +svuint64x2_t svld2(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) +svuint16x2_t svld2(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) +svint8x2_t svld2(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) +svfloat64x2_t svld2(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) +svfloat32x2_t svld2(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) +svfloat16x2_t svld2(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) +svint32x2_t svld2(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) +svint64x2_t svld2(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) +svint16x2_t svld2(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) +svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) +svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) +svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) +svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) +svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) +svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) +svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) +svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) +svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) +svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) +svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) +svuint8x3_t svld3(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) +svuint32x3_t svld3(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) +svuint64x3_t svld3(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) +svuint16x3_t svld3(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) +svint8x3_t svld3(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) +svfloat64x3_t svld3(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) +svfloat32x3_t svld3(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) +svfloat16x3_t svld3(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) +svint32x3_t svld3(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) +svint64x3_t svld3(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) +svint16x3_t svld3(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) +svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) +svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) +svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) +svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) +svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) +svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) +svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) +svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) +svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) +svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) +svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) +svuint8x4_t svld4(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) +svuint32x4_t svld4(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) +svuint64x4_t svld4(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) +svuint16x4_t svld4(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) +svint8x4_t svld4(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) +svfloat64x4_t svld4(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) +svfloat32x4_t svld4(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) +svfloat16x4_t svld4(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) +svint32x4_t svld4(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) +svint64x4_t svld4(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) +svint16x4_t svld4(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) +svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) +svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) +svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) +svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) +svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) +svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) +svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) +svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) +svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) +svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) +svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) +svuint8_t svldff1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) +svuint32_t svldff1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) +svuint64_t svldff1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) +svuint16_t svldff1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) +svint8_t svldff1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) +svfloat64_t svldff1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) +svfloat32_t svldff1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) +svfloat16_t svldff1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) +svint32_t svldff1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) +svint64_t svldff1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) +svint16_t svldff1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) +svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) +svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) +svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) +svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) +svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) +svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) +svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) +svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) +svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) +svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) +svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) +svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) +svuint32_t svldff1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) +svuint64_t svldff1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) +svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) +svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) +svint32_t svldff1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) +svint64_t svldff1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) +svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) +svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) +svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) +svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) +svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) +svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) +svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) +svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) +svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) +svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) +svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) +svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) +svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) +svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) +svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) +svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) +svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) +svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) +svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) +svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) +svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) +svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) +svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) +svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) +svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) +svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) +svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) +svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) +svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) +svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) +svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) +svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) +svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) +svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) +svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) 
+svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) +svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) +svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) +svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) +svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) +svint64_t 
svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) +svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) +svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) +svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) +svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) +svuint64_t 
svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) +svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) +svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) +svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) +svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) +svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) +svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) +svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) +svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) +svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) +svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) +svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) 
+svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) +svuint8_t svldnf1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) +svuint32_t svldnf1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) +svuint64_t svldnf1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) +svuint16_t svldnf1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) +svint8_t svldnf1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) +svfloat64_t svldnf1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) +svfloat32_t svldnf1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) +svfloat16_t svldnf1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) +svint32_t svldnf1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) +svuint8_t svldnt1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) +svuint32_t svldnt1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) +svuint64_t svldnt1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) +svuint16_t svldnt1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) +svint8_t svldnt1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) +svfloat64_t svldnt1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) +svfloat32_t svldnt1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) +svfloat16_t svldnt1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) +svint32_t svldnt1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) +svint64_t svldnt1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) +svint16_t svldnt1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) +svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) +svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) +svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) +svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) +svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) +svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) +svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) +svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) +svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) +svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) +svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) +uint64_t svlen(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) +uint64_t svlen(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) +uint64_t svlen(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) +uint64_t svlen(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) +uint64_t svlen(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) +uint64_t svlen(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) +uint64_t svlen(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) +uint64_t svlen(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) +uint64_t svlen(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) +uint64_t svlen(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) +uint64_t svlen(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) +svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) +svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) +svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) +svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) +svint8_t svlsl_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) +svint32_t svlsl_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) +svint64_t svlsl_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) +svint16_t svlsl_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) +svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) +svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) +svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) +svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) +svint8_t svlsl_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) +svint32_t svlsl_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) +svint64_t svlsl_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) +svint16_t svlsl_x(svbool_t, svint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) +svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) +svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) +svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) +svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) +svint8_t svlsl_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) +svint32_t svlsl_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) +svint64_t svlsl_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) +svint16_t svlsl_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) +svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) +svuint32_t svlsl_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) +svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) +svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) +svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) +svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) +svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) +svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) +svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) +svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) +svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) +svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) +svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) +svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) +svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) +svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) +svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) +svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) +svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) +svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) +svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) +svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) +svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) +svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) +svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) +svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) +svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) +svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) +svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) +svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) +svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) +svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) +svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) +svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) +svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) +svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) +svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) +svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) +svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) +svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) +svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) +svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) +svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) +svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) +svuint16_t 
svlsl_wide_m(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) +svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) +svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) +svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) +svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) +svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) +svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) +svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) +svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) +svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) +svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) +svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) +svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) +svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) +svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) +svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) +svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) +svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) +svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) +svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) +svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) +svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) +svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) +svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) +svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) +svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) +svuint64_t 
svlsr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) +svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) +svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) +svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) +svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) +svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) +svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) +svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) +svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) +svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) +svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) +svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) +svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) +svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) +svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) +svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) +svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) +svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) +svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) +svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) +svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) +svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) +svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) +svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) +svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) +svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) +svuint8_t svlsr_wide_x(svbool_t, 
svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) +svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) +svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) +svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) +svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) +svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) +svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) +svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) +svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) +svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) +svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) +svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) +svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) +svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) +svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) +svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) +svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) +svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) +svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) +svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) +svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) +svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) +svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) +svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) +svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) 
+svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))
+svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))
+svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))
+svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))
+svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))
+svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))
+svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))
+svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))
+svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))
+svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))
+svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))
+svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))
+svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))
+svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))
+svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))
+svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))
+svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))
+svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))
+svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))
+svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))
+svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))
+svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))
+svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))
+svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))
+svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))
+svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))
+svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))
+svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))
+svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))
+svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))
+svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))
+svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))
+svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))
+svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))
+svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))
+svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))
+svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))
+svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))
+svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))
+svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))
+svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))
+svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))
+svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))
+svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))
+svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))
+svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))
+svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))
+svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))
+svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))
+svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))
+svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))
+svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))
+svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))
+svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))
+svint8_t svmax_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))
+svint32_t svmax_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))
+svint64_t svmax_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))
+svint16_t svmax_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))
+svint8_t svmax_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))
+svint32_t svmax_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))
+svint64_t svmax_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))
+svint16_t svmax_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))
+svint8_t svmax_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))
+svint32_t svmax_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))
+svint64_t svmax_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))
+svint16_t svmax_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))
+svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))
+svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))
+svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))
+svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))
+svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))
+svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))
+svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))
+svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))
+svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))
+svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))
+svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))
+svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))
+svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))
+svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))
+svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))
+svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))
+svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))
+svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))
+svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))
+svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))
+svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))
+svint8_t svmax_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))
+svint32_t svmax_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))
+svint64_t svmax_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))
+svint16_t svmax_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))
+svint8_t svmax_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))
+svint32_t svmax_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))
+svint64_t svmax_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))
+svint16_t svmax_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))
+svint8_t svmax_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))
+svint32_t svmax_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))
+svint64_t svmax_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))
+svint16_t svmax_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))
+svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))
+svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))
+svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))
+svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))
+svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))
+svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))
+svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))
+svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))
+svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))
+svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))
+svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))
+svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))
+svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))
+svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))
+svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))
+svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))
+svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))
+svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))
+svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))
+svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))
+svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))
+svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))
+svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))
+svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))
+svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))
+svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))
+svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))
+svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))
+svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))
+svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))
+float64_t svmaxnmv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))
+float32_t svmaxnmv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))
+float16_t svmaxnmv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))
+float64_t svmaxv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))
+float32_t svmaxv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))
+float16_t svmaxv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))
+int8_t svmaxv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))
+int32_t svmaxv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))
+int64_t svmaxv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))
+int16_t svmaxv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))
+uint8_t svmaxv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))
+uint32_t svmaxv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))
+uint64_t svmaxv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))
+uint16_t svmaxv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))
+svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))
+svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))
+svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))
+svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))
+svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))
+svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))
+svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))
+svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))
+svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))
+svint8_t svmin_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))
+svint32_t svmin_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))
+svint64_t svmin_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))
+svint16_t svmin_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))
+svint8_t svmin_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))
+svint32_t svmin_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))
+svint64_t svmin_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))
+svint16_t svmin_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))
+svint8_t svmin_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))
+svint32_t svmin_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))
+svint64_t svmin_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))
+svint16_t svmin_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))
+svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))
+svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))
+svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))
+svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))
+svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))
+svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))
+svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))
+svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))
+svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))
+svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))
+svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))
+svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))
+svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))
+svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))
+svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))
+svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))
+svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))
+svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))
+svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))
+svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))
+svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))
+svint8_t svmin_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))
+svint32_t svmin_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))
+svint64_t svmin_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))
+svint16_t svmin_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))
+svint8_t svmin_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))
+svint32_t svmin_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))
+svint64_t svmin_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))
+svint16_t svmin_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))
+svint8_t svmin_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))
+svint32_t svmin_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))
+svint64_t svmin_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))
+svint16_t svmin_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))
+svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))
+svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))
+svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))
+svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))
+svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))
+svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))
+svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))
+svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))
+svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))
+svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))
+svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))
+svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))
+svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))
+svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))
+svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))
+svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))
+svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))
+svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))
+svfloat64_t svminnm_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))
+svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))
+svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))
+svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))
+svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))
+svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))
+svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))
+svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))
+svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))
+svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))
+svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))
+svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))
+float64_t svminnmv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))
+float32_t svminnmv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))
+float16_t svminnmv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))
+float64_t svminv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))
+float32_t svminv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))
+float16_t svminv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))
+int8_t svminv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))
+int32_t svminv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))
+int64_t svminv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))
+int16_t svminv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))
+uint8_t svminv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))
+uint32_t svminv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))
+uint64_t svminv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))
+uint16_t svminv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))
+svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))
+svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))
+svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))
+svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))
+svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))
+svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))
+svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))
+svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))
+svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))
+svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))
+svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))
+svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))
+svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))
+svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))
+svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))
+svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))
+svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))
+svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))
+svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))
+svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))
+svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))
+svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))
+svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))
+svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))
+svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))
+svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))
+svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))
+svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))
+svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))
+svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))
+svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))
+svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))
+svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))
+svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))
+svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))
+svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))
+svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))
+svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))
+svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))
+svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))
+svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))
+svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))
+svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))
+svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))
+svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))
+svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))
+svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))
+svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))
+svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))
+svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))
+svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))
+svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))
+svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))
+svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))
+svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))
+svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))
+svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))
+svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))
+svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))
+svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))
+svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))
+svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))
+svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))
+svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))
+svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))
+svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))
+svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))
+svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))
+svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))
+svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))
+svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))
+svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))
+svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))
+svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))
+svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))
+svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))
+svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))
+svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))
+svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))
+svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))
+svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))
+svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))
+svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))
+svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))
+svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))
+svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))
+svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))
+svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))
+svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))
+svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))
+svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))
+svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))
+svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))
+svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))
+svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))
+svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))
+svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))
+svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))
+svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))
+svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))
+svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))
+svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))
+svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))
+svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))
+svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))
+svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))
+svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))
+svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))
+svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))
+svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))
+svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))
+svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))
+svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))
+svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))
+svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))
+svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))
+svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))
+svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))
+svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))
+svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))
+svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))
+svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))
+svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))
+svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))
+svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))
+svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))
+svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))
+svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))
+svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))
+svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))
+svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))
+svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))
+svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))
+svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))
+svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))
+svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))
+svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))
+svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))
+svbool_t svmov_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))
+svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))
+svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))
+svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))
+svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))
+svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))
+svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))
+svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))
+svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))
+svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))
+svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))
+svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))
+svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))
+svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))
+svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))
+svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))
+svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))
+svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))
+svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))
+svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))
+svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))
+svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))
+svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))
+svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))
+svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))
+svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))
+svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))
+svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))
+svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))
+svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))
+svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))
+svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))
+svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))
+svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))
+svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))
+svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))
+svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))
+svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))
+svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))
+svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))
+svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))
+svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))
+svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))
+svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))
+svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))
+svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))
+svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))
+svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))
+svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))
+svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))
+svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))
+svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))
+svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))
+svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))
+svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))
+svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))
+svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))
+svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))
+svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))
+svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))
+svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))
+svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))
+svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))
+svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))
+svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))
+svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))
+svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))
+svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))
+svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))
+svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))
+svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))
+svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))
+svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))
+svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))
+svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))
+svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))
+svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))
+svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))
+svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))
+svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))
+svint8_t svmul_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))
+svint32_t svmul_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))
+svint64_t svmul_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))
+svint16_t svmul_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))
+svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))
+svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))
+svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))
+svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))
+svint8_t svmul_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))
+svint32_t svmul_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))
+svint64_t svmul_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))
+svint16_t svmul_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))
+svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))
+svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))
+svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))
+svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))
+svint8_t svmul_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))
+svint32_t svmul_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))
+svint64_t svmul_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))
+svint16_t svmul_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))
+svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))
+svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))
+svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))
+svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))
+svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))
+svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))
+svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))
+svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))
+svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))
+svuint8_t
svmul_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) +svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) +svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) +svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) +svint8_t svmul_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) +svint32_t svmul_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) +svint64_t svmul_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) +svint16_t svmul_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) +svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) +svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) +svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) +svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) +svint8_t svmul_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) +svint32_t svmul_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) +svint64_t svmul_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) +svint16_t svmul_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) +svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) +svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) +svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) +svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) +svint8_t svmul_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) +svint32_t svmul_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) +svint64_t svmul_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) +svint16_t svmul_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) +svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) +svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) +svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) +svint8_t svmulh_m(svbool_t, 
svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) +svint32_t svmulh_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) +svint64_t svmulh_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) +svint16_t svmulh_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) +svint8_t svmulh_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) +svint32_t svmulh_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) +svint64_t svmulh_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) +svint16_t svmulh_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) +svint8_t svmulh_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) +svint32_t svmulh_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) +svint64_t svmulh_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) +svint16_t svmulh_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) +svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) +svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) +svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) +svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) +svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) +svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) +svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) +svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) +svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) +svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) +svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) +svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) +svint8_t svmulh_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) +svint32_t svmulh_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) +svint64_t svmulh_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) +svint16_t svmulh_m(svbool_t, 
svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))
+svint8_t svmulh_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))
+svint32_t svmulh_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))
+svint64_t svmulh_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))
+svint16_t svmulh_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))
+svint8_t svmulh_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))
+svint32_t svmulh_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))
+svint64_t svmulh_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))
+svint16_t svmulh_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))
+svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))
+svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))
+svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))
+svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))
+svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))
+svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))
+svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))
+svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))
+svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))
+svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))
+svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))
+svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))
+svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))
+svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))
+svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))
+svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))
+svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))
+svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t);
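
(Not part of the patch itself: in the declarations above, the _m/_x/_z suffixes select merging, don't-care and zeroing predication respectively, and the _n builtins let a plain C scalar stand in for a broadcast vector through overload resolution. A minimal usage sketch, assuming clang with -march=armv8-a+sve; the function name and values are illustrative.)

  #include <arm_sve.h>

  svint32_t mul_demo(svbool_t pg, svint32_t a, svint32_t b) {
    svint32_t lo = svmul_m(pg, a, b);   // _m: inactive lanes keep a's value
    svint32_t sc = svmul_x(pg, a, 4);   // scalar 4 resolves to the svmul_n_s32_x builtin
    svint32_t hi = svmulh_z(pg, a, b);  // _z: inactive lanes zeroed; high half of the product
    return svadd_x(pg, svadd_x(pg, lo, sc), hi);
  }
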
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))
+svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))
+svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))
+svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))
+svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))
+svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))
+svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))
+svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))
+svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))
+svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))
+svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))
+svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))
+svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))
+svbool_t svnand_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))
+svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))
+svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))
+svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))
+svfloat64_t svneg_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))
+svfloat32_t svneg_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))
+svfloat16_t svneg_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))
+svfloat64_t svneg_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))
+svfloat32_t svneg_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))
+svfloat16_t svneg_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))
+svint8_t svneg_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))
+svint32_t svneg_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))
+svint64_t svneg_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))
+svint16_t svneg_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))
+svint8_t svneg_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))
+svint32_t svneg_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))
+svint64_t svneg_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))
+svint16_t svneg_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))
+svint8_t svneg_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))
+svint32_t svneg_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))
+svint64_t svneg_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))
+svint16_t svneg_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))
+svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))
+svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))
+svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))
+svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))
+svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))
+svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))
+svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))
+svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))
+svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))
+svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))
+svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))
+svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))
+svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))
+svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))
+svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))
+svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))
+svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))
+svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
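
(Again not from the patch: svnmad above, like the svnmla/svnmls/svnmsb families that follow, is a negated fused multiply; as I read the ACLE, svnmad(pg, op1, op2, op3) computes -(op1 * op2 + op3) on active lanes, with the variants differing in which operand plays the addend role. A sketch under those assumptions:)

  #include <arm_sve.h>

  // Illustrative only: negated fused multiply-add, double precision.
  svfloat64_t nfma_demo(svbool_t pg, svfloat64_t a, svfloat64_t b, svfloat64_t c) {
    svfloat64_t v = svnmad_x(pg, a, b, c);   // -(a*b + c) per active lane
    svfloat64_t w = svnmad_x(pg, a, b, 1.0); // the double literal picks the _n_f64 overload
    return svsub_x(pg, v, w);
  }
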
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))
+svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))
+svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))
+svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))
+svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))
+svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))
+svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))
+svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))
+svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))
+svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))
+svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))
+svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))
+svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))
+svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))
+svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))
+svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))
+svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))
+svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))
+svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))
+svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))
+svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))
+svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))
+svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))
+svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))
+svfloat16_t svnmls_x(svbool_t,
svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) +svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) +svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) +svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) +svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) +svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) +svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) +svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) +svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) +svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) +svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) +svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) +svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) +svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) +svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) +svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) +svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) +svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) +svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) +svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) +svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) +svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) +svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) +svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) +svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) +svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) +svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) +svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) +svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) +svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) +svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) +svbool_t svnor_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) +svbool_t svnot_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) +svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) +svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) +svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) +svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) +svint8_t svnot_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) +svint32_t svnot_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) +svint64_t svnot_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) +svint16_t svnot_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) +svuint8_t svnot_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) +svuint32_t svnot_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) +svuint64_t svnot_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) +svuint16_t svnot_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) +svint8_t svnot_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) +svint32_t svnot_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) +svint64_t svnot_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) +svint16_t svnot_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) +svuint8_t svnot_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) +svuint32_t svnot_z(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) +svuint64_t svnot_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) +svuint16_t svnot_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) +svint8_t svnot_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) +svint32_t svnot_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) +svint64_t svnot_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) +svint16_t svnot_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) +svbool_t svorn_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) +svbool_t svorr_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) +svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) +svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) +svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) +svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) +svint8_t svorr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) +svint32_t svorr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) +svint64_t svorr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) +svint16_t svorr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) +svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) +svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) +svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) +svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) +svint8_t svorr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) +svint32_t svorr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) +svint64_t svorr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) +svint16_t svorr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) +svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) +svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) +svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) +svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) +svint8_t svorr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) +svint32_t svorr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) +svint64_t svorr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) +svint16_t svorr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) +svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) +svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) +svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) +svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) +svint8_t svorr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) +svint32_t svorr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) +svint64_t svorr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) +svint16_t svorr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) +svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) +svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) +svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) +svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) +svint8_t svorr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) +svint32_t svorr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) +svint64_t svorr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) +svint16_t svorr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) +svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) +svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) +svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) +svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) +svint8_t svorr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) +svint32_t svorr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) +svint64_t svorr_z(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))
+svint16_t svorr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))
+uint8_t svorv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))
+uint32_t svorv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))
+uint64_t svorv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))
+uint16_t svorv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))
+int8_t svorv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))
+int32_t svorv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))
+int64_t svorv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
+int16_t svorv(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))
+svbool_t svpfalse();
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
+svbool_t svpfirst(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
+void svprfb_gather(svbool_t, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))
+void svprfb_gather(svbool_t, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))
+void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))
+void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))
+void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))
+void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))
+void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))
+void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))
+void svprfd_gather(svbool_t, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))
+void svprfd_gather(svbool_t, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))
+void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))
+void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))
+void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))
+void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
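
(A usage sketch, not part of the patch: the gather-prefetch forms above take an enum svprfop constant such as SV_PLDL1KEEP and come in base-pointer-plus-vector-offset and vector-of-addresses flavours; here, hinting the gathered bytes into L1 before the corresponding gather load. Names are illustrative; compile with -march=armv8-a+sve.)

  #include <arm_sve.h>

  void gather_with_prefetch(const float *base, svint32_t byte_offs, svbool_t pg) {
    // Prefetch the bytes we are about to gather, then perform the gather load.
    svprfb_gather_offset(pg, base, byte_offs, SV_PLDL1KEEP);
    svfloat32_t v = svld1_gather_offset(pg, base, byte_offs);
    (void)v; // ... use v ...
  }
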
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))
+void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))
+void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))
+void svprfh_gather(svbool_t, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))
+void svprfh_gather(svbool_t, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))
+void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))
+void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))
+void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))
+void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))
+void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))
+void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
+void svprfw_gather(svbool_t, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
+void svprfw_gather(svbool_t, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
+void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
+void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
+void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
+void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
+void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
+void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
+svint8_t svqadd(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
+svint32_t svqadd(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))
+svint64_t svqadd(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))
+svint16_t svqadd(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))
+svuint8_t svqadd(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))
+svuint32_t
svqadd(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) +svuint64_t svqadd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) +svuint16_t svqadd(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) +svint8_t svqadd(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) +svint32_t svqadd(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) +svint64_t svqadd(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) +svint16_t svqadd(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) +svuint8_t svqadd(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) +svuint32_t svqadd(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) +svuint64_t svqadd(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) +svuint16_t svqadd(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) +int32_t svqdecb(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) +int64_t svqdecb(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) +uint32_t svqdecb(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) +uint64_t svqdecb(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) +int32_t svqdecb_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) +int64_t svqdecb_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) +uint32_t svqdecb_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) +uint64_t svqdecb_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) +int32_t svqdecd(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) +int64_t svqdecd(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) +uint32_t svqdecd(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) +uint64_t svqdecd(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) +svint64_t svqdecd(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) +svuint64_t svqdecd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) +int32_t svqdecd_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) +int64_t svqdecd_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) +uint32_t svqdecd_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) +uint64_t svqdecd_pat(uint64_t, enum svpattern, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))
+svint64_t svqdecd_pat(svint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))
+svuint64_t svqdecd_pat(svuint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))
+int32_t svqdech(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))
+int64_t svqdech(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))
+uint32_t svqdech(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))
+uint64_t svqdech(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))
+svint16_t svqdech(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))
+svuint16_t svqdech(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))
+int32_t svqdech_pat(int32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))
+int64_t svqdech_pat(int64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))
+uint32_t svqdech_pat(uint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))
+uint64_t svqdech_pat(uint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))
+svint16_t svqdech_pat(svint16_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))
+svuint16_t svqdech_pat(svuint16_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))
+int32_t svqdecp_b8(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))
+int32_t svqdecp_b32(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))
+int32_t svqdecp_b64(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))
+int32_t svqdecp_b16(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))
+int64_t svqdecp_b8(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))
+int64_t svqdecp_b32(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))
+int64_t svqdecp_b64(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))
+int64_t svqdecp_b16(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))
+uint32_t svqdecp_b8(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))
+uint32_t svqdecp_b32(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))
+uint32_t svqdecp_b64(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))
+uint32_t svqdecp_b16(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))
+uint64_t svqdecp_b8(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))
+uint64_t svqdecp_b32(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))
+uint64_t svqdecp_b64(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))
+uint64_t svqdecp_b16(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))
+svint32_t svqdecp(svint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))
+svint64_t svqdecp(svint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))
+svint16_t svqdecp(svint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))
+svuint32_t svqdecp(svuint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))
+svuint64_t svqdecp(svuint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))
+svuint16_t svqdecp(svuint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))
+int32_t svqdecw(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))
+int64_t svqdecw(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))
+uint32_t svqdecw(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))
+uint64_t svqdecw(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))
+svint32_t svqdecw(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))
+svuint32_t svqdecw(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))
+int32_t svqdecw_pat(int32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))
+int64_t svqdecw_pat(int64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))
+uint32_t svqdecw_pat(uint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))
+uint64_t svqdecw_pat(uint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))
+svint32_t svqdecw_pat(svint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))
+svuint32_t svqdecw_pat(svuint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))
+int32_t svqincb(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))
+int64_t svqincb(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))
+uint32_t svqincb(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))
+uint64_t svqincb(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))
+int32_t svqincb_pat(int32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))
+int64_t svqincb_pat(int64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))
+uint32_t svqincb_pat(uint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))
+uint64_t svqincb_pat(uint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))
+int32_t svqincd(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))
+int64_t svqincd(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))
+uint32_t svqincd(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))
+uint64_t svqincd(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))
+svint64_t svqincd(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))
+svuint64_t svqincd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))
+int32_t svqincd_pat(int32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))
+int64_t svqincd_pat(int64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))
+uint32_t svqincd_pat(uint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))
+uint64_t svqincd_pat(uint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))
+svint64_t svqincd_pat(svint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))
+svuint64_t svqincd_pat(svuint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))
+int32_t svqinch(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))
+int64_t svqinch(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))
+uint32_t svqinch(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))
+uint64_t svqinch(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))
+svint16_t svqinch(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))
+svuint16_t svqinch(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))
+int32_t svqinch_pat(int32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))
+int64_t svqinch_pat(int64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))
+uint32_t svqinch_pat(uint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))
+uint64_t svqinch_pat(uint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))
+svint16_t svqinch_pat(svint16_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))
+svuint16_t svqinch_pat(svuint16_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))
+int32_t svqincp_b8(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))
+int32_t svqincp_b32(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))
+int32_t svqincp_b64(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))
+int32_t svqincp_b16(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))
+int64_t svqincp_b8(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))
+int64_t svqincp_b32(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))
+int64_t svqincp_b64(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))
+int64_t svqincp_b16(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))
+uint32_t svqincp_b8(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))
+uint32_t svqincp_b32(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))
+uint32_t svqincp_b64(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))
+uint32_t svqincp_b16(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))
+uint64_t svqincp_b8(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))
+uint64_t svqincp_b32(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))
+uint64_t svqincp_b64(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))
+uint64_t svqincp_b16(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))
+svint32_t svqincp(svint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))
+svint64_t svqincp(svint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))
+svint16_t svqincp(svint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))
+svuint32_t svqincp(svuint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))
+svuint64_t svqincp(svuint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))
+svuint16_t svqincp(svuint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))
+int32_t svqincw(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))
+int64_t svqincw(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))
+uint32_t svqincw(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))
+uint64_t svqincw(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))
+svint32_t svqincw(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))
+svuint32_t svqincw(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))
+int32_t svqincw_pat(int32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))
+int64_t svqincw_pat(int64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))
+uint32_t svqincw_pat(uint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))
+uint64_t svqincw_pat(uint64_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))
+svint32_t svqincw_pat(svint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))
+svuint32_t svqincw_pat(svuint32_t, enum svpattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))
+svint8_t svqsub(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))
+svint32_t svqsub(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))
+svint64_t svqsub(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))
+svint16_t svqsub(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))
+svuint8_t svqsub(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))
+svuint32_t svqsub(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))
+svuint64_t svqsub(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))
+svuint16_t svqsub(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))
+svint8_t svqsub(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))
+svint32_t svqsub(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))
+svint64_t svqsub(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))
+svint16_t svqsub(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))
+svuint8_t svqsub(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))
+svuint32_t svqsub(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))
+svuint64_t svqsub(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))
+svuint16_t svqsub(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))
+svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))
+svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))
+svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))
+svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))
+svint8_t svrbit_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))
+svint32_t svrbit_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))
+svint64_t svrbit_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))
+svint16_t svrbit_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))
+svuint8_t svrbit_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))
+svuint32_t svrbit_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))
+svuint64_t svrbit_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))
+svuint16_t svrbit_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))
+svint8_t svrbit_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))
+svint32_t svrbit_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))
+svint64_t svrbit_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))
+svint16_t svrbit_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))
+svuint8_t svrbit_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))
+svuint32_t svrbit_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))
+svuint64_t svrbit_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))
+svuint16_t svrbit_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))
+svint8_t svrbit_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))
+svint32_t svrbit_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))
+svint64_t svrbit_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
+svint16_t svrbit_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
+svfloat64_t svrecpe(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))
+svfloat32_t svrecpe(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))
+svfloat16_t svrecpe(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))
+svfloat64_t svrecps(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))
+svfloat32_t svrecps(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))
+svfloat16_t svrecps(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))
+svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))
+svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))
+svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))
+svfloat64_t svrecpx_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))
+svfloat32_t svrecpx_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))
+svfloat16_t svrecpx_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))
+svfloat64_t svrecpx_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))
+svfloat32_t svrecpx_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))
+svfloat16_t svrecpx_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))
+svuint8_t svrev(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))
+svuint32_t svrev(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))
+svuint64_t svrev(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))
+svuint16_t svrev(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))
+svint8_t svrev(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))
+svfloat64_t svrev(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))
+svfloat32_t svrev(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))
+svfloat16_t svrev(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))
+svint32_t svrev(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))
+svint64_t svrev(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))
+svint16_t svrev(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))
+svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))
+svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))
+svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))
+svint32_t svrevb_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))
+svint64_t svrevb_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))
+svint16_t svrevb_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))
+svuint32_t svrevb_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))
+svuint64_t svrevb_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))
+svuint16_t svrevb_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))
+svint32_t svrevb_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))
+svint64_t svrevb_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))
+svint16_t svrevb_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))
+svuint32_t svrevb_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))
+svuint64_t svrevb_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))
+svuint16_t svrevb_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))
+svint32_t svrevb_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))
+svint64_t svrevb_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))
+svint16_t svrevb_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))
+svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))
+svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))
+svint32_t svrevh_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))
+svint64_t svrevh_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))
+svuint32_t svrevh_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))
+svuint64_t svrevh_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))
+svint32_t svrevh_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))
+svint64_t svrevh_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))
+svuint32_t svrevh_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))
+svuint64_t svrevh_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))
+svint32_t svrevh_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))
+svint64_t svrevh_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))
+svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))
+svint64_t svrevw_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))
+svuint64_t svrevw_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))
+svint64_t svrevw_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))
+svuint64_t svrevw_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))
+svint64_t svrevw_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))
+svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))
+svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))
+svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))
+svfloat64_t svrinta_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))
+svfloat32_t svrinta_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))
+svfloat16_t svrinta_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))
+svfloat64_t svrinta_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))
+svfloat32_t svrinta_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))
+svfloat16_t svrinta_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))
+svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))
+svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))
+svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))
+svfloat64_t svrinti_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))
+svfloat32_t svrinti_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))
+svfloat16_t svrinti_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))
+svfloat64_t svrinti_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))
+svfloat32_t svrinti_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))
+svfloat16_t svrinti_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))
+svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))
+svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))
+svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))
+svfloat64_t svrintm_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))
+svfloat32_t svrintm_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))
+svfloat16_t svrintm_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))
+svfloat64_t svrintm_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))
+svfloat32_t svrintm_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))
+svfloat16_t svrintm_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))
+svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))
+svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))
+svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))
+svfloat64_t svrintn_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))
+svfloat32_t svrintn_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))
+svfloat16_t svrintn_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))
+svfloat64_t svrintn_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))
+svfloat32_t svrintn_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))
+svfloat16_t svrintn_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))
+svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))
+svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))
+svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))
+svfloat64_t svrintp_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))
+svfloat32_t svrintp_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))
+svfloat16_t svrintp_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))
+svfloat64_t svrintp_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))
+svfloat32_t svrintp_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))
+svfloat16_t svrintp_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))
+svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))
+svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))
+svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))
+svfloat64_t svrintx_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))
+svfloat32_t svrintx_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))
+svfloat16_t svrintx_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))
+svfloat64_t svrintx_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))
+svfloat32_t svrintx_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))
+svfloat16_t svrintx_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))
+svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))
+svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))
+svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))
+svfloat64_t svrintz_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))
+svfloat32_t svrintz_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))
+svfloat16_t svrintz_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))
+svfloat64_t svrintz_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))
+svfloat32_t svrintz_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))
+svfloat16_t svrintz_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))
+svfloat64_t svrsqrte(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))
+svfloat32_t svrsqrte(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))
+svfloat16_t svrsqrte(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))
+svfloat64_t svrsqrts(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))
+svfloat32_t svrsqrts(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))
+svfloat16_t svrsqrts(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))
+svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))
+svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))
+svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))
+svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))
+svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))
+svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))
+svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))
+svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))
+svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))
+svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))
+svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))
+svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))
+svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))
+svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))
+svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))
+svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))
+svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))
+svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))
+svbool_t svsel(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))
+svuint8_t svsel(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))
+svuint32_t svsel(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))
+svuint64_t svsel(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))
+svuint16_t svsel(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))
+svint8_t svsel(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))
+svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))
+svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))
+svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))
+svint32_t svsel(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))
+svint64_t svsel(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))
+svint16_t svsel(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))
+svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))
+svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))
+svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))
+svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))
+svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))
+svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))
+svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))
+svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))
+svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))
+svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))
+svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))
+svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))
+svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))
+svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))
+svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))
+svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))
+svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))
+svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))
+svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))
+svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))
+svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))
+svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))
+svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))
+svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))
+svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))
+svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))
+svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))
+svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))
+svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))
+svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))
+svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))
+svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
+svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
+svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
+svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))
+svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))
+svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))
+svint8_t svsplice(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))
+svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))
+svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))
+svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))
+svint32_t svsplice(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))
+svint64_t svsplice(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))
+svint16_t svsplice(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))
+svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))
+svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))
+svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))
+svfloat64_t svsqrt_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))
+svfloat32_t svsqrt_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))
+svfloat16_t svsqrt_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))
+svfloat64_t svsqrt_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))
+svfloat32_t svsqrt_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))
+svfloat16_t svsqrt_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))
+void svst1(svbool_t, uint8_t *, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))
+void svst1(svbool_t, uint32_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))
+void svst1(svbool_t, uint64_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))
+void svst1(svbool_t, uint16_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))
+void svst1(svbool_t, int8_t *, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))
+void svst1(svbool_t, float64_t *, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))
+void svst1(svbool_t, float32_t *, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))
+void svst1(svbool_t, float16_t *, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))
+void svst1(svbool_t, int32_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
+void svst1(svbool_t, int64_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
+void svst1(svbool_t, int16_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
+void svst1_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
+void svst1_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
+void svst1_scatter(svbool_t, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
+void svst1_scatter(svbool_t, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
+void svst1_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
+void svst1_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
+void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
+void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
+void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
+void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
+void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
+void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
+void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
+void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
+void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
+void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
+void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
+void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
+void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
+void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
+void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
+void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
+void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
+void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
+void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
+void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
+void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
+void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
+void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
+void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
+void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
+void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))
+void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))
+void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))
+void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))
+void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))
+void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))
+void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))
+void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))
+void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))
+void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))
+void svst1b(svbool_t, int8_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))
+void svst1b(svbool_t, int8_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))
+void svst1b(svbool_t, int8_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))
+void svst1b(svbool_t, uint8_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
+void svst1b(svbool_t, uint8_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
+void svst1b(svbool_t, uint8_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
+void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
+void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
+void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
+void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
+void svst1b_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
+void svst1b_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
+void svst1b_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
+void svst1b_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))
+void svst1h(svbool_t, int16_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))
+void svst1h(svbool_t, int16_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
+void svst1h(svbool_t, uint16_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
+void svst1h(svbool_t, uint16_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
+void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
+void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
+void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
+void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
+void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
+void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
+void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
+void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
+void svst1h_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
+void svst1h_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
+void svst1h_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
+void svst1h_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
+void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
+void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
+void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
+void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
+void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
+void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))
+void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))
+void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
+void svst1w(svbool_t, int32_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
+void svst1w(svbool_t, uint32_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
+void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
+void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
+void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
+void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
+void svst1w_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
+void svst1w_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
+void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
+void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
+void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
+void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
+void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
+void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
+void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
+void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
+void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
+void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))
+void svst2(svbool_t, uint8_t *, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))
+void svst2(svbool_t, uint32_t *, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))
+void svst2(svbool_t, uint64_t *, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))
+void svst2(svbool_t, uint16_t *, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))
+void svst2(svbool_t, int8_t *, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))
+void svst2(svbool_t, float64_t *, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))
+void svst2(svbool_t, float32_t *, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))
+void svst2(svbool_t, float16_t *, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))
+void svst2(svbool_t, int32_t *, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))
+void svst2(svbool_t, int64_t *, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))
+void svst2(svbool_t, int16_t *, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))
+void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))
+void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))
+void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))
+void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))
+void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))
+void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))
+void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))
+void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))
+void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))
+void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))
+void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))
+void svst3(svbool_t, uint8_t *, svuint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))
+void svst3(svbool_t, uint32_t *, svuint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))
+void svst3(svbool_t, uint64_t *, svuint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))
+void svst3(svbool_t, uint16_t *, svuint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))
+void svst3(svbool_t, int8_t *, svint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))
+void svst3(svbool_t, float64_t *, svfloat64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))
+void svst3(svbool_t, float32_t *, svfloat32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))
+void svst3(svbool_t, float16_t *, svfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))
+void svst3(svbool_t, int32_t *, svint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))
+void svst3(svbool_t, int64_t *, svint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))
+void svst3(svbool_t, int16_t *, svint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))
+void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))
+void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))
+void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))
+void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))
+void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t);
+__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) +void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) +void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) +void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) +void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) +void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) +void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) +void svst4(svbool_t, uint8_t *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) +void svst4(svbool_t, uint32_t *, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) +void svst4(svbool_t, uint64_t *, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) +void svst4(svbool_t, uint16_t *, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) +void svst4(svbool_t, int8_t *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) +void svst4(svbool_t, float64_t *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) +void svst4(svbool_t, float32_t *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) +void svst4(svbool_t, float16_t *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) +void svst4(svbool_t, int32_t *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) +void svst4(svbool_t, int64_t *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) +void svst4(svbool_t, int16_t *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) +void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) +void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) +void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) +void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) +void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) +void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) +void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) +void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) +void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) +void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) +void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) +void svstnt1(svbool_t, uint8_t *, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) +void svstnt1(svbool_t, uint32_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) +void svstnt1(svbool_t, uint64_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) +void svstnt1(svbool_t, uint16_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) +void svstnt1(svbool_t, int8_t *, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) +void svstnt1(svbool_t, float64_t *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) +void svstnt1(svbool_t, float32_t *, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) +void svstnt1(svbool_t, float16_t *, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) +void svstnt1(svbool_t, int32_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) +void svstnt1(svbool_t, int64_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) +void svstnt1(svbool_t, int16_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) +void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) +void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) +void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) +void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) +void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) +void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) +void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) +void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) +void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) +void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) +void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) +svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) +svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) +svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) +svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) +svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) +svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) +svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) +svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) +svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) +svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) +svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) +svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) +svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) +svint8_t svsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) +svint32_t svsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) +svint64_t svsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) +svint16_t svsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) +svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) +svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) +svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) +svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) +svint8_t svsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) +svint32_t svsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) +svint64_t svsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) +svint16_t svsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) +svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) +svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) +svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) +svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) +svint8_t svsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) +svint32_t svsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) +svint64_t svsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) +svint16_t svsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) +svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) +svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) +svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) +svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) +svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) +svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) +svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) +svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) +svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) +svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) +svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) +svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) +svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) +svint8_t svsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) +svint32_t svsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) +svint64_t svsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) +svint16_t svsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) +svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) +svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) +svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) +svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) +svint8_t svsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) +svint32_t svsub_x(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) +svint64_t svsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) +svint16_t svsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) +svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) +svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) +svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) +svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) +svint8_t svsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) +svint32_t svsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) +svint64_t svsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) +svint16_t svsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) +svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) +svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) +svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) +svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) +svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) +svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) +svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) +svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) +svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) +svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) +svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) +svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) +svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) +svint8_t svsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) +svint32_t svsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) +svint64_t svsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) +svint16_t svsubr_m(svbool_t, 
svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) +svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) +svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) +svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) +svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) +svint8_t svsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) +svint32_t svsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) +svint64_t svsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) +svint16_t svsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) +svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) +svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) +svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) +svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) +svint8_t svsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) +svint32_t svsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) +svint64_t svsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) +svint16_t svsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) +svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) +svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) +svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) +svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) +svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) +svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) +svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) +svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) +svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) +svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) +svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) +svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) +svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) +svint8_t svsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) +svint32_t svsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) +svint64_t svsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) +svint16_t svsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) +svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) +svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) +svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) +svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) +svint8_t svsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) +svint32_t svsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) +svint64_t svsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) +svint16_t svsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) +svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) +svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) +svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) +svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) +svint8_t svsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) +svint32_t svsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) +svint64_t svsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) +svint16_t svsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) +svuint8_t svtbl(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) +svuint32_t svtbl(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) +svuint64_t svtbl(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) +svuint16_t svtbl(svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) +svint8_t svtbl(svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) +svfloat64_t svtbl(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) +svfloat32_t svtbl(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) +svfloat16_t svtbl(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) +svint32_t svtbl(svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) +svint64_t svtbl(svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) +svint16_t svtbl(svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) +svuint8_t svtrn1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) +svuint32_t svtrn1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) +svuint64_t svtrn1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) +svuint16_t svtrn1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) +svint8_t svtrn1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) +svfloat64_t svtrn1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) +svfloat32_t svtrn1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) +svfloat16_t svtrn1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) +svint32_t svtrn1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) +svint64_t svtrn1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) +svint16_t svtrn1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) +svuint8_t svtrn2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) +svuint32_t svtrn2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) +svuint64_t svtrn2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) +svuint16_t svtrn2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) +svint8_t svtrn2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) +svfloat64_t svtrn2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) +svfloat32_t svtrn2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) +svfloat16_t svtrn2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) 
+svint32_t svtrn2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))
+svint64_t svtrn2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))
+svint16_t svtrn2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
+svfloat64_t svtsmul(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
+svfloat32_t svtsmul(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
+svfloat16_t svtsmul(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
+svfloat64_t svtssel(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
+svfloat32_t svtssel(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
+svfloat16_t svtssel(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
+svbool_t svunpkhi(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
+svint32_t svunpkhi(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))
+svint64_t svunpkhi(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))
+svint16_t svunpkhi(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))
+svuint32_t svunpkhi(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))
+svuint64_t svunpkhi(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))
+svuint16_t svunpkhi(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))
+svbool_t svunpklo(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))
+svint32_t svunpklo(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))
+svint64_t svunpklo(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))
+svint16_t svunpklo(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))
+svuint32_t svunpklo(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))
+svuint64_t svunpklo(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))
+svuint16_t svunpklo(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))
+svuint8_t svuzp1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))
+svuint32_t svuzp1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))
+svuint64_t svuzp1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))
+svuint16_t svuzp1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))
+svint8_t svuzp1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))
+svfloat64_t svuzp1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))
+svfloat32_t svuzp1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))
+svfloat16_t svuzp1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))
+svint32_t svuzp1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))
+svint64_t svuzp1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))
+svint16_t svuzp1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))
+svuint8_t svuzp2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))
+svuint32_t svuzp2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))
+svuint64_t svuzp2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))
+svuint16_t svuzp2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))
+svint8_t svuzp2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))
+svfloat64_t svuzp2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))
+svfloat32_t svuzp2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))
+svfloat16_t svuzp2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))
+svint32_t svuzp2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))
+svint64_t svuzp2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))
+svint16_t svuzp2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))
+svbool_t svwhilele_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))
+svbool_t svwhilele_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))
+svbool_t svwhilele_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))
+svbool_t svwhilele_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))
+svbool_t svwhilele_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))
+svbool_t svwhilele_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))
+svbool_t svwhilele_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))
+svbool_t svwhilele_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))
+svbool_t svwhilele_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))
+svbool_t svwhilele_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))
+svbool_t svwhilele_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))
+svbool_t svwhilele_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))
+svbool_t svwhilele_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))
+svbool_t svwhilele_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))
+svbool_t svwhilele_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))
+svbool_t svwhilele_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))
+svbool_t svwhilelt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))
+svbool_t svwhilelt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))
+svbool_t svwhilelt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))
+svbool_t svwhilelt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))
+svbool_t svwhilelt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))
+svbool_t svwhilelt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))
+svbool_t svwhilelt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))
+svbool_t svwhilelt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))
+svbool_t svwhilelt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))
+svbool_t svwhilelt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))
+svbool_t svwhilelt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))
+svbool_t svwhilelt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))
+svbool_t svwhilelt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))
+svbool_t svwhilelt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))
+svbool_t svwhilelt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))
+svbool_t svwhilelt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))
+svuint8_t svzip1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))
+svuint32_t svzip1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))
+svuint64_t svzip1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))
+svuint16_t svzip1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))
+svint8_t svzip1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))
+svfloat64_t svzip1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))
+svfloat32_t svzip1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))
+svfloat16_t svzip1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))
+svint32_t svzip1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))
+svint64_t svzip1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))
+svint16_t svzip1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))
+svuint8_t svzip2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))
+svuint32_t svzip2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))
+svuint64_t svzip2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))
+svuint16_t svzip2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))
+svint8_t svzip2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))
+svfloat64_t svzip2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))
+svfloat32_t svzip2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))
+svfloat16_t svzip2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))
+svint32_t svzip2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))
+svint64_t svzip2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))
+svint16_t svzip2(svint16_t, svint16_t);
+
+#if defined(__ARM_FEATURE_SVE2_BITPERM)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
+svuint8_t svbdep_n_u8(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
+svuint32_t svbdep_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
+svuint64_t svbdep_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
+svuint16_t svbdep_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
+svuint8_t svbdep_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
+svuint32_t svbdep_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
+svuint64_t svbdep_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
+svuint16_t svbdep_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
+svuint8_t svbext_n_u8(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
+svuint32_t svbext_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
+svuint64_t svbext_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
+svuint16_t svbext_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
+svuint8_t svbext_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
+svuint32_t svbext_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
+svuint64_t svbext_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
+svuint16_t svbext_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
+svuint8_t svbgrp_n_u8(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
+svuint32_t svbgrp_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
+svuint64_t svbgrp_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
+svuint16_t svbgrp_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
+svuint8_t svbgrp_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
+svuint32_t svbgrp_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
+svuint64_t svbgrp_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
+svuint16_t svbgrp_u16(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
+svuint8_t svbdep(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
+svuint32_t svbdep(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
+svuint64_t svbdep(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
+svuint16_t svbdep(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
+svuint8_t svbdep(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
+svuint32_t svbdep(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
+svuint64_t svbdep(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
+svuint16_t svbdep(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
+svuint8_t svbext(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
+svuint32_t svbext(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
+svuint64_t svbext(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
+svuint16_t svbext(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
+svuint8_t svbext(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
+svuint32_t svbext(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
+svuint64_t svbext(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
+svuint16_t svbext(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
+svuint8_t svbgrp(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
+svuint32_t svbgrp(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
+svuint64_t svbgrp(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
+svuint16_t svbgrp(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
+svuint8_t svbgrp(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
+svuint32_t svbgrp(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
+svuint64_t svbgrp(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
+svuint16_t svbgrp(svuint16_t, svuint16_t);
+#endif //defined(__ARM_FEATURE_SVE2_BITPERM)
+
+#if defined(__ARM_FEATURE_SVE2)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
+svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
+svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
+svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
+svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
+svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
+svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
+svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
+svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
+svint8_t svaba_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
+svint32_t svaba_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
+svint64_t svaba_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
+svint16_t svaba_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
+svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
+svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
+svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
+svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
+svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
+svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
+svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
+svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
+svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
+svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
+svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
+svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
+svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
+svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
+svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
+svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
+svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
+svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
+svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
+svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
+svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
+svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
+svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
+svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
+svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
+svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
+svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
+svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
+svint32_t svabdlb_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
+svint64_t svabdlb_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
+svint16_t svabdlb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
+svuint32_t svabdlb_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
+svuint64_t svabdlb_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
+svuint16_t svabdlb_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
+svint32_t svabdlb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
+svint64_t svabdlb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
+svint16_t svabdlb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
+svuint32_t svabdlb_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
+svuint64_t svabdlb_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
+svuint16_t svabdlb_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
+svint32_t svabdlt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
+svint64_t svabdlt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
+svint16_t svabdlt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
+svuint32_t svabdlt_n_u32(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
+svuint64_t svabdlt_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
+svuint16_t svabdlt_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
+svint32_t svabdlt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
+svint64_t svabdlt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
+svint16_t svabdlt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
+svuint32_t svabdlt_u32(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
+svuint64_t svabdlt_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
+svuint16_t svabdlt_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
+svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
+svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
+svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
+svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
+svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
+svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
+svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
+svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
+svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
+svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
+svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
+svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
+svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
+svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
+svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
+svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t);
+__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) +svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) +svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) +svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) +svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) +svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) +svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) +svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) +svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) +svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) +svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) +svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) +svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) +svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) +svint16_t svaddhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) +svint32_t svaddhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) +svint8_t svaddhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) +svuint16_t svaddhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) +svuint32_t svaddhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) +svuint8_t svaddhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) +svint16_t svaddhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) +svint32_t svaddhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) +svint8_t svaddhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) +svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) +svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) +svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) +svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) +svint32_t 
svaddhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) +svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) +svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) +svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) +svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) +svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) +svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) +svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) +svint32_t svaddlb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) +svint64_t svaddlb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) +svint16_t svaddlb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) +svuint32_t svaddlb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) +svuint64_t svaddlb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) +svuint16_t svaddlb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) +svint32_t svaddlb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) +svint64_t svaddlb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) +svint16_t svaddlb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) +svuint32_t svaddlb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) +svuint64_t svaddlb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) +svuint16_t svaddlb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) +svint32_t svaddlbt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) +svint64_t svaddlbt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) +svint16_t svaddlbt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) +svint32_t svaddlbt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) +svint64_t svaddlbt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) +svint16_t svaddlbt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) +svint32_t svaddlt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) +svint64_t svaddlt_n_s64(svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) +svint16_t svaddlt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) +svuint32_t svaddlt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) +svuint64_t svaddlt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) +svuint16_t svaddlt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) +svint32_t svaddlt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) +svint64_t svaddlt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) +svint16_t svaddlt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) +svuint32_t svaddlt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) +svuint64_t svaddlt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) +svuint16_t svaddlt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) +svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) +svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) +svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) +svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) +svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) +svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) +svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) +svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) +svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) +svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) +svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) +svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) +svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) +svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) +svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) +svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) +svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t); 
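+/*
+ * Illustrative sketch, not part of the upstream header: each declaration in
+ * this section only binds a public ACLE intrinsic name to its Clang builtin
+ * via __clang_arm_builtin_alias; no body is emitted here. Assuming an
+ * SVE2-enabled target that includes this header as <arm_sve.h>, a caller
+ * would use the aliases declared above directly, e.g.:
+ *
+ *   // Per-lane acc + |a - b|, via the svaba_u8 alias declared above.
+ *   svuint8_t abs_diff_acc(svuint8_t acc, svuint8_t a, svuint8_t b) {
+ *     return svaba_u8(acc, a, b);
+ *   }
+ */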
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) +svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) +svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) +svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) +svuint64_t svaddwb_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) +svuint16_t svaddwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t svaddwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) +svint32_t svaddwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t svaddwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt_u64(svuint64_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) +svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t 
svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) +svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) +svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) +svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) +svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) +svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) +svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) +svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) +svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) +svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) +svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) +svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) +svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) +svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) +svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) +svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) +svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) +svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) +svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) +svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) +svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) +svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) +svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) +svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) +svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) +svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) +svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) +svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) +svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) +svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) +svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) +svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) +svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) +svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) +svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) +svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) +svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) +svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) +svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) +svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) +svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) +svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) +svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) +svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) +svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) +svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) +svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) +svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) +svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) +svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) +svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) 
+svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) +svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) +svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) +svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) +svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) +svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) +svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) +svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) +svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) +svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) +svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) +svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) +svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) +svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) +svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) +svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) +svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) +svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) +svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) +svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) +svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) +svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) +svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) +svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) +svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) +svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) +svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) +svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) +svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) +svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) +svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) +svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) +svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) +svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) +svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) +svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) +svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) +svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) +svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) +svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) +svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) +svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) +svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) +svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) +svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) +svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) +svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) +svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) +svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) +svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) +svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) +svint16_t 
sveorbt_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) +svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) +svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) +svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) +svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) +svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) +svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) +svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) +svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) +svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) +svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) +svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) +svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) +svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) +svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) +svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) +svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) +svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) +svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) +svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) +svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) +svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) +svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) +svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) +svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) +svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) +svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) +svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) +svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) +svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) +svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) +svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) +svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) +svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) +svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) +svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) +svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) +svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) +svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) +svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) +svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) +svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) +svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) +svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) +svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) +svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) +svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) +svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) +svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) +svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) +svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) +svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) +svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) +svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) +svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) +svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) +svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) +svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) +svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) +svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) +svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) +svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) +svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) +svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) +svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) +svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) +svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) +svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) +svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) +svuint8_t svhistseg_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) +svuint8_t svhistseg_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) +svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) +svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) +svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) +svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) +svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) +svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) +svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) +svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) +svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) +svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) +svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) +svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) +svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) +svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) +svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) +svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) +svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) +svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) +svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) +svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) +svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) +svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) +svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) +svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) +svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) +svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) +svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) +svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) +svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) +svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) +svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) +svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) +svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) +svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) +svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) +svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) +svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) +svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) +svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) +svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) +svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) +svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) +svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) +svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) +svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) +svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) +svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) +svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) +svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) +svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) +svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) +svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) +svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) +svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) +svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) +svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) +svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) +svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) +svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) +svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) +svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) +svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) +svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) +svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) +svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) +svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) +svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) +svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) +svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) +svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) +svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) +svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) +svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) +svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) +svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) +svint16_t svhsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) +svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) +svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) +svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) +svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) +svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) +svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) +svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) +svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) +svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) +svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, 
svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) +svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) +svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) +svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) +svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) +svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) +svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) +svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) +svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) +svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) +svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) +svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) +svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) +svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) +svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) +svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) +svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) +svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) +svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) +svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) +svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) +svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) +svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) +svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) +svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) +svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) +svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) +svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) +svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) +svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) +svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) +svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) +svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) +svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) +svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) +svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) +svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) +svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) +svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) +svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) +svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) +svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) +svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) +svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) +svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) +svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) +svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) +svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) +svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) +svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) +svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) +svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) +svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) +svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) +svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) +svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) +svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) +svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) +svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) +svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) +svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) +svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) +svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) +svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) +svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) +svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) +svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) +svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) +svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) +svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) 
+svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) +svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) +svint32_t 
svlogb_f32_m(svint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) +svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) +svint64_t svlogb_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) +svint32_t svlogb_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) +svint16_t svlogb_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) +svint64_t svlogb_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) +svint32_t svlogb_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) +svint16_t svlogb_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) +svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) +svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) +svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) +svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) +svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) +svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) +svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) +svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) +svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) +svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) +svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) +svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) +svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) +svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) +svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) +svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) +svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) +svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) +svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) +svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) +svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) +svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) +svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) +svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) +svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) +svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) +svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) +svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) +svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) +svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) +svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) +svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) +svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) +svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) +svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) +svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) +svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) +svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) +svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) +svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) +svint8_t 
svminp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) +svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) +svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) +svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) +svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) +svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) +svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) +svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) +svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) +svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) +svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) +svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) +svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) +svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) +svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) +svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) +svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) +svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) +svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) +svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) +svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) +svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) +svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) +svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) +svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) +svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) +svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) +svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) +svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) +svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) +svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) +svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) +svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) +svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) +svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) +svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) +svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) +svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) +svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) +svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) +svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) +svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) +svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) +svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) +svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) +svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) +svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) +svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) +svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) +svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) +svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) +svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) +svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) +svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) +svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) +svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) +svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) +svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) +svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) +svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) +svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) +svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) +svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) +svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) +svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) +svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) +svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) +svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) +svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) +svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) +svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) +svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) +svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) +svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) +svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) +svint64_t 
svmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) +svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) +svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) +svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) +svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) +svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) +svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) +svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) +svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) +svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) +svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) +svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) +svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) +svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) +svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) +svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) +svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) +svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) +svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) +svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) +svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) +svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) +svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) +svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) +svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) +svint32_t 
svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) +svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) +svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) +svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) +svint32_t svmovlb_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) +svint64_t svmovlb_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) +svint16_t svmovlb_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) +svuint32_t svmovlb_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) +svuint64_t svmovlb_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) +svuint16_t svmovlb_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) +svint32_t svmovlt_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) +svint64_t svmovlt_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) +svint16_t svmovlt_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) +svuint32_t svmovlt_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) +svuint64_t svmovlt_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) +svuint16_t svmovlt_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) +svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) +svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) +svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) +svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) +svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) +svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) +svint32_t svmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) +svint64_t svmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) +svint16_t svmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) +svuint32_t svmullb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) +svuint64_t svmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) +svuint16_t svmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) +svint32_t 
svmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) +svint64_t svmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) +svint16_t svmullb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) +svuint32_t svmullb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) +svuint64_t svmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) +svuint16_t svmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) +svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) +svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) +svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) +svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) +svint32_t svmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) +svint64_t svmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) +svint16_t svmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) +svuint32_t svmullt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) +svuint64_t svmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) +svuint16_t svmullt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) +svint32_t svmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) +svint64_t svmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) +svint16_t svmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) +svuint32_t svmullt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) +svuint64_t svmullt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) +svuint16_t svmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) +svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) +svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) +svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) +svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) +svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) +svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) +svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) +svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) +svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) +svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) +svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) +svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) +svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) +svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) +svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) +svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) +svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) +svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) +svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) +svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) +svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) +svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) +svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) +svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) +svuint8_t svpmul_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) +svuint8_t svpmul_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) +svuint64_t svpmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) +svuint16_t svpmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) +svuint64_t svpmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) +svuint16_t svpmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) +svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) +svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) +svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
+svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
+svuint64_t svpmullt_n_u64(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
+svuint16_t svpmullt_n_u16(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
+svuint64_t svpmullt_u64(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
+svuint16_t svpmullt_u16(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
+svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
+svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
+svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
+svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
+svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
+svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
+svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
+svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
+svint8_t svqabs_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
+svint32_t svqabs_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
+svint64_t svqabs_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
+svint16_t svqabs_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
+svint8_t svqabs_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
+svint32_t svqabs_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
+svint64_t svqabs_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
+svint16_t svqabs_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
+svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
+svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
+svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
+svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
+svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
+svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
+svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
+svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
+svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
+svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
+svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
+svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
+svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
+svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
+svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
+svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
+svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
+svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
+svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
+svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
+svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
+svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
+svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
+svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
+svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
+svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
+svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
+svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
+svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
+svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
+svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
+svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
+svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
+svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
+svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
+svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
+svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
+svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
+svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
+svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
+svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
+svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
+svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
+svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
+svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
+svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
+svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
+svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
+svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
+svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
+svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
+svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
+svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
+svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
+svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
+svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
+svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
+svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
+svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
+svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
+svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
+svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))
+svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))
+svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))
+svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))
+svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))
+svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))
+svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))
+svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))
+svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))
+svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))
+svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))
+svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))
+svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))
+svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))
+svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))
+svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))
+svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))
+svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))
+svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))
+svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))
+svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))
+svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))
+svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))
+svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))
+svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))
+svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))
+svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))
+svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))
+svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))
+svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))
+svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))
+svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))
+svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))
+svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))
+svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))
+svint8_t svqdmulh_n_s8(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))
+svint32_t svqdmulh_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))
+svint64_t svqdmulh_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))
+svint16_t svqdmulh_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))
+svint8_t svqdmulh_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))
+svint32_t svqdmulh_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))
+svint64_t svqdmulh_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))
+svint16_t svqdmulh_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))
+svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))
+svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))
+svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))
+svint32_t svqdmullb_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))
+svint64_t svqdmullb_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))
+svint16_t svqdmullb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))
+svint32_t svqdmullb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))
+svint64_t svqdmullb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))
+svint16_t svqdmullb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))
+svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))
+svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))
+svint32_t svqdmullt_n_s32(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))
+svint64_t svqdmullt_n_s64(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))
+svint16_t svqdmullt_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))
+svint32_t svqdmullt_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))
+svint64_t svqdmullt_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))
+svint16_t svqdmullt_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))
+svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))
+svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))
+svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))
+svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))
+svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))
+svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))
+svint8_t svqneg_s8_x(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))
+svint32_t svqneg_s32_x(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))
+svint64_t svqneg_s64_x(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))
+svint16_t svqneg_s16_x(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))
+svint8_t svqneg_s8_z(svbool_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))
+svint32_t svqneg_s32_z(svbool_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))
+svint64_t svqneg_s64_z(svbool_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))
+svint16_t svqneg_s16_z(svbool_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))
+svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))
+svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))
+svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))
+svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))
+svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))
+svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))
+svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))
+svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))
+svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))
+svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))
+svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))
+svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))
+svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))
+svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))
+svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))
+svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))
+svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))
+svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))
+svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))
+svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))
+svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))
+svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))
+svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))
+svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))
+svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))
+svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))
+svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))
+svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))
+svint8_t svqrdmulh_n_s8(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))
+svint32_t svqrdmulh_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))
+svint64_t svqrdmulh_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))
+svint16_t svqrdmulh_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))
+svint8_t svqrdmulh_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))
+svint32_t svqrdmulh_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))
+svint64_t svqrdmulh_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))
+svint16_t svqrdmulh_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))
+svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))
+svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))
+svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))
+svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))
+svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))
+svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))
+svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))
+svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))
+svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))
+svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))
+svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))
+svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))
+svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))
+svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))
+svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))
+svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))
+svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))
+svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))
+svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))
+svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))
+svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))
+svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))
+svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))
+svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))
+svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))
+svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))
+svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))
+svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))
+svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))
+svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))
+svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))
+svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))
+svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))
+svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))
+svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))
+svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))
+svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))
+svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))
+svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))
+svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))
+svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))
+svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))
+svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))
+svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))
+svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))
+svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
+svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
+svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
+svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
+svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
+svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
+svint16_t svqrshrnb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
+svint32_t svqrshrnb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
+svint8_t svqrshrnb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
+svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
+svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
+svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
+svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
+svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
+svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
+svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
+svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
+svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
+svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
+svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
+svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
+svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
+svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
+svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
+svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
+svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
+svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
+svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
+svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
+svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
+svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
+svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
+svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
+svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
+svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
+svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
+svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
+svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
+svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
+svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
+svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
+svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
+svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
+svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
+svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
+svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
+svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
+svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
+svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
+svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
+svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
+svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
+svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
+svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
+svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
+svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
+svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
+svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
+svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
+svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
+svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
+svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
+svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
+svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
+svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
+svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
+svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
+svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
+svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
+svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
+svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
+svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
+svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
+svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
+svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
+svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
+svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
+svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
+svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
+svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
+svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
+svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
+svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
+svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
+svint16_t svqshrnb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
+svint32_t svqshrnb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
+svint8_t svqshrnb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
+svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
+svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
+svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
+svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
+svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
+svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
+svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
+svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
+svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
+svuint16_t svqshrunb_n_s32(svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
+svuint32_t svqshrunb_n_s64(svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
+svuint8_t svqshrunb_n_s16(svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
+svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
+svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
+svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
+svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
+svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
+svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
+svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
+svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
+svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
+svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
+svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
+svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
+svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
+svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
+svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
+svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
+svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
+svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
+svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
+svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
+svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
+svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
+svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
+svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
+svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
+svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
+svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
+svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
+svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
+svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
+svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
+svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
+svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
+svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
+svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
+svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
+svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
+svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
+svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
+svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
+svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
+svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
+svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
+svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
+svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
+svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
+svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
+svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
+svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
+svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
+svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
+svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
+svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
+svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
+svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
+svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
+svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
+svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
+svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
+svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
+svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
+svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
+svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
+svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
+svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
+svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
+svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
+svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
+svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
+svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
+svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
+svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
+svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
+svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
+svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
+svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
+svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
+svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
+svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
+svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
+svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
+svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
+svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
+svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
+svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
+svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
+svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
+svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
+svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
+svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
+svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
+svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
+svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
+svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
+svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
+svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
+svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
+svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
+svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
+svint16_t svqxtnb_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
+svint32_t svqxtnb_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
+svint8_t svqxtnb_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
+svuint16_t svqxtnb_u32(svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
+svuint32_t svqxtnb_u64(svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
+svuint8_t svqxtnb_u16(svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
+svint16_t svqxtnt_s32(svint16_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
+svint32_t svqxtnt_s64(svint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
+svint8_t svqxtnt_s16(svint8_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
+svuint16_t svqxtnt_u32(svuint16_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
+svuint32_t svqxtnt_u64(svuint32_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
+svuint8_t svqxtnt_u16(svuint8_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
+svuint16_t svqxtunb_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
+svuint32_t svqxtunb_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
+svuint8_t svqxtunb_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
+svuint16_t svqxtunt_s32(svuint16_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
+svuint32_t svqxtunt_s64(svuint32_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
+svuint8_t svqxtunt_s16(svuint8_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
+svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
+svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
+svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
+svint16_t svraddhnb_n_s32(svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
+svint32_t svraddhnb_n_s64(svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
+svint8_t svraddhnb_n_s16(svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
+svuint16_t svraddhnb_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
+svuint32_t svraddhnb_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
+svuint8_t svraddhnb_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
+svint16_t svraddhnb_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
+svint32_t svraddhnb_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
+svint8_t svraddhnb_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
+svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
+svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
+svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
+svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
+svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
+svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
+svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
+svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
+svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
+svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
+svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
+svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
+svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
+svuint32_t svrecpe_u32_x(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
+svuint32_t svrecpe_u32_z(svbool_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
+svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
+svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
+svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
+svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
+svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
+svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
+svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
+svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
+svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
+svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
+svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
+svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
+svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
+svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
+svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
+svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
+svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
+svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
+svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
+svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
+svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
+svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
+svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
+svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
+svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
+svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
+svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
+svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
+svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
+svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
+svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
+svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
+svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
+svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
+svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
+svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
+svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
+svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
+svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
+svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
+svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
+svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
+svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
+svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
+svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
+svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
+svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
+svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
+svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
+svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
+svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
+svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
+svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
+svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
+svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
+svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
+svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
+svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
+svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
+svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
+svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
+svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
+svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
+svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
+svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
+svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
+svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
+svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t);
+__ai
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) +svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) +svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) +svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) +svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) +svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) +svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) +svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) +svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) +svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) +svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) +svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) +svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) +svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) +svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) +svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) +svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) +svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) +svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) +svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) +svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) +svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) +svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) +svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) +svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) +svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) +svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) +svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) +svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) +svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) +svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) +svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) +svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) +svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) +svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) +svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) +svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) +svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) +svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) +svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) +svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) +svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) +svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) +svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) +svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) +svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) +svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) +svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) +svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) +svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) +svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) +svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) +svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) +svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) +svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) +svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) +svint16_t svrshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) +svint32_t svrshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) +svint8_t svrshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) +svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) +svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) +svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) +svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) +svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) +svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) +svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) +svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) +svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) +svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) +svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) +svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) +svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) +svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) +svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) +svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) +svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) +svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) +svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) +svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) +svint16_t 
svrsubhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) +svint32_t svrsubhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) +svint8_t svrsubhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) +svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) +svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) +svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) +svint16_t svrsubhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) +svint32_t svrsubhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) +svint8_t svrsubhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) +svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) +svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) +svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) +svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) +svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) +svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) +svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) +svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) +svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) +svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) +svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) +svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) +svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) +svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) +svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) +svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) +svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) +svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) +svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) +svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) +svint32_t svshllb_n_s32(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) +svint64_t svshllb_n_s64(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) +svint16_t svshllb_n_s16(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) +svuint32_t svshllb_n_u32(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) +svuint64_t svshllb_n_u64(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) +svuint16_t svshllb_n_u16(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) +svint32_t svshllt_n_s32(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) +svint64_t svshllt_n_s64(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) +svint16_t svshllt_n_s16(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) +svuint32_t svshllt_n_u32(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) +svuint64_t svshllt_n_u64(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) +svuint16_t svshllt_n_u16(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) +svuint16_t svshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) +svuint32_t svshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) +svuint8_t svshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) +svint16_t svshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) +svint32_t svshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) +svint8_t svshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) +svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) +svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) +svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) +svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) +svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) +svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) +svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) +svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) +svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) +svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) +svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) +svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) +svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) +svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) +svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) +svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) +svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) +svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) +svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) +svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) +svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) +svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) +svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) +svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) +svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) +svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) +svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) +svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) +svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) +svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) +svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) +svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) +svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) +svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) +svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) +svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) +svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) +svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) +svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) +svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) +svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) +svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) +svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) +svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) +svuint64_t svsra_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) +svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) +svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) +svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) +svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) +svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) +svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) +svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) +svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) +svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) +void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) +void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) +void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) +void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) +void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) +void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) +void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) +void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) +void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) +void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) +void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) +void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) +void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) +void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) +void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) +void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) +void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) +void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) +void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) +void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) +void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) +void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) +void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) +void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) +void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) +void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) +void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) +void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) +void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) +void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) +void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) +void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) +void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) +void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) +void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) +void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) +void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) +void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) +void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) +void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) +void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) +void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) +void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) +void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) +void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) +void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) +void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) +void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) +void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) +void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) +void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) +void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) +void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) +void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) +void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) +void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) +void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) +void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) +void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) +void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) +void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) +void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) +void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) +void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) +void svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) +void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) +void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) +void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) +void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) +void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) +void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) +void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) +void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) +void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) +void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) +void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) +void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) +void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) +void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) +void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) +void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) +void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) +void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) +svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) +svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) +svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) +svint16_t svsubhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) +svint32_t svsubhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) +svint8_t svsubhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) +svuint16_t svsubhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) +svuint32_t svsubhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) +svuint8_t svsubhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) +svint16_t svsubhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) +svint32_t svsubhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) +svint8_t svsubhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) +svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) +svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) +svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) +svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) +svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) +svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) +svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) +svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) +svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) +svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) +svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) +svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) +svint32_t svsublb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) +svint64_t svsublb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) +svint16_t svsublb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) +svuint32_t svsublb_n_u32(svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) +svuint64_t svsublb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) +svuint16_t svsublb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) +svint32_t svsublb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) +svint64_t svsublb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) +svint16_t svsublb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) +svuint32_t svsublb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) +svuint64_t svsublb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) +svuint16_t svsublb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) +svint32_t svsublbt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) +svint64_t svsublbt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) +svint16_t svsublbt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) +svint32_t svsublbt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) +svint64_t svsublbt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) +svint16_t svsublbt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) +svint32_t svsublt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) +svint64_t svsublt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) +svint16_t svsublt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) +svuint32_t svsublt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) +svuint64_t svsublt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) +svuint16_t svsublt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) +svint32_t svsublt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) +svint64_t svsublt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) +svint16_t svsublt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) +svuint32_t svsublt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) +svuint64_t svsublt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) +svuint16_t svsublt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) +svint32_t svsubltb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) +svint64_t svsubltb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) 
+svint16_t svsubltb_n_s16(svint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
+svint32_t svsubltb_s32(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
+svint64_t svsubltb_s64(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
+svint16_t svsubltb_s16(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
+svint32_t svsubwb_n_s32(svint32_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
+svint64_t svsubwb_n_s64(svint64_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
+svint16_t svsubwb_n_s16(svint16_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
+svuint32_t svsubwb_n_u32(svuint32_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
+svuint64_t svsubwb_n_u64(svuint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
+svuint16_t svsubwb_n_u16(svuint16_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
+svint32_t svsubwb_s32(svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
+svint64_t svsubwb_s64(svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
+svint16_t svsubwb_s16(svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
+svuint32_t svsubwb_u32(svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
+svuint64_t svsubwb_u64(svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
+svuint16_t svsubwb_u16(svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
+svint32_t svsubwt_n_s32(svint32_t, int16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
+svint64_t svsubwt_n_s64(svint64_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
+svint16_t svsubwt_n_s16(svint16_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
+svuint32_t svsubwt_n_u32(svuint32_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
+svuint64_t svsubwt_n_u64(svuint64_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
+svuint16_t svsubwt_n_u16(svuint16_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
+svint32_t svsubwt_s32(svint32_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
+svint64_t svsubwt_s64(svint64_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
+svint16_t svsubwt_s16(svint16_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
+svuint32_t svsubwt_u32(svuint32_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
+svuint64_t svsubwt_u64(svuint64_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
+svuint16_t svsubwt_u16(svuint16_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
+svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
+svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
+svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
+svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
+svint8_t svtbl2_s8(svint8x2_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
+svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
+svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
+svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
+svint32_t svtbl2_s32(svint32x2_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
+svint64_t svtbl2_s64(svint64x2_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
+svint16_t svtbl2_s16(svint16x2_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
+svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
+svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
+svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
+svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
+svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
+svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
+svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
+svfloat16_t svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
+svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
+svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
+svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
+svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
+svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
+svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
+svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
+svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
+svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
+svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
+svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
+svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
+svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
+svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
+svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
+svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
+svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
+svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
+svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
+svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
+svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
+svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
+svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
+svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
+svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
+svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
+svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
+svbool_t svwhilege_b8_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
+svbool_t svwhilege_b32_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
+svbool_t svwhilege_b64_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
+svbool_t svwhilege_b16_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
+svbool_t svwhilege_b8_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
+svbool_t svwhilege_b32_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
+svbool_t svwhilege_b64_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
+svbool_t svwhilege_b16_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
+svbool_t svwhilege_b8_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
+svbool_t svwhilege_b32_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
+svbool_t svwhilege_b64_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
+svbool_t svwhilege_b16_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
+svbool_t svwhilege_b8_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
+svbool_t svwhilege_b32_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
+svbool_t svwhilege_b64_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
+svbool_t svwhilege_b16_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
+svbool_t svwhilegt_b8_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
+svbool_t svwhilegt_b32_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
+svbool_t svwhilegt_b64_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
+svbool_t svwhilegt_b16_s32(int32_t, int32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
+svbool_t svwhilegt_b8_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
+svbool_t svwhilegt_b32_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
+svbool_t svwhilegt_b64_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
+svbool_t svwhilegt_b16_s64(int64_t, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
+svbool_t svwhilegt_b8_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
+svbool_t svwhilegt_b32_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
+svbool_t svwhilegt_b64_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
+svbool_t svwhilegt_b16_u32(uint32_t, uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
+svbool_t svwhilegt_b8_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
+svbool_t svwhilegt_b32_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
+svbool_t svwhilegt_b64_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
+svbool_t svwhilegt_b16_u64(uint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
+svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
+svbool_t svwhilerw_s8(int8_t const *, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
+svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
+svbool_t svwhilerw_f64(float64_t const *, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
+svbool_t svwhilerw_s64(int64_t const *, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
+svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
+svbool_t svwhilerw_f16(float16_t const *, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
+svbool_t svwhilerw_s16(int16_t const *, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
+svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
+svbool_t svwhilerw_f32(float32_t const *, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
+svbool_t svwhilerw_s32(int32_t const *, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
+svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
+svbool_t svwhilewr_s8(int8_t const *, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
+svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
+svbool_t svwhilewr_f64(float64_t const *, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
+svbool_t svwhilewr_s64(int64_t const *, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
+svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
+svbool_t svwhilewr_f16(float16_t const *, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
+svbool_t svwhilewr_s16(int16_t const *, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
+svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
+svbool_t svwhilewr_f32(float32_t const *, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
+svbool_t svwhilewr_s32(int32_t const *, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
+svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
+svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
+svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
+svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
+svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
+svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
+svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
+svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
+svint8_t svaba(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
+svint32_t svaba(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
+svint64_t svaba(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
+svint16_t svaba(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
+svuint8_t svaba(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
+svuint32_t svaba(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
+svuint64_t svaba(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
+svuint16_t svaba(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
+svint8_t svaba(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
+svint32_t svaba(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
+svint64_t svaba(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
+svint16_t svaba(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
+svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
+svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
+svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
+svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
+svint32_t svabalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
+svint64_t svabalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
+svint16_t svabalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
+svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
+svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
+svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
+svint32_t svabalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
+svint64_t svabalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
+svint16_t svabalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
+svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
+svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
+svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
+svint32_t svabalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
+svint64_t svabalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
+svint16_t svabalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
+svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
+svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
+svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
+svint32_t svabalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
+svint64_t svabalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
+svint16_t svabalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
+svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
+svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
+svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
+svint32_t svabdlb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
+svint64_t svabdlb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
+svint16_t svabdlb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
+svuint32_t svabdlb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
+svuint64_t svabdlb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
+svuint16_t svabdlb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
+svint32_t svabdlb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
+svint64_t svabdlb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
+svint16_t svabdlb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
+svuint32_t svabdlb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
+svuint64_t svabdlb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
+svuint16_t svabdlb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
+svint32_t svabdlt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
+svint64_t svabdlt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
+svint16_t svabdlt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
+svuint32_t svabdlt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
+svuint64_t svabdlt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
+svuint16_t svabdlt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
+svint32_t svabdlt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
+svint64_t svabdlt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
+svint16_t svabdlt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
+svuint32_t svabdlt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
+svuint64_t svabdlt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
+svuint16_t svabdlt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
+svint32_t svadalp_m(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
+svint64_t svadalp_m(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
+svint16_t svadalp_m(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
+svint32_t svadalp_x(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
+svint64_t svadalp_x(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
+svint16_t svadalp_x(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
+svint32_t svadalp_z(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
+svint64_t svadalp_z(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
+svint16_t svadalp_z(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
+svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
+svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
+svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
+svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
+svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
+svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
+svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))
+svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))
+svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))
+svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))
+svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))
+svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))
+svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))
+svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))
+svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))
+svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))
+svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))
+svuint16_t svaddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))
+svuint32_t svaddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))
+svuint8_t svaddhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))
+svint16_t svaddhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))
+svint32_t svaddhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))
+svint8_t svaddhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))
+svuint16_t svaddhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))
+svuint32_t svaddhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))
+svuint8_t svaddhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))
+svint16_t svaddhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))
+svint32_t svaddhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))
+svint8_t svaddhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))
+svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))
+svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))
+svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))
+svint16_t svaddhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))
+svint32_t svaddhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))
+svint8_t svaddhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))
+svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))
+svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))
+svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))
+svint16_t svaddhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))
+svint32_t svaddhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))
+svint8_t svaddhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))
+svint32_t svaddlb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))
+svint64_t svaddlb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))
+svint16_t svaddlb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))
+svuint32_t svaddlb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))
+svuint64_t svaddlb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))
+svuint16_t svaddlb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))
+svint32_t svaddlb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))
+svint64_t svaddlb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))
+svint16_t svaddlb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))
+svuint32_t svaddlb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))
+svuint64_t svaddlb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))
+svuint16_t svaddlb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))
+svint32_t svaddlbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))
+svint64_t svaddlbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))
+svint16_t svaddlbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))
+svint32_t svaddlbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))
+svint64_t svaddlbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))
+svint16_t svaddlbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))
+svint32_t svaddlt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))
+svint64_t svaddlt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))
+svint16_t svaddlt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))
+svuint32_t svaddlt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))
+svuint64_t svaddlt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))
+svuint16_t svaddlt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))
+svint32_t svaddlt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))
+svint64_t svaddlt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))
+svint16_t svaddlt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))
+svuint32_t svaddlt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))
+svuint64_t svaddlt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))
+svuint16_t svaddlt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))
+svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))
+svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))
+svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))
+svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))
+svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))
+svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))
+svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))
+svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))
+svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))
+svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))
+svint8_t svaddp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))
+svint32_t svaddp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))
+svint64_t svaddp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))
+svint16_t svaddp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))
+svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))
+svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))
+svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))
+svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))
+svint8_t svaddp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))
+svint32_t svaddp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))
+svint64_t svaddp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))
+svint16_t svaddp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))
+svint32_t svaddwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))
+svint64_t svaddwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))
+svint16_t svaddwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))
+svuint32_t svaddwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))
+svuint64_t svaddwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))
+svuint16_t svaddwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))
+svint32_t svaddwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))
+svint64_t svaddwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))
+svint16_t svaddwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))
+svuint32_t svaddwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))
+svuint64_t svaddwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))
+svuint16_t svaddwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))
+svint32_t svaddwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))
+svint64_t svaddwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))
+svint16_t svaddwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))
+svuint32_t svaddwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))
+svuint64_t svaddwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))
+svuint16_t svaddwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))
+svint32_t svaddwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))
+svint64_t svaddwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))
+svint16_t svaddwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))
+svuint32_t svaddwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))
+svuint64_t svaddwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))
+svuint16_t svaddwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))
+svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))
+svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))
+svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))
+svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))
+svint8_t svbcax(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))
+svint32_t svbcax(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))
+svint64_t svbcax(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))
+svint16_t svbcax(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))
+svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))
+svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))
+svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))
+svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))
+svint8_t svbcax(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))
+svint32_t svbcax(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))
+svint64_t svbcax(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))
+svint16_t svbcax(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))
+svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))
+svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))
+svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))
+svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))
+svint8_t svbsl1n(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))
+svint32_t svbsl1n(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))
+svint64_t svbsl1n(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))
+svint16_t svbsl1n(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))
+svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))
+svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))
+svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))
+svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))
+svint8_t svbsl1n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))
+svint32_t svbsl1n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))
+svint64_t svbsl1n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))
+svint16_t svbsl1n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
+svint8_t svbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
+svint32_t svbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
+svint64_t svbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
+svint16_t svbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
+svint8_t svbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
+svint32_t svbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
+svint64_t svbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
+svint16_t svbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
+svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
+svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
+svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
+svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
+svint8_t svcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
+svint32_t svcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
+svint64_t svcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
+svint16_t svcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
+svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
+svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
+svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
+svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
+svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
+svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
+svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
+svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
+svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
+svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
+svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
+svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
+svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
+svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
+svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
+svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
+svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
+svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
+svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
+svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
+svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
+svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
+svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
+svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
+svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
+svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
+svint8_t sveor3(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
+svint32_t sveor3(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
+svint64_t sveor3(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
+svint16_t sveor3(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
+svint8_t sveor3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
+svint32_t sveor3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
+svint64_t sveor3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
+svint16_t sveor3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
+svint8_t sveortb(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
+svint32_t sveortb(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
+svint64_t sveortb(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
+svint16_t sveortb(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
+svint8_t sveortb(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
+svint32_t sveortb(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
+svint64_t sveortb(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
+svint16_t sveortb(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
+svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
+svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
+svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
+svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
+svuint8_t svhistseg(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
+svuint8_t svhistseg(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) +svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) +svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) +svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) +svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) +svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) +svint8_t svhsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) +svint32_t svhsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) +svint64_t svhsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) +svint16_t svhsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) +svint8_t svhsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) +svint32_t svhsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) +svint64_t svhsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) +svint16_t svhsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) +svint8_t svhsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) +svint32_t svhsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) +svint64_t svhsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) +svint16_t svhsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) +svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) +svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) +svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) +svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) +svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) +svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) +svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) +svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) +svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) +svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) +svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) +svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) +svint8_t svhsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) +svint32_t svhsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) +svint64_t svhsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) +svint16_t svhsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) +svint8_t svhsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) +svint32_t svhsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) +svint64_t svhsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) +svint16_t svhsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) +svint8_t svhsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) +svint32_t svhsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) +svint64_t svhsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) +svint16_t svhsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) +svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) +svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) +svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) +svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) +svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) +svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) +svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) +svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) +svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) +svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) +svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) +svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) 
+svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) +svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) +svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) +svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) +svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) +svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) +svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) +svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) +svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) +svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) +svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) +svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) +svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) +svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) +svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) +svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) +svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) +svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) +svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) +svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) +svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) +svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) +svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) +svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) +svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) +svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) +svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) +svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) +svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) +svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) +svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) +svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) +svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) +svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) +svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) +svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) +svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) +svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) +svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) +svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) +svint32_t svldnt1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) +svint64_t svldnt1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) +svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) +svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) +svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) +svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) +svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) +svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) +svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) +svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) +svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) +svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) +svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) +svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) +svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) +svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) +svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) +svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) +svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) +svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) +svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) +svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) +svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) +svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) +svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) +svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) +svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) +svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) +svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) +svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) +svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) +svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t 
svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t 
const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) +svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) +svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) +svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) +svint64_t svlogb_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) +svint32_t svlogb_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) +svint16_t svlogb_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) +svint64_t svlogb_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) +svint32_t svlogb_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) +svint16_t svlogb_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) +svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) +svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) +svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) +svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) +svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) +svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) +svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) +svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) +svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) +svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) +svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) +svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) +svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) +svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) +svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) +svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) +svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) +svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) +svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) +svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) +svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) +svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) +svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) +svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) +svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) +svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) +svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) +svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) +svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) +svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) +svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) +svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) +svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) +svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) +svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) +svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) +svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) +svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) +svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) +svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) +svint8_t svminp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) +svint32_t svminp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) +svint64_t svminp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) +svint16_t svminp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) +svint8_t svminp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) +svint32_t svminp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) +svint64_t svminp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) +svint16_t svminp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) +svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) +svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) +svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) +svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) +svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) +svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) +svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) +svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) +svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) +svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) +svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) +svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) +svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) +svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) +svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) +svint32_t svmlalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) +svint64_t svmlalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) +svint16_t svmlalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) +svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) +svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) +svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) +svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) +svint32_t svmlalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) +svint64_t svmlalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) +svint16_t svmlalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) +svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) +svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) +svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) +svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) +svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) +svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) +svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) +svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) +svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) +svint32_t svmlalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) +svint64_t svmlalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) +svint16_t svmlalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) +svuint32_t svmlalt(svuint32_t, svuint16_t, 
uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) +svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) +svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) +svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) +svint32_t svmlalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) +svint64_t svmlalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) +svint16_t svmlalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) +svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) +svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) +svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) +svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) +svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) +svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) +svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) +svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) +svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) +svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) +svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) +svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) +svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) +svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) +svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) +svint32_t svmlslb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) +svint64_t svmlslb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) +svint16_t svmlslb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) +svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
+svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
+svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
+svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
+svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
+svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
+svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
+svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
+svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
+svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
+svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
+svint32_t svmovlb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
+svint64_t svmovlb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
+svint16_t svmovlb(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
+svuint32_t svmovlb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
+svuint64_t svmovlb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
+svuint16_t svmovlb(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
+svint32_t svmovlt(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
+svint64_t svmovlt(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
+svint16_t svmovlt(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
+svuint32_t svmovlt(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
+svuint64_t svmovlt(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
+svuint16_t svmovlt(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
+svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
+svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
+svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
+svint32_t svmul_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
+svint64_t svmul_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
+svint16_t svmul_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
+svint32_t svmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
+svint64_t svmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
+svint16_t svmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
+svuint32_t svmullb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
+svuint64_t svmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
+svuint16_t svmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
+svint32_t svmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
+svint64_t svmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
+svint16_t svmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
+svuint32_t svmullb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
+svuint64_t svmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
+svuint16_t svmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
+svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
+svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
+svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
+svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
+svint32_t svmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
+svint64_t svmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
+svint16_t svmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
+svuint32_t svmullt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
+svuint64_t svmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
+svuint16_t svmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
+svint32_t svmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
+svint64_t svmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
+svint16_t svmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
+svuint32_t svmullt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
+svuint64_t svmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
+svuint16_t svmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
+svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
+svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
+svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
+svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
+svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
+svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
+svbool_t svnmatch(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
+svbool_t svnmatch(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
+svuint8_t svpmul(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
+svuint8_t svpmul(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
+svuint64_t svpmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
+svuint16_t svpmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
+svuint64_t svpmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
+svuint16_t svpmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
+svuint8_t svpmullb_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
+svuint32_t svpmullb_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
+svuint8_t svpmullb_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
+svuint32_t svpmullb_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
+svuint64_t svpmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
+svuint16_t svpmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
+svuint64_t svpmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
+svuint16_t svpmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
+svuint8_t svpmullt_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
+svuint32_t svpmullt_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
+svuint8_t svpmullt_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
+svuint32_t svpmullt_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
+svint8_t svqabs_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
+svint32_t svqabs_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
+svint64_t svqabs_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
+svint16_t svqabs_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
+svint8_t svqabs_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
+svint32_t svqabs_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
+svint64_t svqabs_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
+svint16_t svqabs_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
+svint8_t svqabs_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
+svint32_t svqabs_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
+svint64_t svqabs_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
+svint16_t svqabs_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
+svint8_t svqcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
+svint32_t svqcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
+svint64_t svqcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
+svint16_t svqcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
+svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
+svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))
+svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))
+svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))
+svint32_t svqdmlalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))
+svint64_t svqdmlalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))
+svint16_t svqdmlalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))
+svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))
+svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))
+svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))
+svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))
+svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))
+svint32_t svqdmlslb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))
+svint64_t svqdmlslb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))
+svint16_t svqdmlslb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))
+svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))
+svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))
+svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))
+svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))
+svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))
+svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))
+svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))
+svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))
+svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))
+svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))
+svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))
+svint32_t svqdmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))
+svint64_t svqdmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))
+svint16_t svqdmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))
+svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))
+svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))
+svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))
+svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))
+svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))
+svint8_t svqdmulh(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))
+svint32_t svqdmulh(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))
+svint64_t svqdmulh(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))
+svint16_t svqdmulh(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))
+svint8_t svqdmulh(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))
+svint32_t svqdmulh(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))
+svint64_t svqdmulh(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))
+svint16_t svqdmulh(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))
+svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))
+svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))
+svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))
+svint32_t svqdmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))
+svint64_t svqdmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))
+svint16_t svqdmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))
+svint32_t svqdmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))
+svint64_t svqdmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))
+svint16_t svqdmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))
+svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))
+svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))
+svint32_t svqdmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))
+svint64_t svqdmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))
+svint16_t svqdmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))
+svint32_t svqdmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))
+svint64_t svqdmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))
+svint16_t svqdmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))
+svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))
+svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))
+svint8_t svqneg_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))
+svint32_t svqneg_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))
+svint64_t svqneg_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))
+svint16_t svqneg_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))
+svint8_t svqneg_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))
+svint32_t svqneg_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))
+svint64_t svqneg_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))
+svint16_t svqneg_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))
+svint8_t svqneg_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))
+svint32_t svqneg_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))
+svint64_t svqneg_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))
+svint16_t svqneg_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))
+svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))
+svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))
+svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))
+svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))
+svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))
+svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))
+svint8_t svqrdmlah(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))
+svint32_t svqrdmlah(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))
+svint64_t svqrdmlah(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))
+svint16_t svqrdmlah(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))
+svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))
+svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))
+svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))
+svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))
+svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))
+svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))
+svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))
+svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))
+svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))
+svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))
+svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))
+svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))
+svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))
+svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))
+svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))
+svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))
+svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))
+svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))
+svint8_t svqrdmulh(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))
+svint32_t svqrdmulh(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))
+svint64_t svqrdmulh(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))
+svint16_t svqrdmulh(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))
+svint8_t svqrdmulh(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))
+svint32_t svqrdmulh(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))
+svint64_t svqrdmulh(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))
+svint16_t svqrdmulh(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))
+svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))
+svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))
+svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))
+svint8_t svqrshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))
+svint32_t svqrshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))
+svint64_t svqrshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))
+svint16_t svqrshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))
+svint8_t svqrshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))
+svint32_t svqrshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))
+svint64_t svqrshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))
+svint16_t svqrshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))
+svint8_t svqrshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))
+svint32_t svqrshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))
+svint64_t svqrshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))
+svint16_t svqrshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))
+svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))
+svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))
+svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))
+svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))
+svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))
+svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))
+svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))
+svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))
+svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))
+svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))
+svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))
+svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))
+svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))
+svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))
+svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))
+svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))
+svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))
+svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))
+svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))
+svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))
+svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))
+svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))
+svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))
+svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))
+svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))
+svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))
+svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))
+svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))
+svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))
+svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))
+svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
+svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
+svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
+svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
+svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
+svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
+svint16_t svqrshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
+svint32_t svqrshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
+svint8_t svqrshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
+svuint16_t svqrshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
+svuint32_t svqrshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
+svuint8_t svqrshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
+svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
+svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
+svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
+svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
+svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
+svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
+svuint16_t svqrshrunb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
+svuint32_t svqrshrunb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
+svuint8_t svqrshrunb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
+svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
+svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
+svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
+svint8_t svqshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
+svint32_t svqshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
+svint64_t svqshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
+svint16_t svqshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
+svint8_t svqshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
+svint32_t svqshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
+svint64_t svqshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
+svint16_t svqshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
+svint8_t svqshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
+svint32_t svqshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
+svint64_t svqshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
+svint16_t svqshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
+svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
+svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
+svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
+svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
+svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
+svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
+svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
+svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
+svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
+svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
+svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
+svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
+svint8_t svqshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
+svint32_t svqshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
+svint64_t svqshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
+svint16_t svqshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
+svint8_t svqshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
+svint32_t svqshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
+svint64_t svqshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
+svint16_t svqshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
+svint8_t svqshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
+svint32_t svqshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
+svint64_t svqshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
+svint16_t svqshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
+svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
+svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
+svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
+svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
+svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
+svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
+svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
+svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
+svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
+svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
+svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
+svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
+svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
+svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
+svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
+svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
+svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
+svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
+svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
+svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
+svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
+svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
+svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
+svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
+svint16_t svqshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
+svint32_t svqshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
+svint8_t svqshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
+svuint16_t svqshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
+svuint32_t svqshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
+svuint8_t svqshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
+svint16_t svqshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
+svint32_t svqshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
+svint8_t svqshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
+svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
+svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
+svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
+svuint16_t svqshrunb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
+svuint32_t svqshrunb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
+svuint8_t svqshrunb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
+svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
+svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
+svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
+svint8_t svqsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
+svint32_t svqsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
+svint64_t svqsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
+svint16_t svqsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
+svint8_t svqsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
+svint32_t svqsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
+svint64_t svqsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
+svint16_t svqsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
+svint8_t svqsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
+svint32_t svqsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
+svint64_t svqsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
+svint16_t svqsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
+svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
+svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
+svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
+svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
+svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
+svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
+svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
+svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
+svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
+svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
+svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
+svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
+svint8_t svqsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
+svint32_t svqsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
+svint64_t svqsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
+svint16_t svqsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
+svint8_t svqsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
+svint32_t svqsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
+svint64_t svqsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
+svint16_t svqsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
+svint8_t svqsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
+svint32_t svqsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
+svint64_t svqsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
+svint16_t svqsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
+svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
+svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
+svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
+svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
+svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
+svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
+svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
+svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
+svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
+svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
+svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
+svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
+svint8_t svqsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
+svint32_t svqsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
+svint64_t svqsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
+svint16_t svqsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
+svint8_t svqsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
+svint32_t svqsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
+svint64_t svqsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
+svint16_t svqsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
+svint8_t svqsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
+svint32_t svqsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
+svint64_t svqsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
+svint16_t svqsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
+svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
+svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
+svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
+svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
+svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
+svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
+svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
+svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
+svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
+svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
+svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
+svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
+svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
+svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
+svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
+svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
+svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
+svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
+svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
+svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
+svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
+svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
+svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
+svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
+svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
+svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
+svint16_t svqxtnb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
+svint32_t svqxtnb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
+svint8_t svqxtnb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
+svuint16_t svqxtnb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
+svuint32_t svqxtnb(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
+svuint8_t svqxtnb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
+svint16_t svqxtnt(svint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
+svint32_t svqxtnt(svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
+svint8_t svqxtnt(svint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
+svuint16_t svqxtnt(svuint16_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
+svuint32_t svqxtnt(svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
+svuint8_t svqxtnt(svuint8_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
+svuint16_t svqxtunb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
+svuint32_t svqxtunb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
+svuint8_t svqxtunb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
+svuint16_t svqxtunt(svuint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
+svuint32_t svqxtunt(svuint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
+svuint8_t svqxtunt(svuint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
+svuint16_t svraddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
+svuint32_t svraddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
+svuint8_t svraddhnb(svuint16_t, uint16_t);
+__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) +svint16_t svraddhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) +svint32_t svraddhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) +svint8_t svraddhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) +svuint16_t svraddhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) +svuint32_t svraddhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) +svuint8_t svraddhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) +svint16_t svraddhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) +svint32_t svraddhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) +svint8_t svraddhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) +svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) +svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) +svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) +svint16_t svraddhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) +svint32_t svraddhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) +svint8_t svraddhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) +svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) +svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) +svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) +svint16_t svraddhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) +svint32_t svraddhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) +svint8_t svraddhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) +svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) +svuint32_t svrecpe_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) +svuint32_t svrecpe_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) +svint8_t svrhadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) +svint32_t svrhadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) +svint64_t svrhadd_m(svbool_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) +svint16_t svrhadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) +svint8_t svrhadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) +svint32_t svrhadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) +svint64_t svrhadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) +svint16_t svrhadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) +svint8_t svrhadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) +svint32_t svrhadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) +svint64_t svrhadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) +svint16_t svrhadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) +svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) +svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) +svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) +svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) +svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) +svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) +svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) +svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) +svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) +svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) +svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) +svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) +svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) +svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) +svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) +svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) +svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) +svint32_t 
svrhadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) +svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) +svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) +svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) +svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) +svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) +svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) +svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) +svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) +svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) +svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) +svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) +svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) +svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) +svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) +svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) +svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) +svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) +svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) +svint8_t svrshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) +svint32_t svrshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) +svint64_t svrshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) +svint16_t svrshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) +svint8_t svrshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) +svint32_t svrshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) +svint64_t svrshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) +svint16_t svrshl_x(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) +svint8_t svrshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) +svint32_t svrshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) +svint64_t svrshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) +svint16_t svrshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) +svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) +svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) +svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) +svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) +svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) +svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) +svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) +svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) +svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) +svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) +svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) +svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) +svint8_t svrshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) +svint32_t svrshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) +svint64_t svrshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) +svint16_t svrshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) +svint8_t svrshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) +svint32_t svrshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) +svint64_t svrshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) +svint16_t svrshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) +svint8_t svrshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) +svint32_t svrshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) +svint64_t svrshl_z(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) +svint16_t svrshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) +svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) +svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) +svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) +svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) +svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) +svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) +svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) +svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) +svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) +svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) +svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) +svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) +svint8_t svrshr_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) +svint32_t svrshr_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) +svint64_t svrshr_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) +svint16_t svrshr_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) +svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) +svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) +svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) +svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) +svint8_t svrshr_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) +svint32_t svrshr_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) +svint64_t svrshr_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) +svint16_t svrshr_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) +svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) +svuint32_t svrshr_x(svbool_t, svuint32_t, 
uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) +svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) +svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) +svint8_t svrshr_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) +svint32_t svrshr_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) +svint64_t svrshr_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) +svint16_t svrshr_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) +svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) +svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) +svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) +svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) +svuint16_t svrshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) +svuint32_t svrshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) +svuint8_t svrshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) +svint16_t svrshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) +svint32_t svrshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) +svint8_t svrshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) +svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) +svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) +svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) +svint16_t svrshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) +svint32_t svrshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) +svint8_t svrshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) +svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) +svuint32_t svrsqrte_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) +svuint32_t svrsqrte_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) +svint8_t svrsra(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) +svint32_t svrsra(svint32_t, svint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) +svint64_t svrsra(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) +svint16_t svrsra(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) +svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) +svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) +svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) +svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) +svuint16_t svrsubhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) +svuint32_t svrsubhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) +svuint8_t svrsubhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) +svint16_t svrsubhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) +svint32_t svrsubhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) +svint8_t svrsubhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) +svuint16_t svrsubhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) +svuint32_t svrsubhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) +svuint8_t svrsubhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) +svint16_t svrsubhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) +svint32_t svrsubhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) +svint8_t svrsubhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) +svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) +svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) +svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) +svint16_t svrsubhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) +svint32_t svrsubhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) +svint8_t svrsubhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) +svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) +svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) +svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) +svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) +svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) +svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) +svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) +svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) +svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) +svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) +svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) +svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) +svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) +svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) +svint32_t svshllb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) +svint64_t svshllb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) +svint16_t svshllb(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) +svuint32_t svshllb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) +svuint64_t svshllb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) +svuint16_t svshllb(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) +svint32_t svshllt(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) +svint64_t svshllt(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) +svint16_t svshllt(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) +svuint32_t svshllt(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) +svuint64_t svshllt(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) +svuint16_t svshllt(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) +svuint16_t svshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) +svuint32_t svshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) +svuint8_t svshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) +svint16_t svshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) +svint32_t svshrnb(svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) +svint8_t svshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) +svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) +svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) +svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) +svint16_t svshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) +svint32_t svshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) +svint8_t svshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) +svuint8_t svsli(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) +svuint32_t svsli(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) +svuint64_t svsli(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) +svuint16_t svsli(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) +svint8_t svsli(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) +svint32_t svsli(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) +svint64_t svsli(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) +svint16_t svsli(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) +svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) +svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) +svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) +svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) +svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) +svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) +svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) +svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) +svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) +svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) +svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) +svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) +svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) +svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) +svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) +svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) +svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) +svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) +svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) +svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) +svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) +svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) +svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) +svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) +svint8_t svsra(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) +svint32_t svsra(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) +svint64_t svsra(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) +svint16_t svsra(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) +svuint8_t svsra(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) +svuint32_t svsra(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) +svuint64_t svsra(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) +svuint16_t svsra(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) +svuint8_t svsri(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) +svuint32_t svsri(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) +svuint64_t svsri(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) +svuint16_t svsri(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) +svint8_t svsri(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) +svint32_t svsri(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) +svint64_t svsri(svint64_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
+svint16_t svsri(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
+void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
+void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
+void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
+void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
+void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
+void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
+void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
+void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
+void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
+void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
+void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
+void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
+void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
+void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
+void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
+void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
+void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
+void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
+void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
+void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
+void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
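
The declarations above are the overloaded form of the SVE2 non-temporal scatter stores: each __aio wrapper forwards, through __clang_arm_builtin_alias, to the type-suffixed builtin named in the attribute, so a single name such as svstnt1b_scatter_offset dispatches on its argument types. A minimal usage sketch (editorial illustration only, not part of the patch; assumes clang invoked with -march=armv8-a+sve2, and the function name saturating_step is hypothetical):

#include <arm_sve.h>

/* Saturating-subtract 32-bit lanes, then scatter the results as bytes.
 * svqsub_x resolves to __builtin_sve_svqsub_s32_x and
 * svstnt1b_scatter_offset to __builtin_sve_svstnt1b_scatter_u32offset_s32,
 * both declared in this header. */
void saturating_step(svbool_t pg, svint32_t acc, svint32_t dec,
                     int8_t *base, svuint32_t offsets) {
  svint32_t clamped = svqsub_x(pg, acc, dec); /* _x: inactive lanes unspecified */
  svstnt1b_scatter_offset(pg, base, offsets, clamped); /* low byte of each lane */
}

As elsewhere in the header, the _m/_x/_z suffixes are the ACLE predication forms (merge, don't-care, zero), and svstnt1b truncates each element to 8 bits before the non-temporal store.
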
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
+void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
+void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
+void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
+void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
+void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
+void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
+void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
+void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
+void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
+void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
+void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
+void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
+void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
+void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
+void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
+void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
+void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
+void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
+void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
+void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
+void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
+void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
+svuint16_t svsubhnb(svuint32_t, uint32_t);
+__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) +svuint32_t svsubhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) +svuint8_t svsubhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) +svint16_t svsubhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) +svint32_t svsubhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) +svint8_t svsubhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) +svuint16_t svsubhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) +svuint32_t svsubhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) +svuint8_t svsubhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) +svint16_t svsubhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) +svint32_t svsubhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) +svint8_t svsubhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) +svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) +svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) +svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) +svint16_t svsubhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) +svint32_t svsubhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) +svint8_t svsubhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) +svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) +svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) +svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) +svint16_t svsubhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) +svint32_t svsubhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) +svint8_t svsubhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) +svint32_t svsublb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) +svint64_t svsublb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) +svint16_t svsublb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) +svuint32_t svsublb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) +svuint64_t svsublb(svuint32_t, uint32_t); +__aio 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
+svuint16_t svsublb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
+svint32_t svsublb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
+svint64_t svsublb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
+svint16_t svsublb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
+svuint32_t svsublb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
+svuint64_t svsublb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
+svuint16_t svsublb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
+svint32_t svsublbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
+svint64_t svsublbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
+svint16_t svsublbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
+svint32_t svsublbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
+svint64_t svsublbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
+svint16_t svsublbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
+svint32_t svsublt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
+svint64_t svsublt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
+svint16_t svsublt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
+svuint32_t svsublt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
+svuint64_t svsublt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
+svuint16_t svsublt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
+svint32_t svsublt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
+svint64_t svsublt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
+svint16_t svsublt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
+svuint32_t svsublt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
+svuint64_t svsublt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
+svuint16_t svsublt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
+svint32_t svsubltb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
+svint64_t svsubltb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
+svint16_t svsubltb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
+svint32_t svsubltb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
+svint64_t svsubltb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
+svint16_t svsubltb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
+svint32_t svsubwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
+svint64_t svsubwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
+svint16_t svsubwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
+svuint32_t svsubwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
+svuint64_t svsubwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
+svuint16_t svsubwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
+svint32_t svsubwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
+svint64_t svsubwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
+svint16_t svsubwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
+svuint32_t svsubwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
+svuint64_t svsubwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
+svuint16_t svsubwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
+svint32_t svsubwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
+svint64_t svsubwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
+svint16_t svsubwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
+svuint32_t svsubwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
+svuint64_t svsubwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
+svuint16_t svsubwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
+svint32_t svsubwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
+svint64_t svsubwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
+svint16_t svsubwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
+svuint32_t svsubwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
+svuint64_t svsubwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
+svuint16_t svsubwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
+svuint8_t svtbl2(svuint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
+svuint32_t svtbl2(svuint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
+svuint64_t svtbl2(svuint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
+svuint16_t svtbl2(svuint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
+svint8_t svtbl2(svint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
+svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
+svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
+svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
+svint32_t svtbl2(svint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
+svint64_t svtbl2(svint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
+svint16_t svtbl2(svint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
+svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
+svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
+svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
+svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
+svint8_t svtbx(svint8_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
+svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
+svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
+svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
+svint32_t svtbx(svint32_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
+svint64_t svtbx(svint64_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
+svint16_t svtbx(svint16_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
+svbool_t svwhilege_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
+svbool_t svwhilege_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
+svbool_t svwhilege_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
+svbool_t svwhilege_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
+svbool_t svwhilege_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
+svbool_t svwhilege_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
+svbool_t svwhilege_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
+svbool_t svwhilege_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
+svbool_t svwhilege_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
+svbool_t svwhilege_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
+svbool_t svwhilege_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
+svbool_t svwhilege_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
+svbool_t svwhilege_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
+svbool_t svwhilege_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
+svbool_t svwhilege_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
+svbool_t svwhilege_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
+svbool_t svwhilegt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
+svbool_t svwhilegt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
+svbool_t svwhilegt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
+svbool_t svwhilegt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
+svbool_t svwhilegt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
+svbool_t svwhilegt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
+svbool_t svwhilegt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
+svbool_t svwhilegt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
+svbool_t svwhilegt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
+svbool_t svwhilegt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
+svbool_t svwhilegt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
+svbool_t svwhilegt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
+svbool_t svwhilegt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
+svbool_t svwhilegt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
+svbool_t svwhilegt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
+svbool_t svwhilegt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
+svbool_t svwhilerw(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
+svbool_t svwhilerw(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
+svbool_t svwhilerw(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
+svbool_t svwhilerw(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
+svbool_t svwhilerw(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
+svbool_t svwhilerw(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
+svbool_t svwhilerw(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
+svbool_t svwhilerw(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
+svbool_t svwhilerw(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
+svbool_t svwhilerw(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
+svbool_t svwhilerw(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
+svbool_t svwhilewr(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
+svbool_t svwhilewr(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
+svbool_t svwhilewr(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
+svbool_t svwhilewr(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
+svbool_t svwhilewr(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
+svbool_t svwhilewr(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
+svbool_t svwhilewr(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
+svbool_t svwhilewr(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
+svbool_t svwhilewr(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
+svbool_t svwhilewr(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
+svbool_t svwhilewr(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
+svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
+svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
+svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
+svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
+svint8_t svxar(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
+svint32_t svxar(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
+svint64_t svxar(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
+svint16_t svxar(svint16_t, svint16_t, uint64_t);
+#endif //defined(__ARM_FEATURE_SVE2)
+
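+// NOTE: illustrative usage sketch only, not emitted by the header generator;
+// operand names here (acc, v) are hypothetical. The svxar overloads above
+// XOR two vectors and rotate each lane right by an immediate:
+//   svuint32_t r = svxar(acc, v, 7);   // per lane: r = (acc ^ v) ror #7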
+#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
+svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
+svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
+svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
+svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *);
+#endif //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+
+#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
+svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
+svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
+svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
+svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t);
+#endif //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE2_AES)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
+svuint8_t svaesd_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
+svuint8_t svaese_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
+svuint8_t svaesimc_u8(svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
+svuint8_t svaesmc_u8(svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
+svuint64_t svpmullb_pair_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
+svuint64_t svpmullb_pair_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
+svuint64_t svpmullt_pair_n_u64(svuint64_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
+svuint64_t svpmullt_pair_u64(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
+svuint8_t svaesd(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
+svuint8_t svaese(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
+svuint8_t svaesimc(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
+svuint8_t svaesmc(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
+svuint64_t svpmullb_pair(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
+svuint64_t svpmullb_pair(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
+svuint64_t svpmullt_pair(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
+svuint64_t svpmullt_pair(svuint64_t, svuint64_t);
+#endif //defined(__ARM_FEATURE_SVE2_AES)
+
+#if defined(__ARM_FEATURE_SVE2_SHA3)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
+svuint64_t svrax1_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
+svint64_t svrax1_s64(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
+svuint64_t svrax1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
+svint64_t svrax1(svint64_t, svint64_t);
+#endif //defined(__ARM_FEATURE_SVE2_SHA3)
+
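+// NOTE: illustrative usage sketch only, not emitted by the header generator;
+// operand names (a, b) are hypothetical. svrax1 above is the SHA-3
+// rotate-and-exclusive-OR step:
+//   svuint64_t t = svrax1(a, b);   // per lane: t = a ^ rol64(b, 1)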
+#if defined(__ARM_FEATURE_SVE2_SM4)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
+svuint32_t svsm4e_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
+svuint32_t svsm4ekey_u32(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
+svuint32_t svsm4e(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
+svuint32_t svsm4ekey(svuint32_t, svuint32_t);
+#endif //defined(__ARM_FEATURE_SVE2_SM4)
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
+svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
+svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
+svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
+svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
+svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
+svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
+svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
+svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
+svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
+svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
+bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
+svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
+bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
+svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
+svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
+svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
+svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
+svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
+svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
+svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
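+// NOTE: illustrative usage sketch only, not emitted by the header generator;
+// operand names (acc, x, y) are hypothetical. The svbfdot aliases earlier in
+// this block accumulate bf16 pair dot-products into f32 lanes:
+//   svfloat32_t acc2 = svbfdot_f32(acc, x, y);   // acc += x[2i]*y[2i] + x[2i+1]*y[2i+1]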
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
+svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
+svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
+svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
+svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
+svbfloat16_t svdup_n_bf16(bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
+svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
+svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
+svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
+svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
+svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
+svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
+svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
+svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
+svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
+svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
+svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
+bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
+bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
+svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
+svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
+svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
+svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
+svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
+svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
+svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
+svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
+svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
+svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
+svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
+svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
+svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
+svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
+svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
+uint64_t svlen_bf16(svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
+svbfloat16_t svrev_bf16(svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
+svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
+svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
+svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
+svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
+svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
+void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
+void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
+void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
+void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
+void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
+void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
+void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
+void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
+void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
+void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
+svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
+svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
+svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16)))
+svbfloat16x2_t svundef2_bf16();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16)))
+svbfloat16x3_t svundef3_bf16();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16)))
+svbfloat16x4_t svundef4_bf16();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16)))
+svbfloat16_t svundef_bf16();
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
+svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
+svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
+svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
+svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
+svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
+svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
+svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
+svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
+svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
+svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
+svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
+svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
+svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
+svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
+bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
+svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
+bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
+svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
+svuint16_t svcnt_x(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
+svuint16_t svcnt_z(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
+svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
+svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
+svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
+svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
+svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
+svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
+svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
+svbfloat16_t svdup_bf16(bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
+svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
+svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
+svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
+svbfloat16_t svdup_lane(svbfloat16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
+svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
+svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
+svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
+svbfloat16_t svget2(svbfloat16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
+svbfloat16_t svget3(svbfloat16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
+svbfloat16_t svget4(svbfloat16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
+svbfloat16_t svinsr(svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
+bfloat16_t svlasta(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
+bfloat16_t svlastb(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
+svbfloat16_t svld1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
+svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
+svbfloat16_t svld1rq(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
+svbfloat16x2_t svld2(svbool_t, bfloat16_t const *);
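+// NOTE: illustrative usage sketch only, not emitted by the header generator;
+// pg and src are hypothetical operands. The overloaded bf16 forms above pick
+// the element type from the pointer:
+//   svbfloat16_t v = svld1(pg, src);       // pg: svbool_t, src: const bfloat16_t *
+//   svbfloat16x2_t pair = svld2(pg, src);  // de-interleaving two-vector load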
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
+svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
+svbfloat16x3_t svld3(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
+svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
+svbfloat16x4_t svld4(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
+svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
+svbfloat16_t svldff1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
+svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
+svbfloat16_t svldnf1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
+svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
+svbfloat16_t svldnt1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
+svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
+uint64_t svlen(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
+svbfloat16_t svrev(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
+svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
+svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
+svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
+svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
+svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
+void svst1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
+void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
+void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
+void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
+void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
+void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
+void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
+void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
+void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
+void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
+svbfloat16_t svtbl(svbfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
+svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
+svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
+svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
+svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
+svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
+svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);
+#endif //defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP32)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
+svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
+svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);
+#endif //defined(__ARM_FEATURE_SVE_MATMUL_FP32)
+
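+// NOTE: illustrative usage sketch only, not emitted by the header generator;
+// operand names (acc, a, b) are hypothetical. svmmla above multiply-
+// accumulates 2x2 f32 matrices held in each 128-bit segment:
+//   svfloat32_t acc2 = svmmla(acc, a, b);   // per segment: acc += A(2x2) * B(2x2)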
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP64)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
+svuint8_t svld1ro_u8(svbool_t, uint8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
+svuint32_t svld1ro_u32(svbool_t, uint32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
+svuint64_t svld1ro_u64(svbool_t, uint64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
+svuint16_t svld1ro_u16(svbool_t, uint16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
+svint8_t svld1ro_s8(svbool_t, int8_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
+svfloat64_t svld1ro_f64(svbool_t, float64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
+svfloat32_t svld1ro_f32(svbool_t, float32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
+svfloat16_t svld1ro_f16(svbool_t, float16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
+svint32_t svld1ro_s32(svbool_t, int32_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
+svint64_t svld1ro_s64(svbool_t, int64_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
+svint16_t svld1ro_s16(svbool_t, int16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
+svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
+svuint8_t svtrn1q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
+svuint32_t svtrn1q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
+svuint64_t svtrn1q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
+svuint16_t svtrn1q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
+svint8_t svtrn1q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
+svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
+svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
+svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
+svint32_t svtrn1q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
+svint64_t svtrn1q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
+svint16_t svtrn1q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
+svuint8_t svtrn2q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
+svuint32_t svtrn2q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
+svuint64_t svtrn2q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
+svuint16_t svtrn2q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
+svint8_t svtrn2q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
+svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
+svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
+svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
+svint32_t svtrn2q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
+svint64_t svtrn2q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
+svint16_t svtrn2q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
+svuint8_t svuzp1q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
+svuint32_t svuzp1q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
+svuint64_t svuzp1q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
+svuint16_t svuzp1q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
+svint8_t svuzp1q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
+svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
+svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
+svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
+svint32_t svuzp1q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
+svint64_t svuzp1q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
+svint16_t svuzp1q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
+svuint8_t svuzp2q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
+svuint32_t svuzp2q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
+svuint64_t svuzp2q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
+svuint16_t svuzp2q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
+svint8_t svuzp2q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
+svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
+svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
+svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
+svint32_t svuzp2q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
+svint64_t svuzp2q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
+svint16_t svuzp2q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
+svuint8_t svzip1q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
+svuint32_t svzip1q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
+svuint64_t svzip1q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
+svuint16_t svzip1q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
+svint8_t svzip1q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
+svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
+svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
+svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
+svint32_t svzip1q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
+svint64_t svzip1q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
+svint16_t svzip1q_s16(svint16_t, svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
+svuint8_t svzip2q_u8(svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
+svuint32_t svzip2q_u32(svuint32_t, svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
+svuint64_t svzip2q_u64(svuint64_t, svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
+svuint16_t svzip2q_u16(svuint16_t, svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
+svint8_t svzip2q_s8(svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
+svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
+svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
+svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
+svint32_t svzip2q_s32(svint32_t, svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
+svint64_t svzip2q_s64(svint64_t, svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
+svint16_t svzip2q_s16(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
+svuint8_t svld1ro(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
+svuint32_t svld1ro(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
+svuint64_t svld1ro(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
+svuint16_t svld1ro(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
+svint8_t svld1ro(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
+svfloat64_t svld1ro(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
+svfloat32_t svld1ro(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
+svfloat16_t svld1ro(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
+svint32_t svld1ro(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
+svint64_t svld1ro(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
+svint16_t svld1ro(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
+svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
+svuint8_t svtrn1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
+svuint32_t svtrn1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
+svuint64_t svtrn1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
+svuint16_t svtrn1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
+svint8_t svtrn1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
+svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
+svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
+svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
+svint32_t svtrn1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
+svint64_t svtrn1q(svint64_t, svint64_t);
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t svuzp1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q(svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) +svuint64_t svzip1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t svzip2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q(svint16_t, svint16_t); +#endif //defined(__ARM_FEATURE_SVE_MATMUL_FP64) + +#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16) +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); +__ai 
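[Editor's note: the `*q` intrinsics above permute whole 128-bit quadwords rather than single elements. A minimal usage sketch, not part of the patch, assuming a toolchain targeting `-march=armv8.2-a+sve+f64mm`:]

#include <arm_sve.h>

/* svzip1q interleaves the 128-bit quadwords from the low halves of its two
   inputs; svzip2q does the same for the high halves. */
svfloat64_t zip_low_quads(svfloat64_t a, svfloat64_t b) {
  return svzip1q(a, b);
}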
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
+svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
+svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
+svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
+svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
+svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
+svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
+svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
+svbfloat16_t svld1ro(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
+svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
+svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
+svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
+svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
+svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
+svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t);
+#endif //defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_INT8)
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
+svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
+svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
+svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
+svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
+svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
+svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
+svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
+svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
+svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
+svint32_t svmmla(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
+svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
+svint32_t svsudot(svint32_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
+svint32_t svsudot(svint32_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
+svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
+svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
+svint32_t svusmmla(svint32_t, svuint8_t, svint8_t);
+#endif //defined(__ARM_FEATURE_SVE_MATMUL_INT8)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svcvtnt_bf16_x svcvtnt_bf16_m
+#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m
+#endif /*__ARM_FEATURE_SVE_BF16 */
+
+#if defined(__ARM_FEATURE_SVE2)
+#define svcvtnt_f16_x svcvtnt_f16_m
+#define svcvtnt_f16_f32_x svcvtnt_f16_f32_m
+#define svcvtnt_f32_x svcvtnt_f32_m
+#define svcvtnt_f32_f64_x svcvtnt_f32_f64_m
+
+#define svcvtxnt_f32_x svcvtxnt_f32_m
+#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m
+
+#endif /*__ARM_FEATURE_SVE2 */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /*__ARM_FEATURE_SVE */
+
+#endif /* __ARM_SVE_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/armintr.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/armintr.h
new file mode 100644
index 0000000..300ed4e
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/armintr.h
@@ -0,0 +1,31 @@
+/*===---- armintr.h - ARM Windows intrinsics -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <armintr.h>
+#else
+
+#ifndef __ARMINTR_H
+#define __ARMINTR_H
+
+typedef enum
+{
+  _ARM_BARRIER_SY    = 0xF,
+  _ARM_BARRIER_ST    = 0xE,
+  _ARM_BARRIER_ISH   = 0xB,
+  _ARM_BARRIER_ISHST = 0xA,
+  _ARM_BARRIER_NSH   = 0x7,
+  _ARM_BARRIER_NSHST = 0x6,
+  _ARM_BARRIER_OSH   = 0x3,
+  _ARM_BARRIER_OSHST = 0x2
+} _ARMINTR_BARRIER_TYPE;
+
+#endif /* __ARMINTR_H */
+#endif /* _MSC_VER */
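[Editor's note: `_ARMINTR_BARRIER_TYPE` mirrors the MSVC barrier-domain encodings. A hedged sketch of typical use, assuming MSVC's `__dmb` intrinsic is available in this compilation mode:]

#ifdef _MSC_VER
#include <armintr.h>

/* Make prior stores visible to the inner-shareable domain before
   publishing the flag (store-store barrier). */
void publish(volatile int *flag) {
  __dmb(_ARM_BARRIER_ISHST);
  *flag = 1;
}
#endif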
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx2intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx2intrin.h
new file mode 100644
index 0000000..cc16720
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx2intrin.h
@@ -0,0 +1,1148 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX2INTRIN_H
+#define __AVX2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm256_mpsadbw_epu8(X, Y, M) \
+  (__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
+    (__v32qi)(__m256i)(Y), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_abs_epi8(__m256i __a)
+{
+  return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_abs_epi16(__m256i __a)
+{
+  return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_abs_epi32(__m256i __a)
+{
+  return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packs_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packs_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packus_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+  return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v32qu)__a + (__v32qu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v16hu)__a + (__v16hu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v8su)__a + (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi64(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4du)__a + (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epu8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) \
+  (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+    (__v32qi)(__m256i)(b), (n))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_and_si256(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4du)__a & (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_andnot_si256(__m256i __a, __m256i __b)
+{
+  return (__m256i)(~(__v4du)__a & (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_avg_epu8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_avg_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+  return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+    (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) \
+  (__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
+    (__v16hi)(__m256i)(V2), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v32qi)__a == (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v16hi)__a == (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v8si)__a == (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4di)__a == (__v4di)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
+{
+  /* This function always performs a signed comparison, but __v32qi is a char
+     which may be signed or unsigned, so use __v32qs. */
+  return (__m256i)((__v32qs)__a > (__v32qs)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v16hi)__a > (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v8si)__a > (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4di)__a > (__v4di)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hadd_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hadd_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hadds_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hsub_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hsub_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hsubs_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maddubs_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_madd_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epu8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epu32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epu8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epu32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS256
+_mm256_movemask_epi8(__m256i __a)
+{
+  return __builtin_ia32_pmovmskb256((__v32qi)__a);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+  /* This function always performs a signed extension, but __v16qi is a char
+     which may be signed or unsigned, so use __v16qs. */
+  return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+  /* This function always performs a signed extension, but __v16qi is a char
+     which may be signed or unsigned, so use __v16qs. */
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+  /* This function always performs a signed extension, but __v16qi is a char
+     which may be signed or unsigned, so use __v16qs. */
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mul_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mulhi_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mulhi_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mullo_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v16hu)__a * (__v16hu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mullo_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v8su)__a * (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mul_epu32(__m256i __a, __m256i __b)
+{
+  return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_si256(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4du)__a | (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sad_epu8(__m256i __a, __m256i __b)
+{
+  return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_shuffle_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) \
+  (__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))
+
+#define _mm256_shufflehi_epi16(a, imm) \
+  (__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))
+
+#define _mm256_shufflelo_epi16(a, imm) \
+  (__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sign_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sign_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sign_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_slli_si256(a, imm) \
+  (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
+
+#define _mm256_bslli_epi128(a, imm) \
+  (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_slli_epi16(__m256i __a, int __count)
+{
+  return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sll_epi16(__m256i __a, __m128i __count)
+{
+  return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_slli_epi32(__m256i __a, int __count)
+{
+  return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sll_epi32(__m256i __a, __m128i __count)
+{
+  return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_slli_epi64(__m256i __a, int __count)
+{
+  return __builtin_ia32_psllqi256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sll_epi64(__m256i __a, __m128i __count)
+{
+  return __builtin_ia32_psllq256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srai_epi16(__m256i __a, int __count)
+{
+  return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sra_epi16(__m256i __a, __m128i __count)
+{
+  return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srai_epi32(__m256i __a, int __count)
+{
+  return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sra_epi32(__m256i __a, __m128i __count)
+{
+  return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
+}
+
+#define _mm256_srli_si256(a, imm) \
+  (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
+
+#define _mm256_bsrli_epi128(a, imm) \
+  (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srli_epi16(__m256i __a, int __count)
+{
+  return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srl_epi16(__m256i __a, __m128i __count)
+{
+  return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srli_epi32(__m256i __a, int __count)
+{
+  return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srl_epi32(__m256i __a, __m128i __count)
+{
+  return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srli_epi64(__m256i __a, int __count)
+{
+  return __builtin_ia32_psrlqi256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srl_epi64(__m256i __a, __m128i __count)
+{
+  return __builtin_ia32_psrlq256((__v4di)__a, __count);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v32qu)__a - (__v32qu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v16hu)__a - (__v16hu)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v8su)__a - (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi64(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4du)__a - (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epu8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epu16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
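[Editor's note: as the shuffle indices above show, the 256-bit unpack intrinsics interleave within each 128-bit lane, not across the whole register. An illustrative sketch, not part of the header:]

#include <immintrin.h>

/* With bytes a = {a0..a31}, b = {b0..b31}, the low unpack yields
   {a0,b0,...,a7,b7, a16,b16,...,a23,b23}: bytes 8..15 of each lane
   come from the *high* unpack, because lanes are handled independently. */
__m256i interleave_lo_bytes(__m256i a, __m256i b) {
  return _mm256_unpacklo_epi8(a, b);
}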
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_xor_si256(__m256i __a, __m256i __b)
+{
+  return (__m256i)((__v4du)__a ^ (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_stream_load_si256(__m256i const *__V)
+{
+  typedef __v4di __v4di_aligned __attribute__((aligned(32)));
+  return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_broadcastss_ps(__m128 __X)
+{
+  return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_broadcastsd_pd(__m128d __a)
+{
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_broadcastss_ps(__m128 __X)
+{
+  return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_broadcastsd_pd(__m128d __X)
+{
+  return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastsi128_si256(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
+}
+
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+
+#define _mm_blend_epi32(V1, V2, M) \
+  (__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
+    (__v4si)(__m128i)(V2), (int)(M))
+
+#define _mm256_blend_epi32(V1, V2, M) \
+  (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
+    (__v8si)(__m256i)(V2), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastb_epi8(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastw_epi16(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastd_epi32(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastq_epi64(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastb_epi8(__m128i __X)
+{
+  return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastw_epi16(__m128i __X)
+{
+  return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastd_epi32(__m128i __X)
+{
+  return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastq_epi64(__m128i __X)
+{
+  return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_pd(V, M) \
+  (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M))
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
+{
+  return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) \
+  (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M))
+
+#define _mm256_permute2x128_si256(V1, V2, M) \
+  (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))
+
+#define _mm256_extracti128_si256(V, M) \
+  (__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))
+
+#define _mm256_inserti128_si256(V1, V2, M) \
+  (__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
+    (__v2di)(__m128i)(V2), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+  return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+  return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+  return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+  return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+  __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+  __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+  __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+  __builtin_ia32_maskstoreq((__v2di *)__X, (__v2di)__M, (__v2di)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
+}
+
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
+  (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), (double const *)(m), \
+    (__v4si)(__m128i)(i), (__v2df)(__m128d)(mask), (s))
+
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
+  (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), (double const *)(m), \
+    (__v4si)(__m128i)(i), (__v4df)(__m256d)(mask), (s))
+
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
+  (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), (double const *)(m), \
+    (__v2di)(__m128i)(i), (__v2df)(__m128d)(mask), (s))
+
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
+  (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), (double const *)(m), \
+    (__v4di)(__m256i)(i), (__v4df)(__m256d)(mask), (s))
+
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
+  (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), (float const *)(m), \
+    (__v4si)(__m128i)(i), (__v4sf)(__m128)(mask), (s))
+
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
+  (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), (float const *)(m), \
+    (__v8si)(__m256i)(i), (__v8sf)(__m256)(mask), (s))
+
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
+  (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), (float const *)(m), \
+    (__v2di)(__m128i)(i), (__v4sf)(__m128)(mask), (s))
+
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
+  (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), (float const *)(m), \
+    (__v4di)(__m256i)(i), (__v4sf)(__m128)(mask), (s))
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
+  (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), (int const *)(m), \
+    (__v4si)(__m128i)(i), (__v4si)(__m128i)(mask), (s))
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
+  (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), (int const *)(m), \
+    (__v8si)(__m256i)(i), (__v8si)(__m256i)(mask), (s))
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
+  (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), (int const *)(m), \
+    (__v2di)(__m128i)(i), (__v4si)(__m128i)(mask), (s))
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
+  (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), (int const *)(m), \
+    (__v4di)(__m256i)(i), (__v4si)(__m128i)(mask), (s))
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
+  (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), (long long const *)(m), \
+    (__v4si)(__m128i)(i), (__v2di)(__m128i)(mask), (s))
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
+  (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), (long long const *)(m), \
+    (__v4si)(__m128i)(i), (__v4di)(__m256i)(mask), (s))
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
+  (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), (long long const *)(m), \
+    (__v2di)(__m128i)(i), (__v2di)(__m128i)(mask), (s))
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
+  (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), (long long const *)(m), \
+    (__v4di)(__m256i)(i), (__v4di)(__m256i)(mask), (s))
+
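[Editor's note: in the masked gather forms above, only lanes whose mask's sign bit is set touch memory; the other lanes keep the source value. A hedged usage sketch, not part of the header:]

#include <immintrin.h>

/* Gather table[idx[i]] for lanes selected by mask; unselected lanes keep
   the fallback value. Scale is 4 because the elements are 32-bit. */
__m256i gather_or_default(const int *table, __m256i idx,
                          __m256i mask, __m256i fallback) {
  return _mm256_mask_i32gather_epi32(fallback, table, idx, mask, 4);
}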
+#define _mm_i32gather_pd(m, i, s) \
+  (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+    (double const *)(m), (__v4si)(__m128i)(i), \
+    (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), _mm_setzero_pd()), (s))
+
+#define _mm256_i32gather_pd(m, i, s) \
+  (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+    (double const *)(m), (__v4si)(__m128i)(i), \
+    (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), _mm256_setzero_pd(), \
+      _CMP_EQ_OQ), (s))
+
+#define _mm_i64gather_pd(m, i, s) \
+  (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+    (double const *)(m), (__v2di)(__m128i)(i), \
+    (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), _mm_setzero_pd()), (s))
+
+#define _mm256_i64gather_pd(m, i, s) \
+  (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+    (double const *)(m), (__v4di)(__m256i)(i), \
+    (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), _mm256_setzero_pd(), \
+      _CMP_EQ_OQ), (s))
+
+#define _mm_i32gather_ps(m, i, s) \
+  (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+    (float const *)(m), (__v4si)(__m128i)(i), \
+    (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), _mm_setzero_ps()), (s))
+
+#define _mm256_i32gather_ps(m, i, s) \
+  (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+    (float const *)(m), (__v8si)(__m256i)(i), \
+    (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), _mm256_setzero_ps(), \
+      _CMP_EQ_OQ), (s))
+
+#define _mm_i64gather_ps(m, i, s) \
+  (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+    (float const *)(m), (__v2di)(__m128i)(i), \
+    (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), _mm_setzero_ps()), (s))
+
+#define _mm256_i64gather_ps(m, i, s) \
+  (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+    (float const *)(m), (__v4di)(__m256i)(i), \
+    (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), _mm_setzero_ps()), (s))
+
+#define _mm_i32gather_epi32(m, i, s) \
+  (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+    (int const *)(m), (__v4si)(__m128i)(i), (__v4si)_mm_set1_epi32(-1), (s))
+
+#define _mm256_i32gather_epi32(m, i, s) \
+  (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+    (int const *)(m), (__v8si)(__m256i)(i), (__v8si)_mm256_set1_epi32(-1), (s))
+
+#define _mm_i64gather_epi32(m, i, s) \
+  (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+    (int const *)(m), (__v2di)(__m128i)(i), (__v4si)_mm_set1_epi32(-1), (s))
+
+#define _mm256_i64gather_epi32(m, i, s) \
+  (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+    (int const *)(m), (__v4di)(__m256i)(i), (__v4si)_mm_set1_epi32(-1), (s))
+
+#define _mm_i32gather_epi64(m, i, s) \
+  (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+    (long long const *)(m), (__v4si)(__m128i)(i), \
+    (__v2di)_mm_set1_epi64x(-1), (s))
+
+#define _mm256_i32gather_epi64(m, i, s) \
+  (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+    (long long const *)(m), (__v4si)(__m128i)(i), \
+    (__v4di)_mm256_set1_epi64x(-1), (s))
+
+#define _mm_i64gather_epi64(m, i, s) \
+  (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+    (long long const *)(m), (__v2di)(__m128i)(i), \
+    (__v2di)_mm_set1_epi64x(-1), (s))
+
+#define _mm256_i64gather_epi64(m, i, s) \
+  (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+    (long long const *)(m), (__v4di)(__m256i)(i), \
+    (__v4di)_mm256_set1_epi64x(-1), (s))
+
+#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128
+
+#endif /* __AVX2INTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bf16intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bf16intrin.h
new file mode 100644
index 0000000..d1d87e7
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bf16intrin.h
@@ -0,0 +1,279 @@
+/*===------------ avx512bf16intrin.h - AVX512_BF16 intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bf16intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BF16INTRIN_H
+#define __AVX512BF16INTRIN_H
+
+typedef short __m512bh __attribute__((__vector_size__(64), __aligned__(64)));
+typedef short __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
+typedef unsigned short __bfloat16;
+
+#define __DEFAULT_FN_ATTRS512 \
+  __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16"), \
+                 __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16")))
+
+/// Convert One BF16 Data to One Single Float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic does not correspond to a specific instruction.
+///
+/// \param __A
+///    A bfloat data.
+/// \returns A float data whose sign and exponent fields are kept unchanged,
+///    and whose fraction field is extended to 23 bits.
+static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bfloat16 __A) {
+  return __builtin_ia32_cvtsbf162ss_32(__A);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions.
+///
+/// \param __A
+///    A 512-bit vector of [16 x float].
+/// \param __B
+///    A 512-bit vector of [16 x float].
+/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
+///    the conversion of __B, and whose higher 256 bits come from the
+///    conversion of __A.
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512
+_mm512_cvtne2ps_pbh(__m512 __A, __m512 __B) {
+  return (__m512bh)__builtin_ia32_cvtne2ps2bf16_512((__v16sf) __A,
+                                                    (__v16sf) __B);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions.
+///
+/// \param __A
+///    A 512-bit vector of [16 x float].
+/// \param __B
+///    A 512-bit vector of [16 x float].
+/// \param __W
+///    A 512-bit vector of [32 x bfloat].
+/// \param __U
+///    A 32-bit mask value specifying what is chosen for each element.
+///    A 1 means conversion of __A or __B. A 0 means element from __W.
+/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
+///    the conversion of __B, and whose higher 256 bits come from the
+///    conversion of __A.
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) {
+  return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U,
+                                              (__v32hi)_mm512_cvtne2ps_pbh(__A, __B),
+                                              (__v32hi)__W);
+}
+
+/// Convert Two Packed Single Data to One Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions.
+///
+/// \param __A
+///    A 512-bit vector of [16 x float].
+/// \param __B
+///    A 512-bit vector of [16 x float].
+/// \param __U
+///    A 32-bit mask value specifying what is chosen for each element.
+///    A 1 means conversion of __A or __B. A 0 means element is zero.
+/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
+///    the conversion of __B, and whose higher 256 bits come from the
+///    conversion of __A.
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) {
+  return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U,
+                                              (__v32hi)_mm512_cvtne2ps_pbh(__A, __B),
+                                              (__v32hi)_mm512_setzero_si512());
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions.
+///
+/// \param __A
+///    A 512-bit vector of [16 x float].
+/// \returns A 256-bit vector of [16 x bfloat] that comes from the conversion
+///    of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS512
+_mm512_cvtneps_pbh(__m512 __A) {
+  return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
+                                                        (__v16hi)_mm256_undefined_si256(),
+                                                        (__mmask16)-1);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions.
+///
+/// \param __A
+///    A 512-bit vector of [16 x float].
+/// \param __W
+///    A 256-bit vector of [16 x bfloat].
+/// \param __U
+///    A 16-bit mask value specifying what is chosen for each element.
+///    A 1 means conversion of __A. A 0 means element from __W.
+/// \returns A 256-bit vector of [16 x bfloat] that comes from the conversion
+///    of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) {
+  return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
+                                                        (__v16hi)__W,
+                                                        (__mmask16)__U);
+}
+
+/// Convert Packed Single Data to Packed BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions.
+///
+/// \param __A
+///    A 512-bit vector of [16 x float].
+/// \param __U
+///    A 16-bit mask value specifying what is chosen for each element.
+///    A 1 means conversion of __A. A 0 means element is zero.
+/// \returns A 256-bit vector of [16 x bfloat] that comes from the conversion
+///    of __A.
+static __inline__ __m256bh __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) {
+  return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
+                                                        (__v16hi)_mm256_setzero_si256(),
+                                                        (__mmask16)__U);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 512-bit vector of [32 x bfloat].
+/// \param __B
+///    A 512-bit vector of [32 x bfloat].
+/// \param __D
+///    A 512-bit vector of [16 x float].
+/// \returns A 512-bit vector of [16 x float] that comes from the dot product
+///    of __A, __B and __D.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_dpbf16_ps(__m512 __D, __m512bh __A, __m512bh __B) {
+  return (__m512)__builtin_ia32_dpbf16ps_512((__v16sf) __D,
+                                             (__v16si) __A,
+                                             (__v16si) __B);
+}
+
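[Editor's note: together, the conversion and dot-product intrinsics above form the usual BF16 kernel shape: pack two float vectors into one BF16 vector, then accumulate pairwise dot products back into float. An illustrative sketch, assuming `-mavx512bf16`:]

#include <immintrin.h>

/* Pack 2x16 floats per operand into 32 bf16 lanes, then accumulate the
   pairwise products into 16 float lanes of acc. */
__m512 fma_bf16(__m512 acc, __m512 a_hi, __m512 a_lo,
                __m512 b_hi, __m512 b_lo) {
  __m512bh a = _mm512_cvtne2ps_pbh(a_hi, a_lo);
  __m512bh b = _mm512_cvtne2ps_pbh(b_hi, b_lo);
  return _mm512_dpbf16_ps(acc, a, b);
}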
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 512-bit vector of [32 x bfloat].
+/// \param __B
+///    A 512-bit vector of [32 x bfloat].
+/// \param __D
+///    A 512-bit vector of [16 x float].
+/// \param __U
+///    A 16-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
+/// \returns A 512-bit vector of [16 x float] that comes from the dot product
+///    of __A, __B and __D.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask_dpbf16_ps(__m512 __D, __mmask16 __U, __m512bh __A, __m512bh __B) {
+  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+                                             (__v16sf)_mm512_dpbf16_ps(__D, __A, __B),
+                                             (__v16sf)__D);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 512-bit vector of [32 x bfloat].
+/// \param __B
+///    A 512-bit vector of [32 x bfloat].
+/// \param __D
+///    A 512-bit vector of [16 x float].
+/// \param __U
+///    A 16-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
+/// \returns A 512-bit vector of [16 x float] that comes from the dot product
+///    of __A, __B and __D.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_maskz_dpbf16_ps(__mmask16 __U, __m512 __D, __m512bh __A, __m512bh __B) {
+  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+                                             (__v16sf)_mm512_dpbf16_ps(__D, __A, __B),
+                                             (__v16sf)_mm512_setzero_si512());
+}
+
+/// Convert Packed BF16 Data to Packed float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __A
+///    A 256-bit vector of [16 x bfloat].
+/// \returns A 512-bit vector of [16 x float] that comes from the conversion
+///    of __A.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) {
+  return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
+      (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __U
+///    A 16-bit mask. Elements are zeroed out when the corresponding mask
+///    bit is not set.
+/// \param __A
+///    A 256-bit vector of [16 x bfloat].
+/// \returns A 512-bit vector of [16 x float] that comes from the conversion
+///    of __A.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) {
+  return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
+      (__m512i)_mm512_maskz_cvtepi16_epi32((__mmask16)__U, (__m256i)__A), 16));
+}
+
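[Editor's note: as the bodies above show, widening BF16 to float is just a 16-bit left shift of the zero-extended bits into the high half of a binary32, so it is lossless. An equivalent scalar sketch for one element, illustrative only:]

#include <stdint.h>
#include <string.h>

/* A bf16 value is the high 16 bits of a binary32. */
static float bf16_to_float(uint16_t b) {
  uint32_t u = (uint32_t)b << 16;
  float f;
  memcpy(&f, &u, sizeof f);
  return f;
}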
+/// \returns A 512-bit vector of [16 x float] that comes from the conversion
+///    of __A.
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) {
+  return _mm512_castsi512_ps((__m512i)_mm512_mask_slli_epi32(
+      (__m512i)__S, (__mmask16)__U,
+      (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bitalgintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bitalgintrin.h
new file mode 100644
index 0000000..d4411d1
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bitalgintrin.h
@@ -0,0 +1,83 @@
+/*===------------- avx512bitalgintrin.h - BITALG intrinsics ------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bitalgintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BITALGINTRIN_H
+#define __AVX512BITALGINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bitalg"), __min_vector_width__(512)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_popcnt_epi16(__m512i __A)
+{
+  return (__m512i) __builtin_ia32_vpopcntw_512((__v32hi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_selectw_512((__mmask32) __U,
+              (__v32hi) _mm512_popcnt_epi16(__B),
+              (__v32hi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B)
+{
+  return _mm512_mask_popcnt_epi16((__m512i) _mm512_setzero_si512(),
+              __U,
+              __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_popcnt_epi8(__m512i __A)
+{
+  return (__m512i) __builtin_ia32_vpopcntb_512((__v64qi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_selectb_512((__mmask64) __U,
+              (__v64qi) _mm512_popcnt_epi8(__B),
+              (__v64qi) __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B)
+{
+  return _mm512_mask_popcnt_epi8((__m512i) _mm512_setzero_si512(),
+              __U,
+              __B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_vpshufbitqmb512_mask((__v64qi) __A,
+              (__v64qi) __B,
+              __U);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B)
+{
+  return _mm512_mask_bitshuffle_epi64_mask((__mmask64) -1,
+              __A,
+              __B);
+}
+
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bwintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bwintrin.h
new file mode 100644
index 0000000..4281a33
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512bwintrin.h
@@ -0,0 +1,2024 @@
+/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
+ *
+ *
+ * Part of the 
LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512BWINTRIN_H
+#define __AVX512BWINTRIN_H
+
+typedef unsigned int __mmask32;
+typedef unsigned long long __mmask64;
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+
+static __inline __mmask32 __DEFAULT_FN_ATTRS
+_knot_mask32(__mmask32 __M)
+{
+  return __builtin_ia32_knotsi(__M);
+}
+
+static __inline __mmask64 __DEFAULT_FN_ATTRS
+_knot_mask64(__mmask64 __M)
+{
+  return __builtin_ia32_knotdi(__M);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kand_mask32(__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kand_mask64(__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kandn_mask32(__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kandn_mask64(__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kor_mask32(__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kor_mask64(__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kxnor_mask32(__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kxnor_mask64(__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kxor_mask32(__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kxor_mask64(__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask32_u8(__mmask32 __A, __mmask32 __B)
+{
+  return (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestz_mask32_u8(__mmask32 __A, __mmask32 __B)
+{
+  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
+  *__C = (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
+  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B)
+{
+  return (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
+}
+
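The 32- and 64-bit mask intrinsics above map one-to-one onto the AVX-512 k-register instructions, so compare results can be combined and tested without round-tripping through general-purpose registers. A minimal usage sketch, assuming a -mavx512bw build against this header; the helper bytes_equal_except and its contract are invented for illustration:

#include <immintrin.h>

/* Sketch: returns 1 when every byte of __a not excluded by __skip equals
 * the corresponding byte of __b. bytes_equal_except is a hypothetical
 * helper, not part of the header above. Build with -mavx512bw. */
static inline int
bytes_equal_except(__m512i __a, __m512i __b, __mmask64 __skip)
{
  __mmask64 __eq = _mm512_cmpeq_epi8_mask(__a, __b); /* one bit per equal byte */
  /* _kandn_mask64(A, B) computes ~A & B: bytes that differ and are not skipped. */
  __mmask64 __miss = _kandn_mask64(__skip, _knot_mask64(__eq));
  /* _kortestz_mask64_u8 returns 1 iff (__miss | __miss) == 0. */
  return (int)_kortestz_mask64_u8(__miss, __miss);
}

Testing a mask against itself with _kortestz_mask64_u8 is the usual way to ask whether a single mask is all zero.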
+static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B) +{ + return (unsigned char)__builtin_ia32_kortestzdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_kortestcdi(__A, __B); + return (unsigned char)__builtin_ia32_kortestzdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestc_mask32_u8(__mmask32 __A, __mmask32 __B) +{ + return (unsigned char)__builtin_ia32_ktestcsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestz_mask32_u8(__mmask32 __A, __mmask32 __B) +{ + return (unsigned char)__builtin_ia32_ktestzsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_ktestcsi(__A, __B); + return (unsigned char)__builtin_ia32_ktestzsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B) +{ + return (unsigned char)__builtin_ia32_ktestcdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B) +{ + return (unsigned char)__builtin_ia32_ktestzdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_ktestcdi(__A, __B); + return (unsigned char)__builtin_ia32_ktestzdi(__A, __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kadd_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_kadd_mask64(__mmask64 __A, __mmask64 __B) +{ + return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B); +} + +#define _kshiftli_mask32(A, I) \ + (__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I)) + +#define _kshiftri_mask32(A, I) \ + (__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I)) + +#define _kshiftli_mask64(A, I) \ + (__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I)) + +#define _kshiftri_mask64(A, I) \ + (__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I)) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_cvtmask32_u32(__mmask32 __A) { + return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_cvtmask64_u64(__mmask64 __A) { + return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_cvtu32_mask32(unsigned int __A) { + return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_cvtu64_mask64(unsigned long long __A) { + return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_load_mask32(__mmask32 *__A) { + return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_load_mask64(__mmask64 *__A) { + return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask32(__mmask32 *__A, __mmask32 __B) { + *(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask64(__mmask64 *__A, __mmask64 __B) { + *(__mmask64 *)__A = 
__builtin_ia32_kmovq((__mmask64)__B); +} + +/* Integer compare */ + +#define _mm512_cmp_epi8_mask(a, b, p) \ + (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)-1) + +#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \ + (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)(m)) + +#define _mm512_cmp_epu8_mask(a, b, p) \ + (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)-1) + +#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \ + (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)(m)) + +#define _mm512_cmp_epi16_mask(a, b, p) \ + (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)-1) + +#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \ + (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)(m)) + +#define _mm512_cmp_epu16_mask(a, b, p) \ + (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)-1) + +#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \ + (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)(m)) + +#define _mm512_cmpeq_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT) 
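Each _mm512[_mask]_cmp_ep*_mask macro takes its predicate as a compile-time _MM_CMPINT_* immediate, and the named forms above simply pin that predicate (cmplt is _MM_CMPINT_LT, and so on). A sketch of the common mask-then-count pattern, again assuming -mavx512bw; count_bytes_below is an invented name, not part of the header:

#include <immintrin.h>

/* Sketch: count the bytes of __v that are strictly below __t, using an
 * unsigned compare. count_bytes_below is a hypothetical helper. */
static inline int
count_bytes_below(__m512i __v, unsigned char __t)
{
  __m512i __thresh = _mm512_set1_epi8((char)__t);
  /* Expands to _mm512_cmp_epu8_mask(__v, __thresh, _MM_CMPINT_LT). */
  __mmask64 __lt = _mm512_cmplt_epu8_mask(__v, __thresh);
  /* Move the k register to a scalar and count the set bits. */
  return __builtin_popcountll(_cvtmask64_u64(__lt));
}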
+#define _mm512_cmpneq_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi8 (__m512i __A, __m512i __B) { + return (__m512i) ((__v64qu) __A + (__v64qu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_add_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_add_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi8 (__m512i __A, __m512i __B) { + return (__m512i) ((__v64qu) __A - (__v64qu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return 
(__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_sub_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_sub_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hu) __A + (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_add_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_add_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hu) __A - (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sub_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sub_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullo_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hu) __A * (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mullo_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mullo_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __W, + (__v64qi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __W, + (__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi8 (__m512i __A) +{ + return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_abs_epi8(__A), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_abs_epi8(__A), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi16 (__m512i __A) +{ + return 
(__m512i)__builtin_ia32_pabsw512((__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_abs_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_abs_epi16(__A), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packs_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packs_epi32(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packs_epi32(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packs_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packs_epi16(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packs_epi16(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packus_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packus_epi32(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packus_epi32(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packus_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packus_epi16(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packus_epi16(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
+_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_avg_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pavgb512((__v64qi)__A, (__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_avg_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_avg_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_avg_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pavgw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return 
(__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_avg_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_avg_epu16(__A, __B), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_shuffle_epi8(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_shuffle_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_shuffle_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return 
(__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I, + (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B), + (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B), + (__v32hi)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mulhrs_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return 
(__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhrs_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhrs_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mulhi_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mulhi_epu16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maddubs_epi16(__m512i __X, __m512i __Y) { + return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X, + __m512i __Y) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U, + (__v32hi)_mm512_maddubs_epi16(__X, __Y), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U, + (__v32hi)_mm512_maddubs_epi16(__X, __Y), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_madd_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_madd_epi16(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_madd_epi16(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi)_mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask 
((__v32hi) __A, + (__v32qi)__O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_undefined_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi8(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B, + 8, 64+8, 9, 64+9, + 10, 64+10, 11, 64+11, + 12, 64+12, 13, 64+13, + 14, 64+14, 15, 64+15, + 24, 64+24, 25, 64+25, + 26, 64+26, 27, 64+27, + 28, 64+28, 29, 64+29, + 30, 64+30, 31, 64+31, + 40, 64+40, 41, 64+41, + 42, 64+42, 43, 64+43, + 44, 64+44, 45, 64+45, + 46, 64+46, 47, 64+47, + 56, 64+56, 57, 64+57, + 58, 64+58, 59, 64+59, + 60, 64+60, 61, 64+61, + 62, 64+62, 63, 64+63); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpackhi_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpackhi_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B, + 4, 32+4, 5, 32+5, + 6, 32+6, 7, 32+7, + 12, 32+12, 13, 32+13, + 14, 32+14, 15, 32+15, + 20, 32+20, 21, 
32+21, + 22, 32+22, 23, 32+23, + 28, 32+28, 29, 32+29, + 30, 32+30, 31, 32+31); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpackhi_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpackhi_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi8(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B, + 0, 64+0, 1, 64+1, + 2, 64+2, 3, 64+3, + 4, 64+4, 5, 64+5, + 6, 64+6, 7, 64+7, + 16, 64+16, 17, 64+17, + 18, 64+18, 19, 64+19, + 20, 64+20, 21, 64+21, + 22, 64+22, 23, 64+23, + 32, 64+32, 33, 64+33, + 34, 64+34, 35, 64+35, + 36, 64+36, 37, 64+37, + 38, 64+38, 39, 64+39, + 48, 64+48, 49, 64+49, + 50, 64+50, 51, 64+51, + 52, 64+52, 53, 64+53, + 54, 64+54, 55, 64+55); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpacklo_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpacklo_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B, + 0, 32+0, 1, 32+1, + 2, 32+2, 3, 32+3, + 8, 32+8, 9, 32+9, + 10, 32+10, 11, 32+11, + 16, 32+16, 17, 32+17, + 18, 32+18, 19, 32+19, + 24, 32+24, 25, 32+25, + 26, 32+26, 27, 32+27); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpacklo_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpacklo_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi8_epi16(__m256i __A) +{ + /* This function always performs a signed extension, but __v32qi is a char + which may be signed or unsigned, so use __v32qs. 
*/ + return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepi8_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepi8_epi16(__A), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu8_epi16(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepu8_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepu8_epi16(__A), + (__v32hi)_mm512_setzero_si512()); +} + + +#define _mm512_shufflehi_epi16(A, imm) \ + (__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm)) + +#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflehi_epi16((A), \ + (imm)), \ + (__v32hi)(__m512i)(W)) + +#define _mm512_maskz_shufflehi_epi16(U, A, imm) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflehi_epi16((A), \ + (imm)), \ + (__v32hi)_mm512_setzero_si512()) + +#define _mm512_shufflelo_epi16(A, imm) \ + (__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm)) + + +#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflelo_epi16((A), \ + (imm)), \ + (__v32hi)(__m512i)(W)) + + +#define _mm512_maskz_shufflelo_epi16(U, A, imm) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflelo_epi16((A), \ + (imm)), \ + (__v32hi)_mm512_setzero_si512()) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sllv_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sllv_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sllv_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sll_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sll_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sll_epi16(__A, __B), + 
(__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_slli_epi16(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_slli_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_slli_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +#define _mm512_bslli_epi128(a, imm) \ + (__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srlv_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srlv_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srlv_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srav_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srav_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srav_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sra_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sra_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sra_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srai_epi16(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srai_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + 
(__v32hi)_mm512_srai_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srl_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srl_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srl_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srli_epi16(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srli_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srli_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +#define _mm512_bsrli_epi128(a, imm) \ + (__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __A, + (__v32hi) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __A, + (__v32hi) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __A, + (__v64qi) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __A, + (__v64qi) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_selectb_512(__M, + (__v64qi)_mm512_set1_epi8(__A), + (__v64qi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi8 (__mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_set1_epi8(__A), + (__v64qi) _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_kunpackd (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A, + (__mmask64) __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_kunpackw (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A, + (__mmask32) __B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi16 (void const *__P) +{ + struct __loadu_epi16 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi16*)__P)->__v; +} + +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi8 (void const *__P) +{ + struct __loadu_epi8 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi8*)__P)->__v; +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P, + (__v64qi) __W, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi16 (void *__P, __m512i __A) +{ + struct __storeu_epi16 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi16*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A) +{ + __builtin_ia32_storedquhi512_mask ((__v32hi *) __P, + (__v32hi) __A, + (__mmask32) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi8 (void *__P, __m512i __A) +{ + struct __storeu_epi8 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi8*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A) +{ + __builtin_ia32_storedquqi512_mask ((__v64qi *) __P, + (__v64qi) __A, + (__mmask64) __U); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_test_epi8_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_test_epi16_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi8_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi16_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} 
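As a quick illustration of how the unaligned-load and test intrinsics above compose, here is a minimal sketch. It is not taken from the header itself: it assumes a toolchain with AVX512BW enabled (e.g. -mavx512bw), and the helper name count_nonzero_bytes is purely illustrative.

  #include <immintrin.h>

  /* Count the non-zero bytes among the first min(len, 64) bytes of p.
     _mm512_maskz_loadu_epi8 zeroes the lanes whose mask bit is clear,
     and _mm512_test_epi8_mask sets a mask bit wherever (a & b) != 0. */
  static unsigned count_nonzero_bytes(const unsigned char *p, unsigned len) {
    __mmask64 valid = (len >= 64) ? ~0ULL : ((1ULL << len) - 1);
    __m512i v = _mm512_maskz_loadu_epi8(valid, p);
    __mmask64 nz = _mm512_test_epi8_mask(v, v);
    return (unsigned)__builtin_popcountll(nz);
  }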
+ +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_movepi8_mask (__m512i __A) +{ + return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_movepi16_mask (__m512i __A) +{ + return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi8 (__mmask64 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2b512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi16 (__mmask32 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2w512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastb_epi8 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_broadcastb_epi8(__A), + (__v64qi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_broadcastb_epi8(__A), + (__v64qi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_set1_epi16(__A), + (__v32hi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi16 (__mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_set1_epi16(__A), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastw_epi16 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_broadcastw_epi16(__A), + (__v32hi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_broadcastw_epi16(__A), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_permutexvar_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return 
(__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_permutexvar_epi16(__A, __B), + (__v32hi)__W); +} + +#define _mm512_alignr_epi8(A, B, N) \ + (__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(N)) + +#define _mm512_mask_alignr_epi8(W, U, A, B, N) \ + (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \ + (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \ + (__v64qi)(__m512i)(W)) + +#define _mm512_maskz_alignr_epi8(U, A, B, N) \ + (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \ + (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \ + (__v64qi)(__m512i)_mm512_setzero_si512()) + +#define _mm512_dbsad_epu8(A, B, imm) \ + (__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(imm)) + +#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \ + (__v32hi)(__m512i)(W)) + +#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \ + (__v32hi)_mm512_setzero_si512()) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sad_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A, + (__v64qi) __B); +} + +#undef __DEFAULT_FN_ATTRS512 +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512cdintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512cdintrin.h new file mode 100644 index 0000000..bfdba84 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512cdintrin.h @@ -0,0 +1,123 @@ +/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use <avx512cdintrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __AVX512CDINTRIN_H +#define __AVX512CDINTRIN_H + +/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd"), __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_conflict_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictdi_512 ((__v8di) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_conflict_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_conflict_epi64(__A), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_conflict_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictsi_512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_conflict_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_conflict_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_lzcnt_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntd_512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_lzcnt_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_lzcnt_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_lzcnt_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntq_512 ((__v8di) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_lzcnt_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_lzcnt_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m512i) _mm512_set1_epi64((long long) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m512i) _mm512_set1_epi32((int) __A); + +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512dqintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512dqintrin.h new file mode 100644 index 0000000..337256c --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512dqintrin.h @@ -0,0 +1,1377 @@ +/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __AVX512DQINTRIN_H +#define __AVX512DQINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"), __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"))) + +static __inline __mmask8 __DEFAULT_FN_ATTRS +_knot_mask8(__mmask8 __M) +{ + return __builtin_ia32_knotqi(__M); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_kand_mask8(__mmask8 __A, __mmask8 __B) +{ + return (__mmask8)__builtin_ia32_kandqi((__mmask8)__A, (__mmask8)__B); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_kandn_mask8(__mmask8 __A, __mmask8 __B) +{ + return (__mmask8)__builtin_ia32_kandnqi((__mmask8)__A, (__mmask8)__B); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_kor_mask8(__mmask8 __A, __mmask8 __B) +{ + return (__mmask8)__builtin_ia32_korqi((__mmask8)__A, (__mmask8)__B); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_kxnor_mask8(__mmask8 __A, __mmask8 __B) +{ + return (__mmask8)__builtin_ia32_kxnorqi((__mmask8)__A, (__mmask8)__B); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_kxor_mask8(__mmask8 __A, __mmask8 __B) +{ + return (__mmask8)__builtin_ia32_kxorqi((__mmask8)__A, (__mmask8)__B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestc_mask8_u8(__mmask8 __A, __mmask8 __B) +{ + return (unsigned char)__builtin_ia32_kortestcqi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestz_mask8_u8(__mmask8 __A, __mmask8 __B) +{ + return (unsigned char)__builtin_ia32_kortestzqi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_kortestcqi(__A, __B); + return (unsigned char)__builtin_ia32_kortestzqi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestc_mask8_u8(__mmask8 __A, __mmask8 __B) +{ + return (unsigned char)__builtin_ia32_ktestcqi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestz_mask8_u8(__mmask8 __A, __mmask8 __B) +{ + return (unsigned char)__builtin_ia32_ktestzqi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_ktestcqi(__A, __B); + return (unsigned char)__builtin_ia32_ktestzqi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestc_mask16_u8(__mmask16 __A, __mmask16 __B) +{ + return (unsigned char)__builtin_ia32_ktestchi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestz_mask16_u8(__mmask16 __A, __mmask16 __B) +{ + return (unsigned char)__builtin_ia32_ktestzhi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_ktestchi(__A, __B); + return (unsigned char)__builtin_ia32_ktestzhi(__A, __B); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_kadd_mask8(__mmask8 __A, __mmask8 __B) +{ + return (__mmask8)__builtin_ia32_kaddqi((__mmask8)__A, (__mmask8)__B); +} + +static __inline__ __mmask16
__DEFAULT_FN_ATTRS +_kadd_mask16(__mmask16 __A, __mmask16 __B) +{ + return (__mmask16)__builtin_ia32_kaddhi((__mmask16)__A, (__mmask16)__B); +} + +#define _kshiftli_mask8(A, I) \ + (__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I)) + +#define _kshiftri_mask8(A, I) \ + (__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I)) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_cvtmask8_u32(__mmask8 __A) { + return (unsigned int)__builtin_ia32_kmovb((__mmask8)__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_cvtu32_mask8(unsigned int __A) { + return (__mmask8)__builtin_ia32_kmovb((__mmask8)__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_load_mask8(__mmask8 *__A) { + return (__mmask8)__builtin_ia32_kmovb(*(__mmask8 *)__A); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask8(__mmask8 *__A, __mmask8 __B) { + *(__mmask8 *)__A = __builtin_ia32_kmovb((__mmask8)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullo_epi64 (__m512i __A, __m512i __B) { + return (__m512i) ((__v8du) __A * (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullo_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullo_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_xor_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A ^ (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_xor_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_xor_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_xor_ps (__m512 __A, __m512 __B) { + return (__m512)((__v16su)__A ^ (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_xor_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_xor_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_or_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A | (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_or_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_or_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_or_ps(__m512 
__A, __m512 __B) { + return (__m512)((__v16su)__A | (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_or_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_or_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_and_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_and_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_and_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_and_ps(__m512 __A, __m512 __B) { + return (__m512)((__v16su)__A & (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_and_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_and_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_andnot_pd(__m512d __A, __m512d __B) { + return (__m512d)(~(__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_andnot_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_andnot_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_andnot_ps(__m512 __A, __m512 __B) { + return (__m512)(~(__v16su)__A & (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_andnot_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_andnot_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epi64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epi64(A, R) \ + (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) \ + (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epu64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epu64(A, R) \ + (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) \ + (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epi64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epi64(A, R) \ + (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundps_epi64(U, A, R) \ + (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epu64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) 
_mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epu64(A, R) \ + (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundps_epu64(U, A, R) \ + (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_pd (__m512i __A) { + return (__m512d)__builtin_convertvector((__v8di)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepi64_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepi64_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_cvt_roundepi64_pd(A, R) \ + (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) \ + (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) \ + (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_ps (__m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi64_ps(A, R) \ + (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) \ + (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) \ + (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epi64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epi64(A, R) \ + (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) \ + (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epu64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epu64(A, R) \ + (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) \ + (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epi64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epi64(A, R) \ + (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) \ + 
(__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) \ + (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epu64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epu64(A, R) \ + (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) \ + (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) \ + (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepu64_pd (__m512i __A) { + return (__m512d)__builtin_convertvector((__v8du)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepu64_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepu64_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_cvt_roundepu64_pd(A, R) \ + (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) \ + (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) \ + (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_cvtepu64_ps (__m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepu64_ps(A, R) \ + 
(__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) \ + (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) \ + (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_range_pd(A, B, C) \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_range_pd(W, U, A, B, C) \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_range_pd(U, A, B, C) \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_range_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_range_round_pd(W, U, A, B, C, R) \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm512_maskz_range_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_range_ps(A, B, C) \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_range_ps(W, U, A, B, C) \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_range_ps(U, A, B, C) \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_range_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_range_round_ps(W, U, A, B, C, R) \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)) + +#define _mm512_maskz_range_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +#define _mm_range_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8) -1, (int)(C),\ + (int)(R)) + +#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_range_round_ss(W, U, A, B, C, R) \ + (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + 
(__v4sf)(__m128)(W),\ + (__mmask8)(U), (int)(C),\ + (int)(R)) + +#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_range_round_ss(U, A, B, C, R) \ + (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C),\ + (int)(R)) + +#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_range_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8) -1, (int)(C),\ + (int)(R)) + +#define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_range_round_sd(W, U, A, B, C, R) \ + (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W),\ + (__mmask8)(U), (int)(C),\ + (int)(R)) + +#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_range_round_sd(U, A, B, C, R) \ + (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C),\ + (int)(R)) + +#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm512_reduce_pd(A, B) \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_reduce_pd(W, U, A, B) \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_reduce_pd(U, A, B) \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_reduce_ps(A, B) \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_reduce_ps(W, U, A, B) \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_reduce_ps(U, A, B) \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_reduce_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_reduce_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_reduce_round_ps(A, B, R) \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), 
(int)(R)) + +#define _mm512_maskz_reduce_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +#define _mm_reduce_ss(A, B, C) \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \ + (int)(C), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_reduce_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(C), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_reduce_ss(U, A, B, C) \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_reduce_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \ + (int)(C), (int)(R)) + +#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(C), (int)(R)) + +#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C), (int)(R)) + +#define _mm_reduce_sd(A, B, C) \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(C), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_reduce_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), (__mmask8)(U), \ + (int)(C), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_reduce_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_reduce_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(C), (int)(R)) + +#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), (__mmask8)(U), \ + (int)(C), (int)(R)) + +#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C), (int)(R)) + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_movepi32_mask (__m512i __A) +{ + return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi32 (__mmask16 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2d512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi64 (__mmask8 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2q512 (__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_movepi64_mask (__m512i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A); +} + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f32x2 (__m128 __A) +{ + return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 0, 1, 0, 1, 0, 1, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static 
__inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x2(__A), + (__v16sf)__O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x2(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f32x8(__m256 __A) +{ + return (__m512)__builtin_shufflevector((__v8sf)__A, (__v8sf)__A, + 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x8(__A), + (__v16sf)__O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x8(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f64x2(__m128d __A) +{ + return (__m512d)__builtin_shufflevector((__v2df)__A, (__v2df)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x2(__A), + (__v8df)__O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x2(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i32x2 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 0, 1, 0, 1, 0, 1, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x2(__A), + (__v16si)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x2(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i32x8(__m256i __A) +{ + return (__m512i)__builtin_shufflevector((__v8si)__A, (__v8si)__A, + 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x8(__A), + (__v16si)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x8(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i64x2(__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v2di)__A, (__v2di)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
+_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x2(__A), + (__v8di)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x2(__A), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_extractf32x8_ps(A, imm) \ + (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v8sf)_mm256_undefined_ps(), \ + (__mmask8)-1) + +#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \ + (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extractf32x8_ps(U, A, imm) \ + (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm512_extractf64x2_pd(A, imm) \ + (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \ + (int)(imm), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1) + +#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \ + (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extractf64x2_pd(U, A, imm) \ + (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm512_extracti32x8_epi32(A, imm) \ + (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v8si)_mm256_undefined_si256(), \ + (__mmask8)-1) + +#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \ + (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extracti32x8_epi32(U, A, imm) \ + (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U)) + +#define _mm512_extracti64x2_epi64(A, imm) \ + (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \ + (int)(imm), \ + (__v2di)_mm_undefined_si128(), \ + (__mmask8)-1) + +#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \ + (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \ + (int)(imm), \ + (__v2di)(__m128i)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extracti64x2_epi64(U, A, imm) \ + (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \ + (int)(imm), \ + (__v2di)_mm_setzero_si128(), \ + (__mmask8)(U)) + +#define _mm512_insertf32x8(A, B, imm) \ + (__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \ + (__v8sf)(__m256)(B), (int)(imm)) + +#define _mm512_mask_insertf32x8(W, U, A, B, imm) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_insertf32x8(U, A, B, imm) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps()) + +#define _mm512_insertf64x2(A, B, imm) \ + (__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \ + (__v2df)(__m128d)(B), (int)(imm)) + +#define _mm512_mask_insertf64x2(W, U, A, B, imm) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x2((A), (B), (imm)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_insertf64x2(U, A, B, imm) \ + 
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x2((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_inserti32x8(A, B, imm) \ + (__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \ + (__v8si)(__m256i)(B), (int)(imm)) + +#define _mm512_mask_inserti32x8(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x8((A), (B), (imm)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_inserti32x8(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x8((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_inserti64x2(A, B, imm) \ + (__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \ + (__v2di)(__m128i)(B), (int)(imm)) + +#define _mm512_mask_inserti64x2(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x2((A), (B), (imm)), \ + (__v8di)(__m512i)(W)) + +#define _mm512_maskz_inserti64x2(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x2((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()) + +#define _mm512_mask_fpclass_ps_mask(U, A, imm) \ + (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \ + (int)(imm), (__mmask16)(U)) + +#define _mm512_fpclass_ps_mask(A, imm) \ + (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \ + (int)(imm), (__mmask16)-1) + +#define _mm512_mask_fpclass_pd_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__mmask8)(U)) + +#define _mm512_fpclass_pd_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__mmask8)-1) + +#define _mm_fpclass_sd_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)-1) + +#define _mm_mask_fpclass_sd_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)(U)) + +#define _mm_fpclass_ss_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)-1) + +#define _mm_mask_fpclass_ss_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)(U)) + +#undef __DEFAULT_FN_ATTRS512 +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512erintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512erintrin.h new file mode 100644 index 0000000..8570061 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512erintrin.h @@ -0,0 +1,271 @@ +/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
+/* exp2a23 */
+#define _mm512_exp2a23_round_pd(A, R) \
+  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+                                      (__v8df)_mm512_setzero_pd(), \
+                                      (__mmask8)-1, (int)(R))
+
+#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \
+  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+                                      (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                      (int)(R))
+
+#define _mm512_maskz_exp2a23_round_pd(M, A, R) \
+  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+                                      (__v8df)_mm512_setzero_pd(), \
+                                      (__mmask8)(M), (int)(R))
+
+#define _mm512_exp2a23_pd(A) \
+  _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_pd(S, M, A) \
+  _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_pd(M, A) \
+  _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_exp2a23_round_ps(A, R) \
+  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                     (__v16sf)_mm512_setzero_ps(), \
+                                     (__mmask16)-1, (int)(R))
+
+#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \
+  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                     (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                     (int)(R))
+
+#define _mm512_maskz_exp2a23_round_ps(M, A, R) \
+  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                     (__v16sf)_mm512_setzero_ps(), \
+                                     (__mmask16)(M), (int)(R))
+
+#define _mm512_exp2a23_ps(A) \
+  _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_exp2a23_ps(S, M, A) \
+  _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_exp2a23_ps(M, A) \
+  _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+/* rsqrt28 */
+#define _mm512_rsqrt28_round_pd(A, R) \
+  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+                                         (__v8df)_mm512_setzero_pd(), \
+                                         (__mmask8)-1, (int)(R))
+
+#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \
+  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+                                         (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                         (int)(R))
+
+#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \
+  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+                                         (__v8df)_mm512_setzero_pd(), \
+                                         (__mmask8)(M), (int)(R))
+
+#define _mm512_rsqrt28_pd(A) \
+  _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_pd(S, M, A) \
+  _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_pd(M, A) \
+  _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rsqrt28_round_ps(A, R) \
+  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+                                        (__v16sf)_mm512_setzero_ps(), \
+                                        (__mmask16)-1, (int)(R))
+
+#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \
+  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+                                        (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                        (int)(R))
+
+#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \
+  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+                                        (__v16sf)_mm512_setzero_ps(), \
+                                        (__mmask16)(M), (int)(R))
+
+#define _mm512_rsqrt28_ps(A) \
+  _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rsqrt28_ps(S, M, A) \
+  _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rsqrt28_ps(M, A) \
+  _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_ss(A, B, R) \
+  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, (int)(R))
+
+#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \
+  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)(__m128)(S), \
+                                              (__mmask8)(M), (int)(R))
+
+#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \
+  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(M), (int)(R))
+
+#define _mm_rsqrt28_ss(A, B) \
+  _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_ss(S, M, A, B) \
+  _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_ss(M, A, B) \
+  _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rsqrt28_round_sd(A, B, R) \
+  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, (int)(R))
+
+#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \
+  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)(__m128d)(S), \
+                                               (__mmask8)(M), (int)(R))
+
+#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \
+  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(M), (int)(R))
+
+#define _mm_rsqrt28_sd(A, B) \
+  _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rsqrt28_sd(S, M, A, B) \
+  _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rsqrt28_sd(M, A, B) \
+  _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+/* rcp28 */
+#define _mm512_rcp28_round_pd(A, R) \
+  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+                                       (__v8df)_mm512_setzero_pd(), \
+                                       (__mmask8)-1, (int)(R))
+
+#define _mm512_mask_rcp28_round_pd(S, M, A, R) \
+  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                       (int)(R))
+
+#define _mm512_maskz_rcp28_round_pd(M, A, R) \
+  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+                                       (__v8df)_mm512_setzero_pd(), \
+                                       (__mmask8)(M), (int)(R))
+
+#define _mm512_rcp28_pd(A) \
+  _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_pd(S, M, A) \
+  _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_pd(M, A) \
+  _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_rcp28_round_ps(A, R) \
+  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+                                      (__v16sf)_mm512_setzero_ps(), \
+                                      (__mmask16)-1, (int)(R))
+
+#define _mm512_mask_rcp28_round_ps(S, M, A, R) \
+  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                      (int)(R))
+
+#define _mm512_maskz_rcp28_round_ps(M, A, R) \
+  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+                                      (__v16sf)_mm512_setzero_ps(), \
+                                      (__mmask16)(M), (int)(R))
+
+#define _mm512_rcp28_ps(A) \
+  _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_rcp28_ps(S, M, A) \
+  _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_maskz_rcp28_ps(M, A) \
+  _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_ss(A, B, R) \
+  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)-1, (int)(R))
+
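+/* Illustrative note (not part of the upstream header): rcp28 and rsqrt28
+ * approximate 1/x and 1/sqrt(x) with a maximum relative error of 2^-28,
+ * often accurate enough to stand in for a full division. A minimal sketch,
+ * assuming AVX512ER is available:
+ *
+ *   __m512 fast_div(__m512 a, __m512 b) {
+ *     return _mm512_mul_ps(a, _mm512_rcp28_ps(b));  // a * (1/b), ~2^-28 error
+ *   }
+ */
+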
+#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \
+  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)(__m128)(S), \
+                                            (__mmask8)(M), (int)(R))
+
+#define _mm_maskz_rcp28_round_ss(M, A, B, R) \
+  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(M), (int)(R))
+
+#define _mm_rcp28_ss(A, B) \
+  _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_ss(S, M, A, B) \
+  _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_ss(M, A, B) \
+  _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_rcp28_round_sd(A, B, R) \
+  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)-1, (int)(R))
+
+#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \
+  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)(__m128d)(S), \
+                                             (__mmask8)(M), (int)(R))
+
+#define _mm_maskz_rcp28_round_sd(M, A, B, R) \
+  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)(M), (int)(R))
+
+#define _mm_rcp28_sd(A, B) \
+  _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_mask_rcp28_sd(S, M, A, B) \
+  _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_maskz_rcp28_sd(M, A, B) \
+  _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
+
+#endif /* __AVX512ERINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512fintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512fintrin.h
new file mode 100644
index 0000000..010bcad
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512fintrin.h
@@ -0,0 +1,9758 @@
+/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512FINTRIN_H
+#define __AVX512FINTRIN_H
+
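+/* Illustrative note (not part of the upstream header): throughout this file
+ * each operation comes in three flavors. The plain form computes all lanes;
+ * _mm512_mask_*(W, U, ...) keeps lane i from W where bit i of mask U is 0;
+ * _mm512_maskz_*(U, ...) zeroes those lanes instead. A minimal sketch:
+ *
+ *   __m512i a = _mm512_set1_epi32(7);
+ *   __m512i b = _mm512_set1_epi32(1);
+ *   __mmask16 u = 0x00FF;                        // select the low 8 lanes
+ *   __m512i r = _mm512_maskz_add_epi32(u, a, b); // lanes 0-7 = 8, lanes 8-15 = 0
+ */
+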
+typedef char __v64qi __attribute__((__vector_size__(64)));
+typedef short __v32hi __attribute__((__vector_size__(64)));
+typedef double __v8df __attribute__((__vector_size__(64)));
+typedef float __v16sf __attribute__((__vector_size__(64)));
+typedef long long __v8di __attribute__((__vector_size__(64)));
+typedef int __v16si __attribute__((__vector_size__(64)));
+
+/* Unsigned types */
+typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
+typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
+typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
+typedef unsigned int __v16su __attribute__((__vector_size__(64)));
+
+typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
+typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
+typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
+
+typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
+typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
+typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));
+
+typedef unsigned char __mmask8;
+typedef unsigned short __mmask16;
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+/* Constants for integer comparison predicates */
+typedef enum {
+    _MM_CMPINT_EQ,      /* Equal */
+    _MM_CMPINT_LT,      /* Less than */
+    _MM_CMPINT_LE,      /* Less than or Equal */
+    _MM_CMPINT_UNUSED,
+    _MM_CMPINT_NE,      /* Not Equal */
+    _MM_CMPINT_NLT,     /* Not Less than */
+#define _MM_CMPINT_GE   _MM_CMPINT_NLT /* Greater than or Equal */
+    _MM_CMPINT_NLE      /* Not Less than or Equal */
+#define _MM_CMPINT_GT   _MM_CMPINT_NLE /* Greater than */
+} _MM_CMPINT_ENUM;
+
+typedef enum
+{
+  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
+  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
+  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
+  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
+  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
+  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
+  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
+  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
+  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
+  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
+  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
+  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
+  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
+  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
+  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
+  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
+  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
+  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
+  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
+  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
+  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
+  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
+  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
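+  /* Illustrative note (not part of the upstream header): each _MM_PERM_WXYZ
+   * value packs four 2-bit source selectors (A=0, B=1, C=2, D=3), with the
+   * first letter in the two most significant bits. For example,
+   * _MM_PERM_ABCD = 0x1B = 0b00011011 = (0<<6)|(1<<4)|(2<<2)|3. A sketch,
+   * assuming AVX512F: _mm512_shuffle_epi32(v, _MM_PERM_BBBB) replicates
+   * element 1 of each 128-bit lane of v into all four positions.
+   */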
_MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, + _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, + _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, + _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, + _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, + _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, + _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, + _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, + _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, + _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, + _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, + _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, + _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, + _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, + _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, + _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, + _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, + _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, + _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, + _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, + _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, + _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, + _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, + _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, + _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, + _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, + _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, + _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, + _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, + _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, + _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, + _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, + _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, + _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, + _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, + _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, + _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, + _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, + _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, + _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, + _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, + _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, + _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, + _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, + _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, + _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, + _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, + _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, + _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, + _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, + _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, + _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, + _MM_PERM_DCAB = 
0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, + _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, + _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, + _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, + _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, + _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, + _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, + _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, + _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, + _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, + _MM_PERM_DDDD = 0xFF +} _MM_PERM_ENUM; + +typedef enum +{ + _MM_MANT_NORM_1_2, /* interval [1, 2) */ + _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */ + _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */ + _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */ +} _MM_MANTISSA_NORM_ENUM; + +typedef enum +{ + _MM_MANT_SIGN_src, /* sign = sign(SRC) */ + _MM_MANT_SIGN_zero, /* sign = 0 */ + _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */ +} _MM_MANTISSA_SIGN_ENUM; + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f"))) + +/* Create vectors with repeated elements */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_setzero_si512(void) +{ + return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 }; +} + +#define _mm512_setzero_epi32 _mm512_setzero_si512 + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_undefined_pd(void) +{ + return (__m512d)__builtin_ia32_undef512(); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_undefined(void) +{ + return (__m512)__builtin_ia32_undef512(); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_undefined_ps(void) +{ + return (__m512)__builtin_ia32_undef512(); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_undefined_epi32(void) +{ + return (__m512i)__builtin_ia32_undef512(); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastd_epi32 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si) _mm512_broadcastd_epi32(__A), + (__v16si) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si) _mm512_broadcastd_epi32(__A), + (__v16si) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastq_epi64 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A, + 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di) _mm512_broadcastq_epi64(__A), + (__v8di) __O); + +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + 
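+  /* Illustrative note (not part of the upstream header): selectq_512 blends
+   * per 64-bit lane, so lane i of the result is the broadcast value when bit
+   * i of __M is set and zero otherwise. E.g., assuming AVX512F:
+   *
+   *   __m128i one = _mm_set1_epi64x(1);
+   *   __m512i r = _mm512_maskz_broadcastq_epi64(0x0F, one);
+   *   // r = {1, 1, 1, 1, 0, 0, 0, 0}
+   */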
return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di) _mm512_broadcastq_epi64(__A), + (__v8di) _mm512_setzero_si512()); +} + + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_setzero_ps(void) +{ + return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; +} + +#define _mm512_setzero _mm512_setzero_ps + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_setzero_pd(void) +{ + return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_set1_ps(float __w) +{ + return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_set1_pd(double __w) +{ + return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi8(char __w) +{ + return __extension__ (__m512i)(__v64qi){ + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi16(short __w) +{ + return __extension__ (__m512i)(__v32hi){ + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi32(int __s) +{ + return __extension__ (__m512i)(__v16si){ + __s, __s, __s, __s, __s, __s, __s, __s, + __s, __s, __s, __s, __s, __s, __s, __s }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi32(__mmask16 __M, int __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si)_mm512_set1_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi64(long long __d) +{ + return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi64(__mmask8 __M, long long __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_set1_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcastss_ps(__m128 __A) +{ + return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set4_epi32 (int __A, int __B, int __C, int __D) +{ + return __extension__ (__m512i)(__v16si) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set4_epi64 (long long __A, long long __B, long long __C, + long long __D) +{ + return __extension__ (__m512i) (__v8di) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_set4_pd (double __A, double __B, double __C, double __D) +{ + return __extension__ (__m512d) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_set4_ps (float __A, float __B, float __C, float __D) +{ + return __extension__ (__m512) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, 
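+    /* Illustrative note (not part of the upstream header): the _mm512_set4_*
+     * arguments are given high-to-low, so __D lands in element 0 and the
+     * 4-element pattern repeats across the vector; the _mm512_setr4_* macros
+     * below simply reverse the argument order. E.g.
+     * _mm512_set4_epi32(3, 2, 1, 0) yields {0,1,2,3, 0,1,2,3, ...}.
+     */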
__C, __B, __A, __D, __C, __B, __A }; +} + +#define _mm512_setr4_epi32(e0,e1,e2,e3) \ + _mm512_set4_epi32((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_epi64(e0,e1,e2,e3) \ + _mm512_set4_epi64((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_pd(e0,e1,e2,e3) \ + _mm512_set4_pd((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_ps(e0,e1,e2,e3) \ + _mm512_set4_ps((e3),(e2),(e1),(e0)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_broadcastsd_pd(__m128d __A) +{ + return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A, + 0, 0, 0, 0, 0, 0, 0, 0); +} + +/* Cast between vector types */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castpd256_pd512(__m256d __a) +{ + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_castps256_ps512(__m256 __a) +{ + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, + -1, -1, -1, -1, -1, -1, -1, -1); +} + +static __inline __m128d __DEFAULT_FN_ATTRS512 +_mm512_castpd512_pd128(__m512d __a) +{ + return __builtin_shufflevector(__a, __a, 0, 1); +} + +static __inline __m256d __DEFAULT_FN_ATTRS512 +_mm512_castpd512_pd256 (__m512d __A) +{ + return __builtin_shufflevector(__A, __A, 0, 1, 2, 3); +} + +static __inline __m128 __DEFAULT_FN_ATTRS512 +_mm512_castps512_ps128(__m512 __a) +{ + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); +} + +static __inline __m256 __DEFAULT_FN_ATTRS512 +_mm512_castps512_ps256 (__m512 __A) +{ + return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_castpd_ps (__m512d __A) +{ + return (__m512) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_castpd_si512 (__m512d __A) +{ + return (__m512i) (__A); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_castpd128_pd512 (__m128d __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castps_pd (__m512 __A) +{ + return (__m512d) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_castps_si512 (__m512 __A) +{ + return (__m512i) (__A); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_castps128_ps512 (__m128 __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_castsi128_si512 (__m128i __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_castsi256_si512 (__m256i __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_castsi512_ps (__m512i __A) +{ + return (__m512) (__A); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castsi512_pd (__m512i __A) +{ + return (__m512d) (__A); +} + +static __inline __m128i __DEFAULT_FN_ATTRS512 +_mm512_castsi512_si128 (__m512i __A) +{ + return (__m128i)__builtin_shufflevector(__A, __A , 0, 1); +} + +static __inline __m256i __DEFAULT_FN_ATTRS512 +_mm512_castsi512_si256 (__m512i __A) +{ + return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_int2mask(int __a) +{ + return (__mmask16)__a; +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask2int(__mmask16 __a) +{ + return (int)__a; +} + +/// Constructs a 512-bit floating-point vector of [8 x double] from a +/// 128-bit 
floating-point vector of [2 x double]. The lower 128 bits +/// contain the value of the source vector. The upper 384 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits +/// contain the value of the parameter. The upper 384 bits are set to zero. +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_zextpd128_pd512(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3); +} + +/// Constructs a 512-bit floating-point vector of [8 x double] from a +/// 256-bit floating-point vector of [4 x double]. The lower 256 bits +/// contain the value of the source vector. The upper 256 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits +/// contain the value of the parameter. The upper 256 bits are set to zero. +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_zextpd256_pd512(__m256d __a) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 512-bit floating-point vector of [16 x float] from a +/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain +/// the value of the source vector. The upper 384 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits +/// contain the value of the parameter. The upper 384 bits are set to zero. +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_zextps128_ps512(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7); +} + +/// Constructs a 512-bit floating-point vector of [16 x float] from a +/// 256-bit floating-point vector of [8 x float]. The lower 256 bits contain +/// the value of the source vector. The upper 256 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits +/// contain the value of the parameter. The upper 256 bits are set to zero. +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_zextps256_ps512(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +/// Constructs a 512-bit integer vector from a 128-bit integer vector. +/// The lower 128 bits contain the value of the source vector. The upper +/// 384 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 512-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The upper 384 bits are set to zero. +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_zextsi128_si512(__m128i __a) +{ + return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3); +} + +/// Constructs a 512-bit integer vector from a 256-bit integer vector. 
+/// The lower 256 bits contain the value of the source vector. The upper +/// 256 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 512-bit integer vector. The lower 256 bits contain the value of +/// the parameter. The upper 256 bits are set to zero. +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_zextsi256_si512(__m256i __a) +{ + return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7); +} + +/* Bitwise operators */ +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_and_epi32(__m512i __a, __m512i __b) +{ + return (__m512i)((__v16su)__a & (__v16su)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si) _mm512_and_epi32(__a, __b), + (__v16si) __src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (), + __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_and_epi64(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a & (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k, + (__v8di) _mm512_and_epi64(__a, __b), + (__v8di) __src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (), + __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_andnot_si512 (__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_andnot_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v16su)__A & (__v16su)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_andnot_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(), + __U, __A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_andnot_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_andnot_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(), + __U, __A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_or_epi32(__m512i __a, __m512i __b) +{ + return (__m512i)((__v16su)__a | (__v16su)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + 
(__v16si)_mm512_or_epi32(__a, __b), + (__v16si)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_or_epi64(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a | (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k, + (__v8di)_mm512_or_epi64(__a, __b), + (__v8di)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_xor_epi32(__m512i __a, __m512i __b) +{ + return (__m512i)((__v16su)__a ^ (__v16su)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si)_mm512_xor_epi32(__a, __b), + (__v16si)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_xor_epi64(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a ^ (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k, + (__v8di)_mm512_xor_epi64(__a, __b), + (__v8di)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_and_si512(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a & (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_or_si512(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a | (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_xor_si512(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a ^ (__v8du)__b); +} + +/* Arithmetic */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_add_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a + (__v8df)__b); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_add_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a + (__v16sf)__b); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mul_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a * (__v8df)__b); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mul_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a * (__v16sf)__b); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_sub_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a - (__v8df)__b); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_sub_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a - (__v16sf)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A + (__v8du) __B); +} + +static __inline__ 
__m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_add_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_add_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A - (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sub_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sub_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A + (__v16su) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_add_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_add_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A - (__v16su) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sub_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sub_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +#define _mm512_max_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R)) + +#define _mm512_mask_max_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_max_round_pd((A), (B), (R)), \ + (__v8df)(W)) + +#define _mm512_maskz_max_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_max_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd()) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_max_pd(__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_max_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + 
(__v8df)_mm512_max_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_max_round_ps(A, B, R) \ + (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R)) + +#define _mm512_mask_max_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_max_round_ps((A), (B), (R)), \ + (__v16sf)(W)) + +#define _mm512_maskz_max_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_max_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_max_ps(__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_max_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_max_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_ss(A, B, R) \ + (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_max_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm_maskz_max_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_max_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_max_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline __m512i +__DEFAULT_FN_ATTRS512 +_mm512_max_epi32(__m512i __A, __m512i __B) +{ + 
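+  /* Illustrative note (not part of the upstream header): the min/max family
+   * operates per lane; combined with a mask it gives a branch-free
+   * conditional clamp. A sketch, assuming AVX512F:
+   *
+   *   // clamp only the lanes selected by m, leave the rest untouched
+   *   __m512i clamped = _mm512_mask_max_epi32(v, m, v, lo_bound);
+   */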
return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epu32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epu32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epu64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epu64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_min_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R)) + +#define _mm512_mask_min_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_min_round_pd((A), (B), (R)), \ + (__v8df)(W)) + +#define _mm512_maskz_min_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_min_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd()) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_min_pd(__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_min_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ 
__m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_min_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_min_round_ps(A, B, R) \ + (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R)) + +#define _mm512_mask_min_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_min_round_ps((A), (B), (R)), \ + (__v16sf)(W)) + +#define _mm512_maskz_min_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_min_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_min_ps(__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_min_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_min_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_ss(A, B, R) \ + (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_min_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm_maskz_min_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_min_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_min_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + 
(__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline __m512i +__DEFAULT_FN_ATTRS512 +_mm512_min_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epu32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epu32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epu64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epu64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mul_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epi32(__X, __Y), + (__v8di)__W); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epi32(__X, __Y), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mul_epu32(__m512i __X, __m512i __Y) +{ + return 
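+  /* Illustrative note (not part of the upstream header): like their SSE/AVX
+   * counterparts, _mm512_mul_epi32 and _mm512_mul_epu32 read only the even
+   * 32-bit elements of each operand and produce eight full 64-bit products.
+   * E.g. _mm512_mul_epu32(_mm512_set1_epi32(-1), _mm512_set1_epi32(2))
+   * yields eight lanes of 0x1FFFFFFFE (0xFFFFFFFF * 2).
+   */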
(__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epu32(__X, __Y), + (__v8di)__W); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epu32(__X, __Y), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullo_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A * (__v16su) __B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_mullo_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_mullo_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullox_epi64 (__m512i __A, __m512i __B) { + return (__m512i) ((__v8du) __A * (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullox_epi64(__A, __B), + (__v8di)__W); +} + +#define _mm512_sqrt_round_pd(A, R) \ + (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)) + +#define _mm512_mask_sqrt_round_pd(W, U, A, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sqrt_round_pd((A), (R)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_sqrt_round_pd(U, A, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sqrt_round_pd((A), (R)), \ + (__v8df)_mm512_setzero_pd()) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_sqrt_pd(__m512d __A) +{ + return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_sqrt_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_sqrt_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_sqrt_round_ps(A, R) \ + (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)) + +#define _mm512_mask_sqrt_round_ps(W, U, A, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sqrt_round_ps((A), (R)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_sqrt_round_ps(U, A, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sqrt_round_ps((A), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_sqrt_ps(__m512 __A) +{ + return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_sqrt_ps(__A), + 
(__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_sqrt_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_rsqrt14_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1);} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_rsqrt14_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_rsqrt14_ss(__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_rsqrt14_sd(__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_rcp14_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) 
__builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_rcp14_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_rcp14_ss(__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_rcp14_sd(__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_floor_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_floor_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_ceil_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __A, -1, + 
_MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_ceil_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi64(__m512i __A) +{ + return (__m512i)__builtin_ia32_pabsq512((__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_abs_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_abs_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi32(__m512i __A) +{ + return (__m512i)__builtin_ia32_pabsd512((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_abs_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_abs_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_add_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_add_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} + +#define _mm_add_round_ss(A, B, R) \ + (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_add_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm_maskz_add_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_add_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_add_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} +#define _mm_add_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_add_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_add_round_sd(U, A, B, R) \ + 
(__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_add_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_add_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_add_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_add_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_add_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R)) + +#define _mm512_mask_add_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_add_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_add_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_add_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_add_round_ps(A, B, R) \ + (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R)) + +#define _mm512_mask_add_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_add_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_add_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_add_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_sub_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_sub_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} +#define _mm_sub_round_ss(A, B, R) \ + (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_sub_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm_maskz_sub_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_sub_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_sub_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} + +#define _mm_sub_round_sd(A, 
B, R) \ + (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_sub_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_sub_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_sub_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_sub_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_sub_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_sub_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_sub_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R)) + +#define _mm512_mask_sub_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sub_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_sub_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sub_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_sub_round_ps(A, B, R) \ + (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R)) + +#define _mm512_mask_sub_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_sub_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_mul_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_mul_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} +#define _mm_mul_round_ss(A, B, R) \ + (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_mul_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm_maskz_mul_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d 
__DEFAULT_FN_ATTRS128 +_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_mul_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_mul_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} + +#define _mm_mul_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_mul_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_mul_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_mul_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_mul_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_mul_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_mul_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_mul_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R)) + +#define _mm512_mask_mul_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_mul_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_mul_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_mul_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_mul_round_ps(A, B, R) \ + (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R)) + +#define _mm512_mask_mul_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_mul_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_div_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_div_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} + +#define _mm_div_round_ss(A, B, R) \ + (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define 
_mm_mask_div_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm_maskz_div_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_div_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_div_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} + +#define _mm_div_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_div_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_div_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_div_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a/(__v8df)__b); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_div_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_div_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_div_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a/(__v16sf)__b); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_div_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_div_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_div_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R)) + +#define _mm512_mask_div_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_div_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_div_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_div_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_div_round_ps(A, B, R) \ + (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R)) + +#define _mm512_mask_div_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_div_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_div_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + 
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps()) + +#define _mm512_roundscale_ps(A, B) \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_roundscale_ps(A, B, C, imm) \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \ + (__v16sf)(__m512)(A), (__mmask16)(B), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_roundscale_ps(A, B, imm) \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \ + (__v16sf)(__m512)(A), (__mmask16)(B), \ + (int)(R)) + +#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A), (int)(R)) + +#define _mm512_roundscale_round_ps(A, imm, R) \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_roundscale_pd(A, B) \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_roundscale_pd(A, B, C, imm) \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \ + (__v8df)(__m512d)(A), (__mmask8)(B), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_roundscale_pd(A, B, imm) \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(A), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \ + (__v8df)(__m512d)(A), (__mmask8)(B), \ + (int)(R)) + +#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(A), (int)(R)) + +#define _mm512_roundscale_round_pd(A, imm, R) \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_fmadd_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)) + + +#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_fmsub_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)) + + +#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \ + 
(__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_fnmadd_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)) + + +#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_fnmsub_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)) + + +#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static 
__inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmadd_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)) + + +#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_fmsub_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)) + + +#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_fnmadd_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)) + + +#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_fnmsub_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)) + + +#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +static __inline__ __m512 
__DEFAULT_FN_ATTRS512 +_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmaddsub_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)) + + +#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \ + 
(__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_fmsubadd_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)) + + +#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmaddsub_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)) + + +#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + 
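+/* Illustrative usage sketch (editorial addition, not part of the upstream + * clang 14 header): how the merge-masked and zero-masked fmaddsub forms + * defined above are typically invoked. Assumes an AVX-512F target; a, b + * and c stand for caller-provided __m512d values. + * + * __mmask8 k = 0xF0; // lanes 4..7 active + * __m512d m = _mm512_mask_fmaddsub_pd(a, k, b, c); // inactive lanes keep a + * __m512d z = _mm512_maskz_fmaddsub_round_pd(k, a, b, c, + * _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); // inactive lanes zeroed + */ + 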
+#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_fmsubadd_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)) + + +#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \ + 
(__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + -(__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + -(__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), 
(int)(R)) + + +#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + + + +/* Vector permutations */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I, + (__v16si) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_permutex2var_epi32(__A, __I, __B), + (__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_permutex2var_epi32(__A, __I, __B), + (__v16si)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_permutex2var_epi32(__A, __I, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I, + (__v8di) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_permutex2var_epi64(__A, __I, __B), + (__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_permutex2var_epi64(__A, __I, __B), + (__v8di)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_permutex2var_epi64(__A, __I, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_alignr_epi64(A, B, I) \ + (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(I)) + +#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \ + (__v8di)(__m512i)(W)) + +#define _mm512_maskz_alignr_epi64(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()) + +#define _mm512_alignr_epi32(A, B, I) \ + (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(I)) + +#define _mm512_mask_alignr_epi32(W, 
U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_alignr_epi32(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()) +/* Vector Extract */ + +#define _mm512_extractf64x4_pd(A, I) \ + (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \ + (__v4df)_mm256_undefined_pd(), \ + (__mmask8)-1) + +#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \ + (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extractf64x4_pd(U, A, imm) \ + (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm512_extractf32x4_ps(A, I) \ + (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1) + +#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \ + (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extractf32x4_ps(U, A, imm) \ + (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)) + +/* Vector Blend */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __W, + (__v8df) __A); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __W, + (__v16sf) __A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __W, + (__v8di) __A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __W, + (__v16si) __A); +} + +/* Compare */ + +#define _mm512_cmp_round_ps_mask(A, B, P, R) \ + (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \ + (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_cmp_ps_mask(A, B, P) \ + _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) +#define _mm512_mask_cmp_ps_mask(U, A, B, P) \ + _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_cmpeq_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ) +#define _mm512_mask_cmpeq_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ) + +#define _mm512_cmplt_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS) +#define _mm512_mask_cmplt_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS) + +#define _mm512_cmple_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS) +#define _mm512_mask_cmple_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS) + +#define _mm512_cmpunord_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), 
_CMP_UNORD_Q) +#define _mm512_mask_cmpunord_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q) + +#define _mm512_cmpneq_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ) +#define _mm512_mask_cmpneq_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ) + +#define _mm512_cmpnlt_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US) +#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US) + +#define _mm512_cmpnle_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US) +#define _mm512_mask_cmpnle_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US) + +#define _mm512_cmpord_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q) +#define _mm512_mask_cmpord_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q) + +#define _mm512_cmp_round_pd_mask(A, B, P, R) \ + (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \ + (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_cmp_pd_mask(A, B, P) \ + _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) +#define _mm512_mask_cmp_pd_mask(U, A, B, P) \ + _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_cmpeq_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ) +#define _mm512_mask_cmpeq_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ) + +#define _mm512_cmplt_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS) +#define _mm512_mask_cmplt_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS) + +#define _mm512_cmple_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS) +#define _mm512_mask_cmple_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS) + +#define _mm512_cmpunord_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q) +#define _mm512_mask_cmpunord_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q) + +#define _mm512_cmpneq_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ) +#define _mm512_mask_cmpneq_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ) + +#define _mm512_cmpnlt_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US) +#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US) + +#define _mm512_cmpnle_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US) +#define _mm512_mask_cmpnle_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US) + +#define _mm512_cmpord_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q) +#define _mm512_mask_cmpord_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q) + +/* Conversion */ + +#define _mm512_cvtt_roundps_epu32(A, R) \ + (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_undefined_epi32(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \ + (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \ + (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)) + + +static __inline __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_cvttps_epu32(__m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi32_ps(A, R) \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_cvt_roundepu32_ps(A, R) \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_ps (__m512i __A) +{ + return (__m512)__builtin_convertvector((__v16su)__A, __v16sf); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepu32_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepu32_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_pd(__m256i __A) +{ + return (__m512d)__builtin_convertvector((__v8si)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepi32_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepi32_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32lo_pd(__m512i __A) +{ + return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A) +{ + return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A)); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_ps (__m512i __A) +{ + return 
(__m512)__builtin_convertvector((__v16si)__A, __v16sf); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepi32_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepi32_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_pd(__m256i __A) +{ + return (__m512d)__builtin_convertvector((__v8su)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepu32_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepu32_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32lo_pd(__m512i __A) +{ + return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A) +{ + return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A)); +} + +#define _mm512_cvt_roundpd_ps(A, R) \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R)) + +#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_ps (__m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) _mm256_undefined_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) _mm256_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_pslo (__m512d __A) +{ + return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A), + (__v8sf) _mm256_setzero_ps (), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A) +{ + return (__m512) __builtin_shufflevector ( + (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W), + __U, __A), + (__v8sf) _mm256_setzero_ps (), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +#define _mm512_cvt_roundps_ph(A, I) \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_undefined_si256(), \ + (__mmask16)-1) + +#define 
_mm512_mask_cvt_roundps_ph(U, W, A, I) \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)(__m256i)(U), \ + (__mmask16)(W)) + +#define _mm512_maskz_cvt_roundps_ph(W, A, I) \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_setzero_si256(), \ + (__mmask16)(W)) + +#define _mm512_cvtps_ph _mm512_cvt_roundps_ph +#define _mm512_mask_cvtps_ph _mm512_mask_cvt_roundps_ph +#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph + +#define _mm512_cvt_roundph_ps(A, R) \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundph_ps(U, A, R) \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtph_ps(__m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epi32(A, R) \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)) + +static __inline __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epi32(__m512d __a) +{ + return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a, + (__v8si)_mm256_setzero_si256(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epi32(A, R) \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + 
(__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)) + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epi32(__m512 __a) +{ + return (__m512i) + __builtin_ia32_cvttps2dq512_mask((__v16sf) __a, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epi32(A, R) \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epi32 (__m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epi32(A, R) \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epi32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epu32(A, R) \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + 
(__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epu32(__m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epu32(A, R) \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_cvtsd_f64(__m512d __a) +{ + return __a[0]; +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_cvtss_f32(__m512 __a) +{ + return __a[0]; +} + +/* Unpack and Interleave */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_pd(__m512d __a, __m512d __b) +{ + return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, + 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpackhi_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpackhi_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_pd(__m512d __a, __m512d __b) +{ + 
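+  /* Editorial note, not part of the upstream header: the shuffle indices
+     0,8, 2,10, 4,12, 6,14 pair the even-indexed double of __a with the
+     even-indexed double of __b in each 128-bit lane, i.e. the result is
+     { a0,b0, a2,b2, a4,b4, a6,b6 }. */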
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, + 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpacklo_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpacklo_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_ps(__m512 __a, __m512 __b) +{ + return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, + 2, 18, 3, 19, + 2+4, 18+4, 3+4, 19+4, + 2+8, 18+8, 3+8, 19+8, + 2+12, 18+12, 3+12, 19+12); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpackhi_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpackhi_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_ps(__m512 __a, __m512 __b) +{ + return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, + 0, 16, 1, 17, + 0+4, 16+4, 1+4, 17+4, + 0+8, 16+8, 1+8, 17+8, + 0+12, 16+12, 1+12, 17+12); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpacklo_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpacklo_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B, + 2, 18, 3, 19, + 2+4, 18+4, 3+4, 19+4, + 2+8, 18+8, 3+8, 19+8, + 2+12, 18+12, 3+12, 19+12); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpackhi_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpackhi_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B, + 0, 16, 1, 17, + 0+4, 16+4, 1+4, 17+4, + 0+8, 16+8, 1+8, 17+8, + 0+12, 16+12, 1+12, 17+12); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpacklo_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
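+/* Editorial note, not part of the upstream header: throughout this file the
+   mask_ forms take lanes from the pass-through operand __W where the mask
+   bit is clear, while the maskz_ forms zero those lanes. A minimal sketch,
+   assuming -mavx512f (a and b are hypothetical __m512i values):
+     __m512i r = _mm512_maskz_unpacklo_epi32(0x00FF, a, b);
+   keeps the interleaved result in lanes 0..7 and zeroes lanes 8..15. */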
+_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpacklo_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B, + 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpackhi_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpackhi_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B, + 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpacklo_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpacklo_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + + +/* SIMD load ops */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_si512 (void const *__P) +{ + struct __loadu_si512 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si512*)__P)->__v; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi32 (void const *__P) +{ + struct __loadu_epi32 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi32*)__P)->__v; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P, + (__v16si) __W, + (__mmask16) __U); +} + + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi64 (void const *__P) +{ + struct __loadu_epi64 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi64*)__P)->__v; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask 
((const float *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_loadu_pd(void const *__p) +{ + struct __loadu_pd { + __m512d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd*)__p)->__v; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_loadu_ps(void const *__p) +{ + struct __loadu_ps { + __m512_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_load_ps(void const *__p) +{ + return *(const __m512*)__p; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_load_pd(void const *__p) +{ + return *(const __m512d*)__p; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_load_si512 (void const *__P) +{ + return *(const __m512i *) __P; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_load_epi32 (void const *__P) +{ + return *(const __m512i *) __P; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_load_epi64 (void const *__P) +{ + return *(const __m512i *) __P; +} + +/* SIMD store ops */ + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi64 (void *__P, __m512i __A) +{ + struct __storeu_epi64 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi64*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_si512 (void *__P, __m512i __A) +{ + struct __storeu_si512 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si512*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi32 (void *__P, __m512i __A) +{ + struct 
__storeu_epi32 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi32*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_pd(void *__P, __m512d __A) +{ + struct __storeu_pd { + __m512d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_ps(void *__P, __m512 __A) +{ + struct __storeu_ps { + __m512_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_pd(void *__P, __m512d __A) +{ + *(__m512d*)__P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_ps(void *__P, __m512 __A) +{ + *(__m512*)__P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_si512 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_epi32 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_epi64 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +/* Mask ops */ + +static __inline __mmask16 __DEFAULT_FN_ATTRS +_mm512_knot(__mmask16 __M) +{ + return __builtin_ia32_knothi(__M); +} + +/* Integer compare */ + +#define _mm512_cmpeq_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu32_mask(A, B) \ 
+ _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ 
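+/* Editorial note, not part of the upstream header: the compare macros above
+   expand to _mm512_cmp_ep*_mask with an _MM_CMPINT_* predicate and return a
+   bitmask with one bit per lane. A minimal sketch, assuming -mavx512f (a and
+   b are hypothetical __m512i values):
+     __mmask16 m = _mm512_cmplt_epi32_mask(a, b);  // bit i set iff a[i] < b[i]
+     __m512i   r = _mm512_mask_mov_epi32(b, m, a); // per-lane signed min
+*/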
__m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi8_epi32(__m128i __A) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi8_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi8_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi8_epi64(__m128i __A) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi8_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi8_epi64(__A), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_epi64(__m256i __X) +{ + return (__m512i)__builtin_convertvector((__v8si)__X, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi32_epi64(__X), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi32_epi64(__X), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_epi32(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi16_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi16_epi32(__A), + (__v16si)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi16_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi16_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ 
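+/* Editorial note, not part of the upstream header: the cvtepi* widenings
+   above sign-extend through the explicitly signed __v16qs/__v8hi/__v8si
+   element types, while the cvtepu* forms below zero-extend through the
+   unsigned __v16qu/__v16hu/__v8su types; __builtin_convertvector derives
+   the extension kind from the source element signedness. */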
__m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu8_epi32(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu8_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu8_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu8_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu8_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu8_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_epi64(__m256i __X) +{ + return (__m512i)__builtin_convertvector((__v8su)__X, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu32_epi64(__X), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu32_epi64(__X), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu16_epi32(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu16_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu16_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu16_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu16_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu16_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rorv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, 
__m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rorv_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rorv_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rorv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rorv_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rorv_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + + + +#define _mm512_cmp_epi32_mask(a, b, p) \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)-1) + +#define _mm512_cmp_epu32_mask(a, b, p) \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)-1) + +#define _mm512_cmp_epi64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm512_cmp_epu64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)(m)) + +#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)(m)) + +#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm512_rol_epi32(a, b) \ + (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b)) + +#define _mm512_mask_rol_epi32(W, U, a, b) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_rol_epi32((a), (b)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_rol_epi32(U, a, b) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_rol_epi32((a), (b)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_rol_epi64(a, b) \ + (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b)) + +#define _mm512_mask_rol_epi64(W, U, a, b) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_rol_epi64((a), (b)), \ + (__v8di)(__m512i)(W)) + +#define _mm512_maskz_rol_epi64(U, a, b) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_rol_epi64((a), (b)), \ + (__v8di)_mm512_setzero_si512()) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rolv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, 
__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rolv_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rolv_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rolv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rolv_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rolv_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_ror_epi32(A, B) \ + (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B)) + +#define _mm512_mask_ror_epi32(W, U, A, B) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_ror_epi32((A), (B)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_ror_epi32(U, A, B) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_ror_epi32((A), (B)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_ror_epi64(A, B) \ + (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B)) + +#define _mm512_mask_ror_epi64(W, U, A, B) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_ror_epi64((A), (B)), \ + (__v8di)(__m512i)(W)) + +#define _mm512_maskz_ror_epi64(U, A, B) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_ror_epi64((A), (B)), \ + (__v8di)_mm512_setzero_si512()) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_slli_epi32(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_slli_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_slli_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_slli_epi64(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_slli_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_slli_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srli_epi32(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
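+/* Editorial note, not part of the upstream header: the slli/srli forms shift
+   every lane by the same immediate count, and counts of 32 or more (64 for
+   the epi64 forms) produce zero rather than being reduced modulo the lane
+   width. A minimal sketch, assuming -mavx512f (v is a hypothetical __m512i):
+     __m512i hi = _mm512_srli_epi32(v, 16);  // high halfword of each lane
+*/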
+_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srli_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srli_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srli_epi64(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srli_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srli_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __A, + (__v16si) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __A, + (__v16si) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __A, + (__v8di) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __A, + (__v8di) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_movedup_pd (__m512d __A) +{ + return 
(__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A, + 0, 0, 2, 2, 4, 4, 6, 6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_movedup_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_movedup_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_fixupimm_pd(A, B, C, imm) \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \ + (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), \ + (int)(imm), (__mmask8)(U), \ + (int)(R)) + +#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \ + (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), \ + (int)(imm), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_fixupimm_ps(A, B, C, imm) \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \ + (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U), \ + (int)(R)) + +#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \ + (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_fixupimm_round_sd(A, B, C, imm, R) \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \ 
+ (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)) + +#define _mm_fixupimm_sd(A, B, C, imm) \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \ + (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \ + (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_fixupimm_round_ss(A, B, C, imm, R) \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)) + +#define _mm_fixupimm_ss(A, B, C, imm) \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \ + (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \ + (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_getexp_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_getexp_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A, + (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm_getexp_round_ss(A, B, R) \ + (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_getexp_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +#define _mm_getmant_round_sd(A, B, C, D, R) \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_getmant_sd(A, B, C, D) \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_getmant_sd(W, U, A, B, C, D) \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_getmant_sd(U, A, B, C, D) \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm_getmant_round_ss(A, B, C, D, R) \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_getmant_ss(A, B, C, D) \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + 
+#define _mm_mask_getmant_ss(W, U, A, B, C, D) \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_getmant_ss(U, A, B, C, D) \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kmov (__mmask16 __A) +{ + return __A; +} + +#define _mm_comi_round_sd(A, B, P, R) \ + (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \ + (int)(P), (int)(R)) + +#define _mm_comi_round_ss(A, B, P, R) \ + (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \ + (int)(P), (int)(R)) + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_si64(A, R) \ + (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)) +#endif + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sll_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sll_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sll_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sll_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sll_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sll_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sllv_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sllv_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sllv_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_sllv_epi64(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sllv_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sllv_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sra_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sra_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sra_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sra_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sra_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sra_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srav_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srav_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srav_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srav_epi64(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srav_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srav_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srl_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B); +} + +static __inline__ 
__m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srl_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srl_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srl_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srl_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srl_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srlv_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srlv_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srlv_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srlv_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srlv_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srlv_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_ternarylogic_epi32(A, B, C, imm) \ + (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1) + +#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \ + (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U)) + +#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \ + (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U)) + +#define _mm512_ternarylogic_epi64(A, B, C, imm) \ + (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1) + +#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \ + (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + 
(__mmask8)(U)) + +#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \ + (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U)) + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_i64(A, R) \ + (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)) +#endif + +#define _mm_cvt_roundsd_si32(A, R) \ + (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)) + +#define _mm_cvt_roundsd_i32(A, R) \ + (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)) + +#define _mm_cvt_roundsd_u32(A, R) \ + (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvtsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_u64(A, R) \ + (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \ + (int)(R)) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvtsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvt_roundss_si32(A, R) \ + (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)) + +#define _mm_cvt_roundss_i32(A, R) \ + (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)) + +#ifdef __x86_64__ +#define _mm_cvt_roundss_si64(A, R) \ + (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)) + +#define _mm_cvt_roundss_i64(A, R) \ + (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)) +#endif + +#define _mm_cvt_roundss_u32(A, R) \ + (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvtss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundss_u64(A, R) \ + (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \ + (int)(R)) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvtss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundsd_i32(A, R) \ + (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)) + +#define _mm_cvtt_roundsd_si32(A, R) \ + (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)) + +static __inline__ int __DEFAULT_FN_ATTRS128 +_mm_cvttsd_i32 (__m128d __A) +{ + return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsd_si64(A, R) \ + (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)) + +#define _mm_cvtt_roundsd_i64(A, R) \ + (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)) + +static __inline__ long long __DEFAULT_FN_ATTRS128 +_mm_cvttsd_i64 (__m128d __A) +{ + return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundsd_u32(A, R) \ + (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvttsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsd_u64(A, R) \ + (unsigned long 
long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \ + (int)(R)) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvttsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundss_i32(A, R) \ + (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)) + +#define _mm_cvtt_roundss_si32(A, R) \ + (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)) + +static __inline__ int __DEFAULT_FN_ATTRS128 +_mm_cvttss_i32 (__m128 __A) +{ + return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundss_i64(A, R) \ + (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)) + +#define _mm_cvtt_roundss_si64(A, R) \ + (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)) + +static __inline__ long long __DEFAULT_FN_ATTRS128 +_mm_cvttss_i64 (__m128 __A) +{ + return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundss_u32(A, R) \ + (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvttss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundss_u64(A, R) \ + (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \ + (int)(R)) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvttss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm512_permute_pd(X, C) \ + (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)) + +#define _mm512_mask_permute_pd(W, U, X, C) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permute_pd((X), (C)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_permute_pd(U, X, C) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permute_pd((X), (C)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_permute_ps(X, C) \ + (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C)) + +#define _mm512_mask_permute_ps(W, U, X, C) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_permute_ps((X), (C)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_permute_ps(U, X, C) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_permute_ps((X), (C)), \ + (__v16sf)_mm512_setzero_ps()) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_permutevar_pd(__m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutevar_pd(__A, __C), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutevar_pd(__A, __C), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_permutevar_ps(__m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C); +} + +static __inline__ 
__m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutevar_ps(__A, __C), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutevar_ps(__A, __C), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B) +{ + return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I, + (__v8df)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_permutex2var_pd(__A, __I, __B), + (__v8df)__A); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U, + __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_permutex2var_pd(__A, __I, __B), + (__v8df)(__m512d)__I); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I, + __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_permutex2var_pd(__A, __I, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B) +{ + return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I, + (__v16sf) __B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_permutex2var_ps(__A, __I, __B), + (__v16sf)__A); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_permutex2var_ps(__A, __I, __B), + (__v16sf)(__m512)__I); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_permutex2var_ps(__A, __I, __B), + (__v16sf)_mm512_setzero_ps()); +} + + +#define _mm512_cvtt_roundpd_epu32(A, R) \ + (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_undefined_si256(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \ + (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \ + (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_roundscale_round_sd(A, B, imm, R) \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(imm), \ + (int)(R)) + +#define _mm_roundscale_sd(A, B, imm) \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(imm), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_roundscale_sd(W, U, A, B, imm) \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(imm), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(I), \ + (int)(R)) + +#define _mm_maskz_roundscale_sd(U, A, B, I) \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(I), \ + (int)(R)) + +#define _mm_roundscale_round_ss(A, B, imm, R) \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(imm), \ + (int)(R)) + +#define _mm_roundscale_ss(A, B, imm) \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(imm), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_roundscale_ss(W, U, A, B, I) \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(I), \ + (int)(R)) + +#define _mm_maskz_roundscale_ss(U, A, B, I) \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(I), \ + (int)(R)) + +#define _mm512_scalef_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_scalef_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ 
+ (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_scalef_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_scalef_round_ps(A, B, R) \ + (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_scalef_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_scalef_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_scalef_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_scalef_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A, + (__v2df)( __B), (__v2df) _mm_setzero_pd(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define 
_mm_maskz_scalef_round_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm_scalef_round_ss(A, B, R) \ + (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_scalef_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A, + (__v4sf)( __B), (__v4sf) _mm_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_scalef_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srai_epi32(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srai_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, + unsigned int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srai_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srai_epi64(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srai_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srai_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_shuffle_f32x4(A, B, imm) \ + (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(imm)) + +#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps()) + +#define _mm512_shuffle_f64x2(A, B, imm) \ + 
(__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(imm)) + +#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_shuffle_i32x4(A, B, imm) \ + (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(imm)) + +#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_shuffle_i64x2(A, B, imm) \ + (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(imm)) + +#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \ + (__v8di)(__m512i)(W)) + +#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()) + +#define _mm512_shuffle_pd(A, B, M) \ + (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(M)) + +#define _mm512_mask_shuffle_pd(W, U, A, B, M) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_pd((A), (B), (M)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_shuffle_pd(U, A, B, M) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_pd((A), (B), (M)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_shuffle_ps(A, B, M) \ + (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(M)) + +#define _mm512_mask_shuffle_ps(W, U, A, B, M) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_shuffle_ps(U, A, B, M) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \ + (__v16sf)_mm512_setzero_ps()) + +#define _mm_sqrt_round_sd(A, B, R) \ + (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_sd(U, A, B, R) \ + 
(__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm_sqrt_round_ss(A, B, R) \ + (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_ss(U, A, B, R) \ + (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f32x4(__m128 __A) +{ + return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 2, 3, 0, 1, 2, 3, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x4(__A), + (__v16sf)__O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x4(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f64x4(__m256d __A) +{ + return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x4(__A), + (__v8df)__O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x4(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i32x4(__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 2, 3, 0, 1, 2, 3, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x4(__A), + (__v16si)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x4(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i64x4(__m256i __A) +{ + return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A, + 0, 1, 2, 3, 0, 1, 2, 3); +} + 
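The broadcast_{f,i}32x4 and _{f,i}64x4 helpers above are plain lane replications built on __builtin_shufflevector, with the mask/maskz forms layered on top through the select builtins. A short usage sketch, assuming an AVX-512F host (illustration only, not part of this patch):

/* Replicate one 128-bit lane across a 512-bit register.
 * Build: cc -O2 -mavx512f broadcast_demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i lane = _mm_setr_epi32(1, 2, 3, 4);
  __m512i v = _mm512_broadcast_i32x4(lane);  /* 1 2 3 4, repeated 4x */
  int out[16];
  _mm512_storeu_si512(out, v);
  for (int i = 0; i < 16; ++i)
    printf("%d%c", out[i], i == 15 ? '\n' : ' ');
  return 0;
}
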
+static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x4(__A), + (__v8di)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x4(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__M, + (__v8df) _mm512_broadcastsd_pd(__A), + (__v8df) __O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__M, + (__v8df) _mm512_broadcastsd_pd(__A), + (__v8df) _mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__M, + (__v16sf) _mm512_broadcastss_ps(__A), + (__v16sf) __O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__M, + (__v16sf) _mm512_broadcastss_ps(__A), + (__v16sf) _mm512_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + 
(__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi32_epi16 
(__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) 
_mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ 
+ __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +#define _mm512_extracti32x4_epi32(A, imm) \ + (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v4si)_mm_undefined_si128(), \ + (__mmask8)-1) + +#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \ + (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \ + (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(U)) + +#define _mm512_extracti64x4_epi64(A, imm) \ + (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \ + (__v4di)_mm256_undefined_si256(), \ + (__mmask8)-1) + +#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \ + (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(U)) + +#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \ + (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)(U)) + +#define _mm512_insertf64x4(A, B, imm) \ + (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \ + (__v4df)(__m256d)(B), (int)(imm)) + +#define _mm512_mask_insertf64x4(W, U, A, B, imm) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x4((A), (B), (imm)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_insertf64x4(U, A, B, imm) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x4((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_inserti64x4(A, B, imm) \ + (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \ + (__v4di)(__m256i)(B), (int)(imm)) + +#define _mm512_mask_inserti64x4(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x4((A), (B), (imm)), \ + (__v8di)(__m512i)(W)) + +#define _mm512_maskz_inserti64x4(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x4((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()) + +#define _mm512_insertf32x4(A, B, imm) \ + (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \ + (__v4sf)(__m128)(B), (int)(imm)) + +#define _mm512_mask_insertf32x4(W, U, A, B, imm) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \ + (__v16sf)(__m512)(W)) + +#define _mm512_maskz_insertf32x4(U, A, B, imm) \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps()) + +#define _mm512_inserti32x4(A, B, imm) \ + (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \ + (__v4si)(__m128i)(B), (int)(imm)) + +#define _mm512_mask_inserti32x4(W, U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x4((A), (B), (imm)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_inserti32x4(U, A, B, imm) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x4((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_getmant_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \ + 
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_getmant_pd(A, B, C) \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_getmant_pd(W, U, A, B, C) \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_getmant_pd(U, A, B, C) \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_getmant_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_getmant_ps(A, B, C) \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_getmant_ps(W, U, A, B, C) \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_getmant_ps(U, A, B, C) \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_getexp_round_pd(A, R) \ + (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_getexp_round_pd(W, U, A, R) \ + (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_getexp_round_pd(U, A, R) \ + (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_getexp_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_getexp_round_ps(A, R) \ + (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)) + +#define _mm512_mask_getexp_round_ps(W, U, A, R) \ + (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)) + +#define _mm512_maskz_getexp_round_ps(U, A, R) \ + (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_getexp_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_i64gather_ps(index, addr, scale) \ + (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale)) + +#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \ + (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm512_i64gather_epi32(index, addr, scale) \ + (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)-1, (int)(scale)) + +#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \ + (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm512_i64gather_pd(index, addr, scale) \ + (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale)) + +#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \ + (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm512_i64gather_epi64(index, addr, scale) \ + (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale)) + +#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \ + (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm512_i32gather_ps(index, addr, scale) \ + (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \ + (void const *)(addr), \ + (__v16si)(__m512)(index), \ + (__mmask16)-1, (int)(scale)) + +#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \ + (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \ + (void const *)(addr), \ + (__v16si)(__m512)(index), \ + (__mmask16)(mask), (int)(scale)) + +#define 
_mm512_i32gather_epi32(index, addr, scale) \ + (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \ + (void const *)(addr), \ + (__v16si)(__m512i)(index), \ + (__mmask16)-1, (int)(scale)) + +#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \ + (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \ + (void const *)(addr), \ + (__v16si)(__m512i)(index), \ + (__mmask16)(mask), (int)(scale)) + +#define _mm512_i32gather_pd(index, addr, scale) \ + (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), (__mmask8)-1, \ + (int)(scale)) + +#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \ + (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm512_i32gather_epi64(index, addr, scale) \ + (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), (__mmask8)-1, \ + (int)(scale)) + +#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \ + (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm512_i64scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8sf)(__m256)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8sf)(__m256)(v1), (int)(scale)) + +#define _mm512_i64scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + +#define _mm512_i64scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_i64scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +#define _mm512_i32scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \ + (__v16si)(__m512i)(index), \ + (__v16sf)(__m512)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \ + (__v16si)(__m512i)(index), \ + (__v16sf)(__m512)(v1), (int)(scale)) + +#define _mm512_i32scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \ + (__v16si)(__m512i)(index), \ + (__v16si)(__m512i)(v1), (int)(scale)) + +#define 
_mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \ + (__v16si)(__m512i)(index), \ + (__v16si)(__m512i)(v1), (int)(scale)) + +#define _mm512_i32scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_i32scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + (__v4sf)__A, + (__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + (__v4sf)__B, + (__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W, + (__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + (__v4sf)__A, + -(__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmsub_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + (__v4sf)__B, + -(__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), 
\ + -(__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W, + (__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \ + (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + -(__v4sf)__A, + (__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmadd_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + -(__v4sf)__B, + (__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W, + -(__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + -(__v4sf)__A, + -(__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmsub_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + -(__v4sf)__B, + -(__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W, + -(__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \ + (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(X), \ + 
(__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + (__v2df)__A, + (__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + (__v2df)__B, + (__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W, + (__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + (__v2df)__A, + -(__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmsub_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + (__v2df)__B, + -(__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W, + (__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \ + (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + -(__v2df)__A, + (__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmadd_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + 
-(__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + -(__v2df)__B, + (__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W, + -(__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + -(__v2df)__A, + -(__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmsub_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R)) + +#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + -(__v2df)__B, + -(__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), \ + (__mmask8)(U), \ + (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W, + -(__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \ + (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_permutex_pd(X, C) \ + (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)) + +#define _mm512_mask_permutex_pd(W, U, X, C) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permutex_pd((X), (C)), \ + (__v8df)(__m512d)(W)) + +#define _mm512_maskz_permutex_pd(U, X, C) \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permutex_pd((X), (C)), \ + (__v8df)_mm512_setzero_pd()) + +#define _mm512_permutex_epi64(X, C) \ + (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)) + +#define _mm512_mask_permutex_epi64(W, U, X, C) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_permutex_epi64((X), (C)), \ + (__v8di)(__m512i)(W)) + +#define 
_mm512_maskz_permutex_epi64(U, X, C) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_permutex_epi64((X), (C)), \ + (__v8di)_mm512_setzero_si512()) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_pd (__m512i __X, __m512d __Y) +{ + return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutexvar_pd(__X, __Y), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutexvar_pd(__X, __Y), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_permutexvar_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_permutexvar_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_ps (__m512i __X, __m512 __Y) +{ + return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutexvar_ps(__X, __Y), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutexvar_ps(__X, __Y), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X); +} + +#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32 + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_permutexvar_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_permutexvar_epi32(__X, __Y), + (__v16si)__W); +} + +#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32 + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kand (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kandn (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 
__DEFAULT_FN_ATTRS +_mm512_kor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_kortestc (__mmask16 __A, __mmask16 __B) +{ + return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_kortestz (__mmask16 __A, __mmask16 __B) +{ + return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B) +{ + return (unsigned char)__builtin_ia32_kortestchi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B) +{ + return (unsigned char)__builtin_ia32_kortestzhi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B); + return (unsigned char)__builtin_ia32_kortestzhi(__A, __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kunpackb (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kxnor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kxor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B); +} + +#define _kand_mask16 _mm512_kand +#define _kandn_mask16 _mm512_kandn +#define _knot_mask16 _mm512_knot +#define _kor_mask16 _mm512_kor +#define _kxnor_mask16 _mm512_kxnor +#define _kxor_mask16 _mm512_kxor + +#define _kshiftli_mask16(A, I) \ + (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I)) + +#define _kshiftri_mask16(A, I) \ + (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I)) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_cvtmask16_u32(__mmask16 __A) { + return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_cvtu32_mask16(unsigned int __A) { + return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_load_mask16(__mmask16 *__A) { + return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask16(__mmask16 *__A, __mmask16 __B) { + *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_stream_si512 (void * __P, __m512i __A) +{ + typedef __v8di __v8di_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_stream_load_si512 (void const *__P) +{ + typedef __v8di __v8di_aligned __attribute__((aligned(64))); + return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_stream_pd (void *__P, __m512d __A) +{ + typedef __v8df __v8df_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_stream_ps (void *__P, __m512 __A) +{ + typedef __v16sf __v16sf_aligned __attribute__((aligned(64))); + 
__builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +#define _mm_cmp_round_ss_mask(X, Y, P, R) \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)(M), (int)(R)) + +#define _mm_cmp_ss_mask(X, Y, P) \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_cmp_ss_mask(M, X, Y, P) \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_cmp_round_sd_mask(X, Y, P, R) \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)(M), (int)(R)) + +#define _mm_cmp_sd_mask(X, Y, P) \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_cmp_sd_mask(M, X, Y, P) \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION) + +/* Bit Test */ + +static __inline __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_test_epi32_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi32_mask 
(_mm512_and_epi32(__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_test_epi64_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi32_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi64_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_movehdup_ps (__m512 __A) +{ + return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_movehdup_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_movehdup_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_moveldup_ps (__m512 __A) +{ + return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_moveldup_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_moveldup_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), + _mm_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W); +} 
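+// A minimal usage sketch (illustrative only; the variables below are
+// hypothetical caller code, not part of this header): the masked scalar
+// moves above merge only the low element under bit 0 of the mask, while
+// the upper lanes always come from the first source operand:
+//
+//   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   /* a = {1, 2, 3, 4} */
+//   __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);   /* b = {5, 6, 7, 8} */
+//   __m128 w = _mm_setzero_ps();
+//   __m128 r0 = _mm_mask_move_ss(w, 0, a, b); /* r0 = {0, 2, 3, 4}: mask
+//                                                clear, low lane from w  */
+//   __m128 r1 = _mm_mask_move_ss(w, 1, a, b); /* r1 = {5, 2, 3, 4}: mask
+//                                                set, low lane from b    */
+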
+ +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), + _mm_setzero_pd()); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A) +{ + __m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W, + (__v4sf)_mm_setzero_ps(), + 0, 4, 4, 4); + + return (__m128) __builtin_ia32_loadss128_mask ((const __v4sf *) __A, src, __U & 1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_load_ss (__mmask8 __U, const float* __A) +{ + return (__m128)__builtin_ia32_loadss128_mask ((const __v4sf *) __A, + (__v4sf) _mm_setzero_ps(), + __U & 1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A) +{ + __m128d src = (__v2df) __builtin_shufflevector((__v2df) __W, + (__v2df)_mm_setzero_pd(), + 0, 2); + + return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, src, __U & 1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_load_sd (__mmask8 __U, const double* __A) +{ + return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, + (__v2df) _mm_setzero_pd(), + __U & 1); +} + +#define _mm512_shuffle_epi32(A, I) \ + (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)) + +#define _mm512_mask_shuffle_epi32(W, U, A, I) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_epi32((A), (I)), \ + (__v16si)(__m512i)(W)) + +#define _mm512_maskz_shuffle_epi32(U, A, I) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_epi32((A), (I)), \ + (__v16si)_mm512_setzero_si512()) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P, + (__v8df) _mm512_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
+_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P, + (__v16si) _mm512_setzero_si512(), + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512(), + (__mmask16) __U); +} + +#define _mm512_cvt_roundps_pd(A, R) \ + (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \ + (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm512_maskz_cvt_roundps_pd(U, A, R) \ + (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtps_pd (__m256 __A) +{ + return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtps_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtps_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtpslo_pd (__m512 
__A) +{ + return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A) +{ + return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __A, + (__v8df) __W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __A, + (__v8df) _mm512_setzero_pd ()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __A, + (__v16sf) __W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __A, + (__v16sf) _mm512_setzero_ps ()); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +#define _mm_cvt_roundsd_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \ + (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \ + (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B) +{ + return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A, + (__v2df)__B, + (__v4sf)__W, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B) +{ + return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A, + (__v2df)__B, + (__v4sf)_mm_setzero_ps(), + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvtss_i32 _mm_cvtss_si32 +#define _mm_cvtsd_i32 _mm_cvtsd_si32 +#define _mm_cvti32_sd _mm_cvtsi32_sd +#define _mm_cvti32_ss _mm_cvtsi32_ss +#ifdef __x86_64__ +#define _mm_cvtss_i64 _mm_cvtss_si64 +#define _mm_cvtsd_i64 _mm_cvtsd_si64 +#define _mm_cvti64_sd _mm_cvtsi64_sd +#define _mm_cvti64_ss _mm_cvtsi64_ss +#endif + +#ifdef __x86_64__ +#define _mm_cvt_roundi64_sd(A, B, R) \ + 
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \ + (int)(R)) + +#define _mm_cvt_roundsi64_sd(A, B, R) \ + (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \ + (int)(R)) +#endif + +#define _mm_cvt_roundsi32_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)) + +#define _mm_cvt_roundi32_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)) + +#ifdef __x86_64__ +#define _mm_cvt_roundsi64_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \ + (int)(R)) + +#define _mm_cvt_roundi64_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \ + (int)(R)) +#endif + +#define _mm_cvt_roundss_sd(A, B, R) \ + (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1, (int)(R)) + +#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \ + (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)) + +#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \ + (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B) +{ + return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A, + (__v4sf)__B, + (__v2df)__W, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B) +{ + return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A, + (__v4sf)__B, + (__v2df)_mm_setzero_pd(), + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtu32_sd (__m128d __A, unsigned __B) +{ + __A[0] = __B; + return __A; +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_sd(A, B, R) \ + (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \ + (unsigned long long)(B), (int)(R)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtu64_sd (__m128d __A, unsigned long long __B) +{ + __A[0] = __B; + return __A; +} +#endif + +#define _mm_cvt_roundu32_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \ + (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtu32_ss (__m128 __A, unsigned __B) +{ + __A[0] = __B; + return __A; +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_ss(A, B, R) \ + (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \ + (unsigned long long)(B), (int)(R)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtu64_ss (__m128 __A, unsigned long long __B) +{ + __A[0] = __B; + return __A; +} +#endif + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A) +{ + return (__m512i) __builtin_ia32_selectd_512(__M, + (__v16si) _mm512_set1_epi32(__A), + (__v16si) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A) +{ + return (__m512i) __builtin_ia32_selectq_512(__M, + (__v8di) _mm512_set1_epi64(__A), + (__v8di) __O); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59, + char __e58, char __e57, char __e56, char __e55, char __e54, char __e53, + char __e52, char __e51, char 
__e50, char __e49, char __e48, char __e47, + char __e46, char __e45, char __e44, char __e43, char __e42, char __e41, + char __e40, char __e39, char __e38, char __e37, char __e36, char __e35, + char __e34, char __e33, char __e32, char __e31, char __e30, char __e29, + char __e28, char __e27, char __e26, char __e25, char __e24, char __e23, + char __e22, char __e21, char __e20, char __e19, char __e18, char __e17, + char __e16, char __e15, char __e14, char __e13, char __e12, char __e11, + char __e10, char __e9, char __e8, char __e7, char __e6, char __e5, + char __e4, char __e3, char __e2, char __e1, char __e0) { + + return __extension__ (__m512i)(__v64qi) + {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7, + __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15, + __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23, + __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31, + __e32, __e33, __e34, __e35, __e36, __e37, __e38, __e39, + __e40, __e41, __e42, __e43, __e44, __e45, __e46, __e47, + __e48, __e49, __e50, __e51, __e52, __e53, __e54, __e55, + __e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63}; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28, + short __e27, short __e26, short __e25, short __e24, short __e23, + short __e22, short __e21, short __e20, short __e19, short __e18, + short __e17, short __e16, short __e15, short __e14, short __e13, + short __e12, short __e11, short __e10, short __e9, short __e8, + short __e7, short __e6, short __e5, short __e4, short __e3, + short __e2, short __e1, short __e0) { + return __extension__ (__m512i)(__v32hi) + {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7, + __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15, + __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23, + __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H, + int __I, int __J, int __K, int __L, + int __M, int __N, int __O, int __P) +{ + return __extension__ (__m512i)(__v16si) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7, \ + e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \ + (e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi64 (long long __A, long long __B, long long __C, + long long __D, long long __E, long long __F, + long long __G, long long __H) +{ + return __extension__ (__m512i) (__v8di) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_set_pd (double __A, double __B, double __C, double __D, + double __E, double __F, double __G, double __H) +{ + return __extension__ (__m512d) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_set_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H, + float __I, float __J, float __K, float __L, + float __M, float __N, float __O, float __P) +{ + return __extension__ (__m512) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, 
__B, __A }; +} + +#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \ + (e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_abs_ps(__m512 __A) +{ + return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ; +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A) +{ + return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_abs_pd(__m512d __A) +{ + return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A) ; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A) +{ + return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A); +} + +/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars as + * outputs. This class of vector operation forms the basis of many scientific + * computations. In vector-reduction arithmetic, the evaluation order is + * independent of the order of the input elements of V. + + * For floating-point intrinsics: + * 1. When using fadd/fmul intrinsics, the order of operations within the + * vector is unspecified (associative math). + * 2. When using fmin/fmax intrinsics, NaN or -0.0 elements within the vector + * produce unspecified results. + + * Used bisection method. At each step, we partition the vector with previous + * step in half, and the operation is performed on its two halves. + * This takes log2(n) steps where n is the number of elements in the vector. + */ + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) { + return __builtin_ia32_reduce_add_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) { + return __builtin_ia32_reduce_mul_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) { + return __builtin_ia32_reduce_and_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) { + return __builtin_ia32_reduce_or_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi64(__M, __W); + return __builtin_ia32_reduce_add_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W); + return __builtin_ia32_reduce_mul_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W); + return __builtin_ia32_reduce_and_q512(__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi64(__M, __W); + return __builtin_ia32_reduce_or_q512(__W); +} + +// -0.0 is used to ignore the start value since it is the neutral value of +// floating point addition. 
For more information, please refer to +// https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fadd-intrinsic +static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) { + return __builtin_ia32_reduce_fadd_pd512(-0.0, __W); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) { + return __builtin_ia32_reduce_fmul_pd512(1.0, __W); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) { + __W = _mm512_maskz_mov_pd(__M, __W); + return __builtin_ia32_reduce_fadd_pd512(-0.0, __W); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) { + __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W); + return __builtin_ia32_reduce_fmul_pd512(1.0, __W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_add_epi32(__m512i __W) { + return __builtin_ia32_reduce_add_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_mul_epi32(__m512i __W) { + return __builtin_ia32_reduce_mul_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_and_epi32(__m512i __W) { + return __builtin_ia32_reduce_and_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_or_epi32(__m512i __W) { + return __builtin_ia32_reduce_or_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi32(__M, __W); + return __builtin_ia32_reduce_add_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) { + __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W); + return __builtin_ia32_reduce_mul_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) { + __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W); + return __builtin_ia32_reduce_and_d512((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi32(__M, __W); + return __builtin_ia32_reduce_or_d512((__v16si)__W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_add_ps(__m512 __W) { + return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_mul_ps(__m512 __W) { + return __builtin_ia32_reduce_fmul_ps512(1.0f, __W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) { + __W = _mm512_maskz_mov_ps(__M, __W); + return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) { + __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W); + return __builtin_ia32_reduce_fmul_ps512(1.0f, __W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epi64(__m512i __V) { + return __builtin_ia32_reduce_smax_q512(__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epu64(__m512i __V) { + return __builtin_ia32_reduce_umax_q512(__V); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epi64(__m512i __V) { + return __builtin_ia32_reduce_smin_q512(__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epu64(__m512i __V) { + return 
__builtin_ia32_reduce_umin_q512(__V); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) { + __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V); + return __builtin_ia32_reduce_smax_q512(__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) { + __V = _mm512_maskz_mov_epi64(__M, __V); + return __builtin_ia32_reduce_umax_q512(__V); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) { + __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V); + return __builtin_ia32_reduce_smin_q512(__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) { + __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V); + return __builtin_ia32_reduce_umin_q512(__V); +} +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epi32(__m512i __V) { + return __builtin_ia32_reduce_smax_d512((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epu32(__m512i __V) { + return __builtin_ia32_reduce_umax_d512((__v16si)__V); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epi32(__m512i __V) { + return __builtin_ia32_reduce_smin_d512((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epu32(__m512i __V) { + return __builtin_ia32_reduce_umin_d512((__v16si)__V); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) { + __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V); + return __builtin_ia32_reduce_smax_d512((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) { + __V = _mm512_maskz_mov_epi32(__M, __V); + return __builtin_ia32_reduce_umax_d512((__v16si)__V); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) { + __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V); + return __builtin_ia32_reduce_smin_d512((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) { + __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V); + return __builtin_ia32_reduce_umin_d512((__v16si)__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_pd(__m512d __V) { + return __builtin_ia32_reduce_fmax_pd512(__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_pd(__m512d __V) { + return __builtin_ia32_reduce_fmin_pd512(__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) { + __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V); + return __builtin_ia32_reduce_fmax_pd512(__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) { + __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V); + return __builtin_ia32_reduce_fmin_pd512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_ps(__m512 __V) { + return __builtin_ia32_reduce_fmax_ps512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_ps(__m512 __V) { + return __builtin_ia32_reduce_fmin_ps512(__V); +} + +static __inline__ float 
__DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) { + __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V); + return __builtin_ia32_reduce_fmax_ps512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) { + __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V); + return __builtin_ia32_reduce_fmin_ps512(__V); +} + +/// Moves the least significant 32 bits of a vector of [16 x i32] to a +/// 32-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __A +/// A vector of [16 x i32]. The least significant 32 bits are moved to the +/// destination. +/// \returns A 32-bit signed integer containing the moved value. +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_cvtsi512_si32(__m512i __A) { + __v16si __b = (__v16si)__A; + return __b[0]; +} + +/// Loads 8 double-precision (64-bit) floating-point elements stored at memory +/// locations starting at location \a base_addr at packed 32-bit integer indices +/// stored in the lower half of \a vindex scaled by \a scale them in dst. +/// +/// This intrinsic corresponds to the VGATHERDPD instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endoperation +#define _mm512_i32logather_pd(vindex, base_addr, scale) \ + _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale)) + +/// Loads 8 double-precision (64-bit) floating-point elements from memory +/// starting at location \a base_addr at packed 32-bit integer indices stored in +/// the lower half of \a vindex scaled by \a scale into dst using writemask +/// \a mask (elements are copied from \a src when the corresponding mask bit is +/// not set). +/// +/// This intrinsic corresponds to the VGATHERDPD instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ELSE +/// dst[i+63:i] := src[i+63:i] +/// FI +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endoperation +#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale) \ + _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex), \ + (base_addr), (scale)) + +/// Loads 8 64-bit integer elements from memory starting at location \a base_addr +/// at packed 32-bit integer indices stored in the lower half of \a vindex +/// scaled by \a scale and stores them in dst. +/// +/// This intrinsic corresponds to the VPGATHERDQ instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endoperation +#define _mm512_i32logather_epi64(vindex, base_addr, scale) \ + _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale)) + +/// Loads 8 64-bit integer elements from memory starting at location \a base_addr +/// at packed 32-bit integer indices stored in the lower half of \a vindex +/// scaled by \a scale and stores them in dst using writemask \a mask (elements +/// are copied from \a src when the corresponding mask bit is not set). 
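+///
+/// A usage sketch (illustrative only; `table` is a hypothetical array and
+/// <immintrin.h> is assumed to have been included):
+/// \code
+///   long long table[16] = {0};
+///   __m512i idx = _mm512_set1_epi32(3);   // low half supplies the 8 indices
+///   __m512i src = _mm512_setzero_si512(); // fallback for masked-off lanes
+///   __m512i v = _mm512_mask_i32logather_epi64(src, 0x0F, idx, table, 8);
+///   // lanes 0..3 load table[3]; lanes 4..7 keep 0 from src
+/// \endcode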
+/// +/// This intrinsic corresponds to the VPGATHERDQ instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ELSE +/// dst[i+63:i] := src[i+63:i] +/// FI +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endoperation +#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale) \ + _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex), \ + (base_addr), (scale)) + +/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1 +/// and to memory locations starting at location \a base_addr at packed 32-bit +/// integer indices stored in \a vindex scaled by \a scale. +/// +/// This intrinsic corresponds to the VSCATTERDPD instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := v1[i+63:i] +/// ENDFOR +/// \endoperation +#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale) \ + _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale)) + +/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1 +/// to memory locations starting at location \a base_addr at packed 32-bit +/// integer indices stored in \a vindex scaled by \a scale. Only those elements +/// whose corresponding mask bit is set in writemask \a mask are written to +/// memory. +/// +/// This intrinsic corresponds to the VSCATTERDPD instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := a[i+63:i] +/// FI +/// ENDFOR +/// \endoperation +#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale) \ + _mm512_mask_i32scatter_pd((base_addr), (mask), \ + _mm512_castsi512_si256(vindex), (v1), (scale)) + +/// Stores 8 packed 64-bit integer elements located in \a v1 and stores them in +/// memory locations starting at location \a base_addr at packed 32-bit integer +/// indices stored in \a vindex scaled by \a scale. +/// +/// This intrinsic corresponds to the VPSCATTERDQ instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := a[i+63:i] +/// ENDFOR +/// \endoperation +#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale) \ + _mm512_i32scatter_epi64((base_addr), \ + _mm512_castsi512_si256(vindex), (v1), (scale)) + +/// Stores 8 packed 64-bit integer elements located in a and stores them in +/// memory locations starting at location \a base_addr at packed 32-bit integer +/// indices stored in \a vindex scaled by scale using writemask \a mask (elements +/// whose corresponding mask bit is not set are not written to memory). +/// +/// This intrinsic corresponds to the VPSCATTERDQ instructions. 
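+///
+/// A usage sketch (illustrative only; `out` and `vals` are hypothetical):
+/// \code
+///   long long out[8];
+///   __m512i vals = _mm512_set1_epi64(42);
+///   __m512i idx = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7,
+///                                   0, 0, 0, 0, 0, 0, 0, 0);
+///   _mm512_i32loscatter_epi64(out, idx, vals, 8); // out[j] = 42, j = 0..7
+/// \endcode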
+/// +/// \operation +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := a[i+63:i] +/// FI +/// ENDFOR +/// \endoperation +#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale) \ + _mm512_mask_i32scatter_epi64((base_addr), (mask), \ + _mm512_castsi512_si256(vindex), (v1), (scale)) + +#undef __DEFAULT_FN_ATTRS512 +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS + +#endif /* __AVX512FINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512ifmaintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512ifmaintrin.h new file mode 100644 index 0000000..5f7da52 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512ifmaintrin.h @@ -0,0 +1,68 @@ +/*===------------- avx512ifmaintrin.h - IFMA intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IFMAINTRIN_H +#define __IFMAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma"), __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_vpmadd52huq512((__v8di) __X, (__v8di) __Y, + (__v8di) __Z); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52hi_epu64(__W, __X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52hi_epu64(__X, __Y, __Z), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_vpmadd52luq512((__v8di) __X, (__v8di) __Y, + (__v8di) __Z); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52lo_epu64(__W, __X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52lo_epu64(__X, __Y, __Z), + (__v8di)_mm512_setzero_si512()); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512ifmavlintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512ifmavlintrin.h new file mode 100644 index 0000000..5889401 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512ifmavlintrin.h @@ -0,0 +1,119 @@ +/*===------------- avx512ifmavlintrin.h - IFMA intrinsics ------------------=== + * + * + * Part of 
the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IFMAVLINTRIN_H +#define __IFMAVLINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(256))) + + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di) __X, (__v2di) __Y, + (__v2di) __Z); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52hi_epu64(__W, __X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52hi_epu64(__X, __Y, __Z), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52hi_epu64(__W, __X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52hi_epu64(__X, __Y, __Z), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52lo_epu64(__W, __X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52lo_epu64(__X, __Y, __Z), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52lo_epu64(__W, __X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return 
(__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52lo_epu64(__X, __Y, __Z), + (__v4di)_mm256_setzero_si256()); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512pfintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512pfintrin.h new file mode 100644 index 0000000..b8bcf49 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512pfintrin.h @@ -0,0 +1,97 @@ +/*===------------- avx512pfintrin.h - PF intrinsics ------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512PFINTRIN_H +#define __AVX512PFINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512pf"))) + +#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \ + __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfdps((__mmask16)(mask), \ + (__v16si)(__m512i)(index), (void const *)(addr), \ + (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \ + __builtin_ia32_gatherpfdps((__mmask16) -1, \ + (__v16si)(__m512i)(index), (void const *)(addr), \ + (int)(scale), (int)(hint)) + +#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \ + __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \ + __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \ + __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \ + __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \ + (void *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfdps((__mmask16)(mask), 
\ + (__v16si)(__m512i)(index), (void *)(addr), \ + (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \ + __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \ + __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), (int)(hint)) + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmi2intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmi2intrin.h new file mode 100644 index 0000000..a231446 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmi2intrin.h @@ -0,0 +1,357 @@ +/*===------------- avx512vbmi2intrin.h - VBMI2 intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use <avx512vbmi2intrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __AVX512VBMI2INTRIN_H +#define __AVX512VBMI2INTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2"), __min_vector_width__(512))) + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_compress_epi16(__m512i __S, __mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D, + (__v32hi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_compress_epi16(__mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D, + (__v32hi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D, + (__v64qi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D, + (__v64qi) _mm512_setzero_si512(), + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_epi16(void *__P, __mmask32 __U, __m512i __D) +{ + __builtin_ia32_compressstorehi512_mask ((__v32hi *) __P, (__v32hi) __D, + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) +{ + __builtin_ia32_compressstoreqi512_mask ((__v64qi *) __P, (__v64qi) __D, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expand_epi16(__m512i __S, __mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D, + (__v32hi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expand_epi16(__mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D, + (__v32hi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D, + (__v64qi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D, + (__v64qi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_epi16(__m512i __S, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P, + (__v32hi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P, + (__v32hi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P, + (__v64qi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P, + (__v64qi) _mm512_setzero_si512(), + __U); +} + +#define _mm512_shldi_epi64(A, B, I) \ + (__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(I)) + +#define _mm512_mask_shldi_epi64(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shldi_epi64((A), (B), (I)), \ + (__v8di)(__m512i)(S)) + 
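+
+/* The shldi/shrdi family in this file are funnel shifts (a sketch of the
+ * semantics, not upstream text): shldi concatenates each lane of A (high
+ * half) with the matching lane of B (low half), shifts the double-width
+ * value left by I modulo the lane width, and keeps the upper half; shrdi
+ * mirrors this with B as the high half, a right shift, and the lower half
+ * kept. One 64-bit shldi lane modeled in scalar C, for illustration only
+ * (shld64 is a hypothetical helper, not an intrinsic):
+ *
+ *   uint64_t shld64(uint64_t a, uint64_t b, unsigned i) {
+ *     i &= 63;                               // immediate is taken mod 64
+ *     return i ? (a << i) | (b >> (64 - i))  // b's top bits fill from below
+ *              : a;                          // shift by 0 keeps a
+ *   }
+ */
+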
+#define _mm512_maskz_shldi_epi64(U, A, B, I) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shldi_epi64((A), (B), (I)), \ + (__v8di)_mm512_setzero_si512()) + +#define _mm512_shldi_epi32(A, B, I) \ + (__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(I)) + +#define _mm512_mask_shldi_epi32(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shldi_epi32((A), (B), (I)), \ + (__v16si)(__m512i)(S)) + +#define _mm512_maskz_shldi_epi32(U, A, B, I) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shldi_epi32((A), (B), (I)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_shldi_epi16(A, B, I) \ + (__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \ + (__v32hi)(__m512i)(B), (int)(I)) + +#define _mm512_mask_shldi_epi16(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \ + (__v32hi)(__m512i)(S)) + +#define _mm512_maskz_shldi_epi16(U, A, B, I) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \ + (__v32hi)_mm512_setzero_si512()) + +#define _mm512_shrdi_epi64(A, B, I) \ + (__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(I)) + +#define _mm512_mask_shrdi_epi64(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \ + (__v8di)(__m512i)(S)) + +#define _mm512_maskz_shrdi_epi64(U, A, B, I) \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \ + (__v8di)_mm512_setzero_si512()) + +#define _mm512_shrdi_epi32(A, B, I) \ + (__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(I)) + +#define _mm512_mask_shrdi_epi32(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \ + (__v16si)(__m512i)(S)) + +#define _mm512_maskz_shrdi_epi32(U, A, B, I) \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \ + (__v16si)_mm512_setzero_si512()) + +#define _mm512_shrdi_epi16(A, B, I) \ + (__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \ + (__v32hi)(__m512i)(B), (int)(I)) + +#define _mm512_mask_shrdi_epi16(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \ + (__v32hi)(__m512i)(S)) + +#define _mm512_maskz_shrdi_epi16(U, A, B, I) \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \ + (__v32hi)_mm512_setzero_si512()) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshldvq512((__v8di)__A, (__v8di)__B, + (__v8di)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shldv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shldv_epi64(__A, __B, __C), + (__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shldv_epi64(__A, __B, __C), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shldv_epi32(__m512i __A, __m512i __B, __m512i __C) +{ + return 
(__m512i)__builtin_ia32_vpshldvd512((__v16si)__A, (__v16si)__B, + (__v16si)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shldv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shldv_epi32(__A, __B, __C), + (__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shldv_epi32(__A, __B, __C), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shldv_epi16(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshldvw512((__v32hi)__A, (__v32hi)__B, + (__v32hi)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shldv_epi16(__m512i __A, __mmask32 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shldv_epi16(__A, __B, __C), + (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shldv_epi16(__A, __B, __C), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shrdv_epi64(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshrdvq512((__v8di)__A, (__v8di)__B, + (__v8di)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shrdv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shrdv_epi64(__A, __B, __C), + (__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shrdv_epi64(__A, __B, __C), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shrdv_epi32(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshrdvd512((__v16si)__A, (__v16si)__B, + (__v16si)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shrdv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shrdv_epi32(__A, __B, __C), + (__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shrdv_epi32(__A, __B, __C), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shrdv_epi16(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshrdvw512((__v32hi)__A, (__v32hi)__B, + (__v32hi)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shrdv_epi16(__m512i __A, __mmask32 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shrdv_epi16(__A, __B, __C), + (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shrdv_epi16(__A, __B, __C), + (__v32hi)_mm512_setzero_si512()); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif + diff --git 
a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmiintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmiintrin.h new file mode 100644 index 0000000..c0e0f94 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmiintrin.h @@ -0,0 +1,105 @@ +/*===------------- avx512vbmiintrin.h - VBMI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VBMIINTRIN_H +#define __VBMIINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi"), __min_vector_width__(512))) + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutex2var_epi8(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2varqi512((__v64qi)__A, (__v64qi)__I, + (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512(__U, + (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), + (__v64qi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512(__U, + (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), + (__v64qi)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512(__U, + (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutexvar_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_permvarqi512((__v64qi) __B, (__v64qi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_permutexvar_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_permutexvar_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_vpmultishiftqb512((__v64qi)__X, (__v64qi) __Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y), + (__v64qi)_mm512_setzero_si512()); +} + + +#undef 
__DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmivlintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmivlintrin.h new file mode 100644 index 0000000..c5b96ae --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vbmivlintrin.h @@ -0,0 +1,188 @@ +/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VBMIVLINTRIN_H +#define __VBMIVLINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"), __min_vector_width__(256))) + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpermi2varqi128((__v16qi)__A, + (__v16qi)__I, + (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128(__U, + (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), + (__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128(__U, + (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), + (__v16qi)__I); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128(__U, + (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutex2var_epi8(__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpermi2varqi256((__v32qi)__A, (__v32qi)__I, + (__v32qi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256(__U, + (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), + (__v32qi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I, __mmask32 __U, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256(__U, + (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), + (__v32qi)__I); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256(__U, + (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutexvar_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_permvarqi128((__v16qi)__B, (__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutexvar_epi8 (__mmask16 
__M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_permutexvar_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_permutexvar_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_permvarqi256((__v32qi) __B, (__v32qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_permutexvar_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_permutexvar_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_vpmultishiftqb128((__v16qi)__X, (__v16qi)__Y); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X, + __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_multishift_epi64_epi8(__X, __Y), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_multishift_epi64_epi8(__X, __Y), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_vpmultishiftqb256((__v32qi)__X, (__v32qi)__Y); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y), + (__v32qi)_mm256_setzero_si256()); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbf16intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbf16intrin.h new file mode 100644 index 0000000..1b1a744 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbf16intrin.h @@ -0,0 +1,474 @@ +/*===--------- avx512vlbf16intrin.h - AVX512_BF16 intrinsics ---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." 
+#endif + +#ifndef __AVX512VLBF16INTRIN_H +#define __AVX512VLBF16INTRIN_H + +typedef short __m128bh __attribute__((__vector_size__(16), __aligned__(16))); + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl, avx512bf16"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl, avx512bf16"), __min_vector_width__(256))) + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __B +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __B, and higher 64 bits come from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_cvtne2ps_pbh(__m128 __A, __m128 __B) { + return (__m128bh)__builtin_ia32_cvtne2ps2bf16_128((__v4sf) __A, + (__v4sf) __B); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __B +/// A 128-bit vector of [4 x float]. +/// \param __W +/// A 128-bit vector of [8 x bfloat]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element from __W. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __B, and higher 64 bits come from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtne2ps_pbh(__A, __B), + (__v8hi)__W); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __B +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element is zero. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __B, and higher 64 bits come from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtne2ps_pbh(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __B +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from +/// conversion of __B, and higher 128 bits come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS256 +_mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) { + return (__m256bh)__builtin_ia32_cvtne2ps2bf16_256((__v8sf) __A, + (__v8sf) __B); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. 
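+/// (The "NE" in the instruction name is round-to-nearest-even: each float
+/// is rounded to the nearest bfloat16, which keeps the top 16 bits of the
+/// rounded single-precision encoding; e.g. 1.0f, 0x3F800000, becomes the
+/// bf16 pattern 0x3F80.)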
+/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __B +/// A 256-bit vector of [8 x float]. +/// \param __W +/// A 256-bit vector of [16 x bfloat]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element from __W. +/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from +/// conversion of __B, and higher 128 bits come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) { + return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtne2ps_pbh(__A, __B), + (__v16hi)__W); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __B +/// A 256-bit vector of [8 x float]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element is zero. +/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from +/// conversion of __B, and higher 128 bits come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) { + return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtne2ps_pbh(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __A, and higher 64 bits are 0. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_cvtneps_pbh(__m128 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8)-1); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __W +/// A 128-bit vector of [8 x bfloat]. +/// \param __U +/// A 4-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element from __W. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __A, and higher 64 bits are 0. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, + (__v8hi)__W, + (__mmask8)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 4-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element is zero. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __A, and higher 64 bits are 0. 
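+///
+/// A minimal usage sketch (illustrative values, not upstream text):
+/// \code
+///   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
+///   __m128bh r = _mm_maskz_cvtneps_pbh(0x5, a); // convert elements 0 and 2,
+///                                               // zero elements 1 and 3
+/// \endcode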
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, + (__v8hi)_mm_setzero_si128(), + (__mmask8)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_cvtneps_pbh(__m256 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, + (__v8hi)_mm_undefined_si128(), + (__mmask8)-1); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __W +/// A 256-bit vector of [8 x bfloat]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element from __W. +/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, + (__v8hi)__W, + (__mmask8)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element is zero. +/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, + (__v8hi)_mm_setzero_si128(), + (__mmask8)__U); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \param __B +/// A 128-bit vector of [8 x bfloat]. +/// \param __D +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) { + return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D, + (__v4si)__A, + (__v4si)__B); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \param __B +/// A 128-bit vector of [8 x bfloat]. +/// \param __D +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means __D. 
+/// \returns A 128-bit vector of [4 x float] that comes from the dot product of
+///    __A, __B and __D.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_dpbf16_ps(__m128 __D, __mmask8 __U, __m128bh __A, __m128bh __B) {
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
+                                             (__v4sf)__D);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 128-bit vector of [8 x bfloat].
+/// \param __B
+///    A 128-bit vector of [8 x bfloat].
+/// \param __D
+///    A 128-bit vector of [4 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
+/// \returns A 128-bit vector of [4 x float] that comes from the dot product of
+///    __A, __B and __D.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) {
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+                                             (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
+                                             (__v4sf)_mm_setzero_si128());
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 256-bit vector of [16 x bfloat].
+/// \param __B
+///    A 256-bit vector of [16 x bfloat].
+/// \param __D
+///    A 256-bit vector of [8 x float].
+/// \returns A 256-bit vector of [8 x float] that comes from the dot product of
+///    __A, __B and __D.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) {
+  return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D,
+                                             (__v8si)__A,
+                                             (__v8si)__B);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 256-bit vector of [16 x bfloat].
+/// \param __B
+///    A 256-bit vector of [16 x bfloat].
+/// \param __D
+///    A 256-bit vector of [8 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
+/// \returns A 256-bit vector of [8 x float] that comes from the dot product of
+///    __A, __B and __D.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_dpbf16_ps(__m256 __D, __mmask8 __U, __m256bh __A, __m256bh __B) {
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
+                                             (__v8sf)__D);
+}
+
+/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VDPBF16PS instructions.
+///
+/// \param __A
+///    A 256-bit vector of [16 x bfloat].
+/// \param __B
+///    A 256-bit vector of [16 x bfloat].
+/// \param __D
+///    A 256-bit vector of [8 x float].
+/// \param __U
+///    An 8-bit mask value specifying what is chosen for each element.
+///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
+/// \returns A 256-bit vector of [8 x float] that comes from the dot product of
+///    __A, __B and __D.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) {
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
+                                             (__v8sf)_mm256_setzero_si256());
+}
+
+/// Convert One Single float Data to One BF16 Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions.
+///
+/// \param __A
+///    A float value.
+/// \returns A bf16 value whose sign field and exponent field are unchanged and
+///    whose fraction field is truncated to 7 bits.
+static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
+  __v4sf __V = {__A, 0, 0, 0};
+  __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
+      (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
+  return __R[0];
+}
+
+/// Convert Packed BF16 Data to Packed float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __A
+///    A 128-bit vector of [8 x bfloat].
+/// \returns A 256-bit vector of [8 x float] that comes from conversion of __A.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
+  return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
+      (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __U
+///    An 8-bit mask. Elements are zeroed out when the corresponding mask
+///    bit is not set.
+/// \param __A
+///    A 128-bit vector of [8 x bfloat].
+/// \returns A 256-bit vector of [8 x float] that comes from conversion of __A.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
+  return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
+      (__m256i)_mm256_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using merging mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __S
+///    A 256-bit vector of [8 x float]. Elements are copied from __S when
+///    the corresponding mask bit is not set.
+/// \param __U
+///    An 8-bit mask. Elements come from __S when the corresponding mask
+///    bit is not set.
+/// \param __A
+///    A 128-bit vector of [8 x bfloat].
+/// \returns A 256-bit vector of [8 x float] that comes from conversion of __A.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
+  return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32(
+      (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32((__m128i)__A),
+      16));
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbitalgintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbitalgintrin.h
new file mode 100644
index 0000000..5154eae
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbitalgintrin.h
@@ -0,0 +1,145 @@
+/*===---- avx512vlbitalgintrin.h - BITALG intrinsics -----------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbitalgintrin.h> directly; include <immintrin.h> instead."
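+/* Editor's note: an illustrative sketch of the per-element population-count
+ * intrinsics defined below in this header, not part of the upstream file; it
+ * assumes a compiler targeting avx512bitalg and avx512vl.
+ *
+ *   __m128i v   = _mm_set1_epi8(0x0F);
+ *   __m128i all = _mm_popcnt_epi8(v);               // every byte becomes 4
+ *   __m128i low = _mm_maskz_popcnt_epi8(0x000F, v); // low 4 bytes counted, rest zeroed
+ */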
+#endif + +#ifndef __AVX512VLBITALGINTRIN_H +#define __AVX512VLBITALGINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg"), __min_vector_width__(256))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi16(__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcntw_256((__v16hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_selectw_256((__mmask16) __U, + (__v16hi) _mm256_popcnt_epi16(__B), + (__v16hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B) +{ + return _mm256_mask_popcnt_epi16((__m256i) _mm256_setzero_si256(), + __U, + __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcntw_128((__v8hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B) +{ + return (__m128i) __builtin_ia32_selectw_128((__mmask8) __U, + (__v8hi) _mm_popcnt_epi16(__B), + (__v8hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B) +{ + return _mm_mask_popcnt_epi16((__m128i) _mm_setzero_si128(), + __U, + __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi8(__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcntb_256((__v32qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_selectb_256((__mmask32) __U, + (__v32qi) _mm256_popcnt_epi8(__B), + (__v32qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B) +{ + return _mm256_mask_popcnt_epi8((__m256i) _mm256_setzero_si256(), + __U, + __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcntb_128((__v16qi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B) +{ + return (__m128i) __builtin_ia32_selectb_128((__mmask16) __U, + (__v16qi) _mm_popcnt_epi8(__B), + (__v16qi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B) +{ + return _mm_mask_popcnt_epi8((__m128i) _mm_setzero_si128(), + __U, + __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_mask_bitshuffle_epi64_mask(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask((__v32qi) __A, + (__v32qi) __B, + __U); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B) +{ + return _mm256_mask_bitshuffle_epi64_mask((__mmask32) -1, + __A, + __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_mask_bitshuffle_epi64_mask(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask((__v16qi) __A, + (__v16qi) __B, + __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B) +{ + return 
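+/* Editor's note (not part of the upstream header): in the bit-shuffle
+ * operations defined here, each byte of __B selects a bit position (0-63)
+ * inside the corresponding 64-bit lane of __A, and the selected bits are
+ * gathered into the returned mask, one mask bit per byte of __B. A sketch,
+ * assuming an avx512bitalg+avx512vl target:
+ *
+ *   __m128i data = _mm_set1_epi64x(0x5555555555555555LL);
+ *   __m128i idx  = _mm_set1_epi8(0);                     // every byte selects bit 0
+ *   __mmask16 m  = _mm_bitshuffle_epi64_mask(data, idx); // all 16 mask bits = 1
+ */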
_mm_mask_bitshuffle_epi64_mask((__mmask16) -1,
+                                                __A,
+                                                __B);
+}
+
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbwintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbwintrin.h
new file mode 100644
index 0000000..6ed10ed
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlbwintrin.h
@@ -0,0 +1,2809 @@
+/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLBWINTRIN_H
+#define __AVX512VLBWINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(256)))
+
+/* Integer compare */
+
+#define _mm_cmp_epi8_mask(a, b, p) \
+  (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+                                         (__v16qi)(__m128i)(b), (int)(p), \
+                                         (__mmask16)-1)
+
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
+  (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+                                         (__v16qi)(__m128i)(b), (int)(p), \
+                                         (__mmask16)(m))
+
+#define _mm_cmp_epu8_mask(a, b, p) \
+  (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)-1)
+
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
+  (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)(m))
+
+#define _mm256_cmp_epi8_mask(a, b, p) \
+  (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+                                         (__v32qi)(__m256i)(b), (int)(p), \
+                                         (__mmask32)-1)
+
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
+  (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+                                         (__v32qi)(__m256i)(b), (int)(p), \
+                                         (__mmask32)(m))
+
+#define _mm256_cmp_epu8_mask(a, b, p) \
+  (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)-1)
+
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
+  (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)(m))
+
+#define _mm_cmp_epi16_mask(a, b, p) \
+  (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+                                        (__v8hi)(__m128i)(b), (int)(p), \
+                                        (__mmask8)-1)
+
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
+  (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+                                        (__v8hi)(__m128i)(b), (int)(p), \
+                                        (__mmask8)(m))
+
+#define _mm_cmp_epu16_mask(a, b, p) \
+  (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+                                         (__v8hi)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1)
+
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
+  (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+                                         (__v8hi)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m))
+
+#define _mm256_cmp_epi16_mask(a, b, p) \
+  (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+                                         (__v16hi)(__m256i)(b), (int)(p), \
+                                         (__mmask16)-1)
+
+#define
_mm256_mask_cmp_epi16_mask(m, a, b, p) \ + (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)(m)) + +#define _mm256_cmp_epu16_mask(a, b, p) \ + (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)-1) + +#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \ + (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)(m)) + +#define _mm_cmpeq_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu8_mask(A, B) \ + 
_mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi16_mask(k, A, B) \ + 
_mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_add_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_add_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_add_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_add_epi16(__A, __B), + 
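+/* Editor's note (not part of the upstream header): every mask/maskz wrapper
+ * in this file follows the pattern seen here, compute the unmasked operation,
+ * then blend it per lane with either the passthrough vector (mask) or zero
+ * (maskz) via __builtin_ia32_select*. Illustrative use, assuming an
+ * avx512bw+avx512vl target:
+ *
+ *   __m256i a = _mm256_set1_epi16(5), b = _mm256_set1_epi16(7);
+ *   __m256i r = _mm256_maskz_add_epi16(0x00FF, a, b); // low 8 words = 12, high 8 = 0
+ */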
(__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_sub_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_sub_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sub_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sub_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_add_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_add_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_add_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_add_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_sub_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_sub_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sub_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sub_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mullo_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mullo_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mullo_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mullo_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __W, + (__v16qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __W, + (__v32qi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __W, + (__v8hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __W, + (__v16hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_abs_epi8(__A), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_abs_epi8(__A), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_abs_epi8(__A), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_abs_epi8(__A), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_abs_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_abs_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_abs_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_abs_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packs_epi32(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, 
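+/* Editor's note (not part of the upstream header): packs_epi32 narrows each
+ * 32-bit lane to 16 bits with signed saturation before the mask is applied to
+ * the 16-bit results. A sketch, assuming an avx512bw+avx512vl target:
+ *
+ *   __m128i w = _mm_set1_epi32(100000);             // exceeds INT16_MAX
+ *   __m128i p = _mm_maskz_packs_epi32(0x0F, w, w);  // low 4 words = 32767, high 4 = 0
+ */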
__m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packs_epi32(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packs_epi32(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packs_epi32(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packs_epi16(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packs_epi16(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packs_epi16(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packs_epi16(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packus_epi32(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packus_epi32(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packus_epi32(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packus_epi32(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packus_epi16(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packus_epi16(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packus_epi16(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packus_epi16(__m256i __W, 
__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packus_epi16(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return 
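+/* Editor's note (not part of the upstream header): adds_epu16 clamps at
+ * 0xFFFF instead of wrapping, and the mask then picks lanes from that
+ * saturated result. A sketch, assuming an avx512bw+avx512vl target:
+ *
+ *   __m128i x = _mm_set1_epi16((short)0xFFF0);
+ *   __m128i y = _mm_set1_epi16(0x0040);
+ *   __m128i m = _mm_maskz_adds_epu16(0x0F, x, y);   // low 4 lanes = 0xFFFF, high 4 = 0
+ */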
(__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_avg_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_avg_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_avg_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_avg_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_avg_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_avg_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_avg_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_avg_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return 
(__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + 
(__v16qi)_mm_min_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epu16(__A, __B), + (__v16hi)__W); +} + +static 
__inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_shuffle_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_shuffle_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_shuffle_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_shuffle_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i 
__DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I, + (__v8hi) __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), + (__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), + (__v8hi)__I); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I, + (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), + (__v16hi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), + (__v16hi)__I); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i 
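+/* Editor's note (not part of the upstream header): the permutex2var family
+ * above performs a two-table lookup. In the 8-element form, bits 0-2 of each
+ * index pick an element and bit 3 picks the table (__A or __B). Illustrative
+ * interleave, assuming an avx512bw+avx512vl target:
+ *
+ *   __m128i lo  = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ *   __m128i hi  = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);
+ *   __m128i idx = _mm_setr_epi16(0, 8, 1, 9, 2, 10, 3, 11);
+ *   __m128i r   = _mm_permutex2var_epi16(lo, idx, hi); // 0,10,1,11,2,12,3,13
+ */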
__DEFAULT_FN_ATTRS128 +_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_maddubs_epi16(__X, __Y), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_maddubs_epi16(__X, __Y), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X, + __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_maddubs_epi16(__X, __Y), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_maddubs_epi16(__X, __Y), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_madd_epi16(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_madd_epi16(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_madd_epi16(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_madd_epi16(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi16_epi8 (__m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi16_epi8 (__m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi16_epi8 
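+/* Editor's note (not part of the upstream header): the cvt*epi16_epi8 family
+ * defined here and just below narrows words to bytes in three ways, with the
+ * results in the low 8 bytes. A sketch, assuming an avx512bw+avx512vl target:
+ *
+ *   __m128i w = _mm_set1_epi16(300);
+ *   __m128i s = _mm_cvtsepi16_epi8(w);   // signed saturation:   bytes = 127
+ *   __m128i u = _mm_cvtusepi16_epi8(w);  // unsigned saturation: bytes = 255
+ *   __m128i t = _mm_cvtepi16_epi8(w);    // truncation:          bytes = 300 & 0xFF = 44
+ */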
(__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi16_epi8 (__m128i __A) { + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v8hi)__A, __v8qi), + (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi16_epi8 (__m256i __A) { + return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm256_cvtepi16_epi8(__A), + (__v16qi)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm256_cvtepi16_epi8(__A), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) 
__A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhrs_epi16(__X, __Y), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhrs_epi16(__X, __Y), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhrs_epi16(__X, __Y), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhrs_epi16(__X, __Y), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpackhi_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpackhi_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + 
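Every _mm_mask_*/_mm_maskz_* wrapper in this header lowers to the same lane-select builtin (__builtin_ia32_selectw_128 and friends): lanes whose mask bit is set take the freshly computed result, while inactive lanes either keep the corresponding lane of the source operand (mask form) or are zeroed (maskz form). The short C sketch below is not part of the patch; it assumes a host compiler invoked with -mavx512bw -mavx512vl, and it uses the _mm_mask_mulhrs_epi16/_mm_maskz_mulhrs_epi16 pair defined nearby purely as a representative example of the observable semantics:

/* Minimal usage sketch (illustration only, not part of this patch).
 * Build with: cc -mavx512bw -mavx512vl demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a   = _mm_set1_epi16(1000);
  __m128i b   = _mm_set1_epi16(2000);
  __m128i src = _mm_set1_epi16(-1); /* pass-through value for inactive lanes */
  __mmask8 k  = 0x0F;               /* only lanes 0-3 are active */

  /* Merge masking: inactive lanes keep the corresponding lane of src. */
  __m128i merged = _mm_mask_mulhrs_epi16(src, k, a, b);
  /* Zero masking: inactive lanes are set to zero. */
  __m128i zeroed = _mm_maskz_mulhrs_epi16(k, a, b);

  short m[8], z[8];
  _mm_storeu_si128((__m128i *)m, merged);
  _mm_storeu_si128((__m128i *)z, zeroed);
  for (int i = 0; i < 8; ++i)
    printf("lane %d: merged=%d zeroed=%d\n", i, m[i], z[i]);
  return 0;
}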
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpackhi_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpackhi_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpackhi_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpackhi_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpackhi_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpackhi_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpacklo_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpacklo_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpacklo_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpacklo_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpacklo_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpacklo_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpacklo_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + 
(__v16hi)_mm256_unpacklo_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepi8_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepi8_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepi8_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepi8_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepu8_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepu8_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepu8_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepu8_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + + +#define _mm_mask_shufflehi_epi16(W, U, A, imm) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflehi_epi16((A), (imm)), \ + (__v8hi)(__m128i)(W)) + +#define _mm_maskz_shufflehi_epi16(U, A, imm) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflehi_epi16((A), (imm)), \ + (__v8hi)_mm_setzero_si128()) + +#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \ + (__v16hi)(__m256i)(W)) + +#define _mm256_maskz_shufflehi_epi16(U, A, imm) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \ + (__v16hi)_mm256_setzero_si256()) + +#define _mm_mask_shufflelo_epi16(W, U, A, imm) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflelo_epi16((A), (imm)), \ + (__v8hi)(__m128i)(W)) + +#define _mm_maskz_shufflelo_epi16(U, A, imm) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflelo_epi16((A), (imm)), \ + (__v8hi)_mm_setzero_si128()) + +#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflelo_epi16((A), \ + (imm)), \ + (__v16hi)(__m256i)(W)) + +#define _mm256_maskz_shufflelo_epi16(U, A, imm) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflelo_epi16((A), \ + (imm)), \ + (__v16hi)_mm256_setzero_si256()) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sllv_epi16(__m256i 
__A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sllv_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sllv_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_sllv_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sllv_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sllv_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sll_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sll_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sll_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sll_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_slli_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_slli_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, + unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_slli_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_slli_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srlv_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srlv_epi16(__m256i __W, 
__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srlv_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srlv_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srlv_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srlv_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srlv_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srav_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srav_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srav_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srav_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srav_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srav_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sra_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sra_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sra_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sra_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) 
+{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srai_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srai_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, + unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srai_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srai_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srl_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srl_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srl_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srl_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srli_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srli_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srli_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srli_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __A, + (__v8hi) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __A, + (__v8hi) _mm_setzero_si128 ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __A, + (__v16hi) __W); +} + +static __inline__ 
__m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __A, + (__v16hi) _mm256_setzero_si256 ()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __A, + (__v16qi) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __A, + (__v16qi) _mm_setzero_si128 ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __A, + (__v32qi) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __A, + (__v32qi) _mm256_setzero_si256 ()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_selectb_128(__M, + (__v16qi) _mm_set1_epi8(__A), + (__v16qi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_set1_epi8 (__mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_selectb_128(__M, + (__v16qi) _mm_set1_epi8(__A), + (__v16qi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_set1_epi8(__A), + (__v32qi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_set1_epi8 (__mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_set1_epi8(__A), + (__v32qi) _mm256_setzero_si256()); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_loadu_epi16 (void const *__P) +{ + struct __loadu_epi16 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi16*)__P)->__v; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_loadu_epi16 (void const *__P) +{ + struct __loadu_epi16 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi16*)__P)->__v; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_loadu_epi8 (void const *__P) +{ + struct __loadu_epi8 { + __m128i_u __v; + } 
__attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi8*)__P)->__v; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P, + (__v16qi) __W, + (__mmask16) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_loadu_epi8 (void const *__P) +{ + struct __loadu_epi8 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi8*)__P)->__v; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P, + (__v32qi) __W, + (__mmask32) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_storeu_epi16 (void *__P, __m128i __A) +{ + struct __storeu_epi16 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi16*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedquhi128_mask ((__v8hi *) __P, + (__v8hi) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_storeu_epi16 (void *__P, __m256i __A) +{ + struct __storeu_epi16 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi16*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A) +{ + __builtin_ia32_storedquhi256_mask ((__v16hi *) __P, + (__v16hi) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_storeu_epi8 (void *__P, __m128i __A) +{ + struct __storeu_epi8 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi8*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A) +{ + __builtin_ia32_storedquqi128_mask ((__v16qi *) __P, + (__v16qi) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_storeu_epi8 (void *__P, __m256i __A) +{ + struct __storeu_epi8 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi8*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A) +{ + __builtin_ia32_storedquqi256_mask ((__v32qi *) __P, + (__v32qi) __A, + (__mmask32) __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_test_epi8_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_test_epi8_mask (__m256i __A, __m256i __B) 
+{ + return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpneq_epi8_mask (__U, _mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_test_epi16_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpneq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpneq_epi16_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_test_epi16_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpneq_epi16_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256 ()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpneq_epi16_mask (__U, _mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_testn_epi8_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpeq_epi8_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_testn_epi8_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpeq_epi8_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpeq_epi8_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_testn_epi16_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpeq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpeq_epi16_mask (__U, _mm_and_si128(__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_testn_epi16_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpeq_epi16_mask (_mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpeq_epi16_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_movepi8_mask (__m128i __A) +{ + return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_movepi8_mask (__m256i __A) +{ + return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_movepi16_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_movepi16_mask (__m256i __A) +{ + return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi8 (__mmask16 __A) +{ + return 
(__m128i) __builtin_ia32_cvtmask2b128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi8 (__mmask32 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2b256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi16 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2w128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi16 (__mmask16 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2w256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128(__M, + (__v16qi) _mm_broadcastb_epi8(__A), + (__v16qi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128(__M, + (__v16qi) _mm_broadcastb_epi8(__A), + (__v16qi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_broadcastb_epi8(__A), + (__v32qi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_broadcastb_epi8(__A), + (__v32qi) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128(__M, + (__v8hi) _mm_broadcastw_epi16(__A), + (__v8hi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128(__M, + (__v8hi) _mm_broadcastw_epi16(__A), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256(__M, + (__v16hi) _mm256_broadcastw_epi16(__A), + (__v16hi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256(__M, + (__v16hi) _mm256_broadcastw_epi16(__A), + (__v16hi) _mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_selectw_256 (__M, + (__v16hi) _mm256_set1_epi16(__A), + (__v16hi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_set1_epi16 (__mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_selectw_256(__M, + (__v16hi)_mm256_set1_epi16(__A), + (__v16hi) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_selectw_128(__M, + (__v8hi) _mm_set1_epi16(__A), + (__v8hi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_set1_epi16 (__mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_selectw_128(__M, + (__v8hi) _mm_set1_epi16(__A), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutexvar_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_permvarhi128((__v8hi) __B, (__v8hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutexvar_epi16 (__mmask8 __M, 
__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_permutexvar_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_permutexvar_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_permvarhi256((__v16hi) __B, (__v16hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_permutexvar_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_permutexvar_epi16(__A, __B), + (__v16hi)__W); +} + +#define _mm_mask_alignr_epi8(W, U, A, B, N) \ + (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \ + (__v16qi)(__m128i)(W)) + +#define _mm_maskz_alignr_epi8(U, A, B, N) \ + (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \ + (__v16qi)_mm_setzero_si128()) + +#define _mm256_mask_alignr_epi8(W, U, A, B, N) \ + (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \ + (__v32qi)(__m256i)(W)) + +#define _mm256_maskz_alignr_epi8(U, A, B, N) \ + (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \ + (__v32qi)_mm256_setzero_si256()) + +#define _mm_dbsad_epu8(A, B, imm) \ + (__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(imm)) + +#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \ + (__v8hi)(__m128i)(W)) + +#define _mm_maskz_dbsad_epu8(U, A, B, imm) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \ + (__v8hi)_mm_setzero_si128()) + +#define _mm256_dbsad_epu8(A, B, imm) \ + (__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), (int)(imm)) + +#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \ + (__v16hi)(__m256i)(W)) + +#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \ + (__v16hi)_mm256_setzero_si256()) + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __AVX512VLBWINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlcdintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlcdintrin.h new file mode 100644 index 0000000..cc8b725 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlcdintrin.h @@ -0,0 +1,225 @@ +/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlcdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLCDINTRIN_H
+#define __AVX512VLCDINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"), __min_vector_width__(256)))
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m128i) _mm_set1_epi64x((long long) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m256i) _mm256_set1_epi64x((long long)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m128i) _mm_set1_epi32((int)__A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m256i) _mm256_set1_epi32((int)__A);
+}
+
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_conflict_epi64 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128 ((__v2di) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_conflict_epi64(__A),
+                                             (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_conflict_epi64(__A),
+                                             (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_conflict_epi64 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256 ((__v4di) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_conflict_epi64(__A),
+                                             (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_conflict_epi64(__A),
+                                             (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_conflict_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128 ((__v4si) __A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                             (__v4si)_mm_conflict_epi32(__A),
+                                             (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+                                             (__v4si)_mm_conflict_epi32(__A),
+                                             (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_conflict_epi32 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256 ((__v8si) __A);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+
(__v8si)_mm256_conflict_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_conflict_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_lzcnt_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128 ((__v4si) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_lzcnt_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_lzcnt_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_lzcnt_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256 ((__v8si) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_lzcnt_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_lzcnt_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_lzcnt_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128 ((__v2di) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_lzcnt_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_lzcnt_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_lzcnt_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256 ((__v4di) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_lzcnt_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_lzcnt_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __AVX512VLCDINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vldqintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vldqintrin.h new file mode 100644 index 0000000..95ba574 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vldqintrin.h @@ -0,0 +1,1167 @@ +/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VLDQINTRIN_H
+#define __AVX512VLDQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"), __min_vector_width__(256)))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
+  return (__m256i) ((__v4du) __A * (__v4du) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_mullo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_mullo_epi64(__A, __B),
+                                             (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_mullo_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+                                             (__v4di)_mm256_mullo_epi64(__A, __B),
+                                             (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mullo_epi64 (__m128i __A, __m128i __B) {
+  return (__m128i) ((__v2du) __A * (__v2du) __B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_mullo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_mullo_epi64(__A, __B),
+                                             (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_mullo_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+                                             (__v2di)_mm_mullo_epi64(__A, __B),
+                                             (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_andnot_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                              (__v4df)_mm256_andnot_pd(__A, __B),
+                                              (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_andnot_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+                                              (__v4df)_mm256_andnot_pd(__A, __B),
+                                              (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_andnot_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_andnot_pd(__A, __B),
+                                              (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_andnot_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+                                              (__v2df)_mm_andnot_pd(__A, __B),
+                                              (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_andnot_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_andnot_ps(__A, __B),
+                                             (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_andnot_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+                                             (__v8sf)_mm256_andnot_ps(__A, __B),
+                                             (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_andnot_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+  return
(__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_andnot_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_andnot_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_and_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_and_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_and_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_and_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_and_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_and_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_and_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_and_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_and_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_and_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_and_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_and_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_and_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_and_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_xor_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_xor_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_xor_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_xor_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_xor_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_xor_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return 
(__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_xor_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_xor_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_xor_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_xor_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_or_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_or_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_or_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_or_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_or_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_or_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_or_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_or_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_or_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_or_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_or_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_or_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_or_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_or_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtpd_epi64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtpd_epi64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtpd_epu64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtpd_epu64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtps_epi64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtps_epi64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtps_epu64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
+_mm256_cvtps_epu64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_pd (__m128i __A) { + return (__m128d)__builtin_convertvector((__v2di)__A, __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepi64_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepi64_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_pd (__m256i __A) { + return (__m256d)__builtin_convertvector((__v4di)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepi64_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepi64_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_ps (__m256i __A) { + return (__m128)__builtin_convertvector((__v4di)__A, __v4sf); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepi64_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepi64_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttpd_epi64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttpd_epi64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttpd_epu64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttpd_epu64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttps_epi64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttps_epi64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_cvttps_epu64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttps_epu64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtepu64_pd (__m128i __A) { + return (__m128d)__builtin_convertvector((__v2du)__A, __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepu64_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepu64_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_cvtepu64_pd (__m256i __A) { + return (__m256d)__builtin_convertvector((__v4du)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepu64_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepu64_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtepu64_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_cvtepu64_ps (__m256i __A) { + return (__m128)__builtin_convertvector((__v4du)__A, __v4sf); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepu64_ps(__A), + (__v4sf)__W); +} + 
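/* Editorial aside, not part of the vendored header: a minimal sketch of the
 * two masking conventions used by every intrinsic in this file. The
 * _mm_mask_* forms merge unselected lanes from the passthrough operand
 * (__W); the _mm_maskz_* forms zero them. This builds only on the
 * _mm_mask_cvtepi64_pd / _mm_maskz_cvtepi64_pd definitions above; the
 * demo_* names are hypothetical, and it assumes a toolchain compiling with
 * -mavx512dq -mavx512vl. */
#include <immintrin.h>

static __m128d demo_merge_convert(__m128i v64, __m128d passthru)
{
    /* Mask 0x1: lane 0 is converted from the signed 64-bit element,
       lane 1 is carried over unchanged from `passthru`. */
    return _mm_mask_cvtepi64_pd(passthru, (__mmask8)0x1, v64);
}

static __m128d demo_zero_convert(__m128i v64)
{
    /* Same mask, zero-masking variant: lane 1 becomes +0.0. */
    return _mm_maskz_cvtepi64_pd((__mmask8)0x1, v64);
}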
+static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepu64_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +#define _mm_range_pd(A, B, C) \ + (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1) + +#define _mm_mask_range_pd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)) + +#define _mm_maskz_range_pd(U, A, B, C) \ + (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm256_range_pd(A, B, C) \ + (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1) + +#define _mm256_mask_range_pd(W, U, A, B, C) \ + (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_range_pd(U, A, B, C) \ + (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm_range_ps(A, B, C) \ + (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1) + +#define _mm_mask_range_ps(W, U, A, B, C) \ + (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)(__m128)(W), (__mmask8)(U)) + +#define _mm_maskz_range_ps(U, A, B, C) \ + (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm256_range_ps(A, B, C) \ + (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1) + +#define _mm256_mask_range_ps(W, U, A, B, C) \ + (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)(__m256)(W), (__mmask8)(U)) + +#define _mm256_maskz_range_ps(U, A, B, C) \ + (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm_reduce_pd(A, B) \ + (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1) + +#define _mm_mask_reduce_pd(W, U, A, B) \ + (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)) + +#define _mm_maskz_reduce_pd(U, A, B) \ + (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm256_reduce_pd(A, B) \ + (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1) + +#define _mm256_mask_reduce_pd(W, U, A, B) \ + (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_reduce_pd(U, A, B) \ + (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm_reduce_ps(A, B) \ + 
(__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1) + +#define _mm_mask_reduce_ps(W, U, A, B) \ + (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)) + +#define _mm_maskz_reduce_ps(U, A, B) \ + (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm256_reduce_ps(A, B) \ + (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1) + +#define _mm256_mask_reduce_ps(W, U, A, B) \ + (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_reduce_ps(U, A, B) \ + (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)) + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_movepi32_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_movepi32_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi32 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2d128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi32 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2d256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi64 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2q128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi64 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2q256 (__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_movepi64_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_movepi64_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_broadcast_f32x2 (__m128 __A) +{ + return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__M, + (__v8sf)_mm256_broadcast_f32x2(__A), + (__v8sf)__O); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__M, + (__v8sf)_mm256_broadcast_f32x2(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_broadcast_f64x2(__m128d __A) +{ + return (__m256d)__builtin_shufflevector((__v2df)__A, (__v2df)__A, + 0, 1, 0, 1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M, + (__v4df)_mm256_broadcast_f64x2(__A), + (__v4df)__O); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M, + (__v4df)_mm256_broadcast_f64x2(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcast_i32x2 (__m128i 
__A) +{ + return (__m128i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 0, 1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_broadcast_i32x2(__A), + (__v4si)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_broadcast_i32x2(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcast_i32x2 (__m128i __A) +{ + return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_broadcast_i32x2(__A), + (__v8si)__O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_broadcast_i32x2(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcast_i64x2(__m128i __A) +{ + return (__m256i)__builtin_shufflevector((__v2di)__A, (__v2di)__A, + 0, 1, 0, 1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_broadcast_i64x2(__A), + (__v4di)__O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_broadcast_i64x2(__A), + (__v4di)_mm256_setzero_si256()); +} + +#define _mm256_extractf64x2_pd(A, imm) \ + (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1) + +#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \ + (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_extractf64x2_pd(U, A, imm) \ + (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm256_extracti64x2_epi64(A, imm) \ + (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \ + (int)(imm), \ + (__v2di)_mm_undefined_si128(), \ + (__mmask8)-1) + +#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \ + (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \ + (int)(imm), \ + (__v2di)(__m128i)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_extracti64x2_epi64(U, A, imm) \ + (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \ + (int)(imm), \ + (__v2di)_mm_setzero_si128(), \ + (__mmask8)(U)) + +#define _mm256_insertf64x2(A, B, imm) \ + (__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \ + (__v2df)(__m128d)(B), (int)(imm)) + +#define _mm256_mask_insertf64x2(W, U, A, B, imm) \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_insertf64x2((A), (B), (imm)), \ + (__v4df)(__m256d)(W)) + +#define _mm256_maskz_insertf64x2(U, A, B, imm) \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_insertf64x2((A), (B), (imm)), \ + 
(__v4df)_mm256_setzero_pd()) + +#define _mm256_inserti64x2(A, B, imm) \ + (__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \ + (__v2di)(__m128i)(B), (int)(imm)) + +#define _mm256_mask_inserti64x2(W, U, A, B, imm) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_inserti64x2((A), (B), (imm)), \ + (__v4di)(__m256i)(W)) + +#define _mm256_maskz_inserti64x2(U, A, B, imm) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_inserti64x2((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256()) + +#define _mm_mask_fpclass_pd_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)(U)) + +#define _mm_fpclass_pd_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)-1) + +#define _mm256_mask_fpclass_pd_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \ + (__mmask8)(U)) + +#define _mm256_fpclass_pd_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \ + (__mmask8)-1) + +#define _mm_mask_fpclass_ps_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)(U)) + +#define _mm_fpclass_ps_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)-1) + +#define _mm256_mask_fpclass_ps_mask(U, A, imm) \ + (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__mmask8)(U)) + +#define _mm256_fpclass_ps_mask(A, imm) \ + (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__mmask8)-1) + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlintrin.h new file mode 100644 index 0000000..968c10e --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlintrin.h @@ -0,0 +1,8445 @@ +/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif + +#ifndef __AVX512VLINTRIN_H +#define __AVX512VLINTRIN_H + +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256))) + +typedef short __v2hi __attribute__((__vector_size__(4))); +typedef char __v4qi __attribute__((__vector_size__(4))); +typedef char __v2qi __attribute__((__vector_size__(2))); + +/* Integer compare */ + +#define _mm_cmpeq_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu32_mask(k, A, B) \ + 
_mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) +#define 
_mm256_cmplt_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_add_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_add_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_add_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, 
__m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_add_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sub_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sub_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sub_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sub_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_add_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_add_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_add_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_add_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sub_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sub_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sub_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sub_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epi32(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epi32(__X, 
__Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epi32(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epi32(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epu32(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epu32(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epu32(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epu32(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_mullo_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_mullo_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_mullo_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_mullo_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_and_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a & (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_and_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_and_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a & (__v4su)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_and_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_andnot_epi32(__m256i __A, __m256i __B) +{ + return (__m256i)(~(__v8su)__A & (__v8su)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_andnot_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(), + __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_andnot_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)(~(__v4su)__A & (__v4su)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_andnot_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_or_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a | (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_or_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_or_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a | (__v4su)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_or_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a ^ (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_xor_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_xor_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a ^ (__v4su)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_xor_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_and_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a & (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_and_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_and_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a & (__v2du)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_and_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_andnot_epi64(__m256i __A, __m256i __B) +{ + return (__m256i)(~(__v4du)__A & (__v4du)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_andnot_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(), + __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_andnot_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)(~(__v2du)__A & (__v2du)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_andnot_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_or_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a | (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_or_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_or_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a | (__v2du)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_or_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a ^ (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_xor_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_xor_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a ^ (__v2du)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_xor_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +#define _mm_cmp_epi32_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm_mask_cmp_epi32_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm_cmp_epu32_mask(a, b, p) \ + (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm_mask_cmp_epu32_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm256_cmp_epi32_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm256_cmp_epu32_mask(a, b, p) \ + (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm_cmp_epi64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm_mask_cmp_epi64_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm_cmp_epu64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm_mask_cmp_epu64_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm256_cmp_epi64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ + 
(__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm256_cmp_epu64_mask(a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm256_cmp_ps_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm256_mask_cmp_ps_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm256_cmp_pd_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm256_mask_cmp_pd_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm_cmp_ps_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm_mask_cmp_ps_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (int)(p), \ + (__mmask8)(m)) + +#define _mm_cmp_pd_mask(a, b, p) \ + (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (int)(p), \ + (__mmask8)-1) + +#define _mm_mask_cmp_pd_mask(m, a, b, p) \ + (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (int)(p), \ + (__mmask8)(m)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd (-(__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) 
__builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd (-(__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd (-(__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 (-(__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 (-(__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 (-(__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) 
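/* Semantics sketch (illustrative aside): the three masking flavours of the
 * FMA intrinsics differ only in what an unset mask bit yields, which matches
 * the fallback operand of the __builtin_ia32_selectpd/selectps calls in the
 * definitions themselves:
 *
 *   _mm_mask_fmadd_pd (A, U, B, C)   lane i: U ? (A*B+C)[i] : A[i]  // merge into A
 *   _mm_mask3_fmadd_pd(A, B, C, U)   lane i: U ? (A*B+C)[i] : C[i]  // merge into C
 *   _mm_maskz_fmadd_pd(U, A, B, C)   lane i: U ? (A*B+C)[i] : 0.0   // zero
 */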
__builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps (-(__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps (-(__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps (-(__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 (-(__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 
(-(__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 (-(__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) 
__U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) 
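/* Illustrative note (aside): fmaddsub alternates the sign of the addend per
 * lane, subtracting in even lanes and adding in odd ones; fmsubadd (spelled
 * here by negating __C) is the opposite pairing:
 *
 *   fmaddsub: r[2i] = a[2i]*b[2i] - c[2i];   r[2i+1] = a[2i+1]*b[2i+1] + c[2i+1]
 *   fmsubadd: r[2i] = a[2i]*b[2i] + c[2i];   r[2i+1] = a[2i+1]*b[2i+1] - c[2i+1]
 */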
__A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + -(__v2df) __B, + (__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + -(__v4df) __B, + (__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + -(__v4sf) __B, + (__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + -(__v8sf) __B, + (__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + -(__v2df) __B, + -(__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + -(__v2df) __B, + -(__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + -(__v4df) __B, + -(__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d 
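/* Illustrative summary (aside) of the negated FMA variants defined above,
 * written as the per-lane formula each computes when the mask bit is set:
 *
 *   fmadd : a*b + c          fmsub : a*b - c
 *   fnmadd: -(a*b) + c       fnmsub: -(a*b) - c
 *
 * The header realises the negations by flipping the sign of one operand of
 * __builtin_ia32_vfmaddp{s,d}: -(a*b) is expressed as (-a)*b or a*(-b).
 */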
__DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + -(__v4df) __B, + -(__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + -(__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + -(__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + -(__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + -(__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) { + return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __W, + (__v4si) __A); +} + +static __inline__ __m256i 
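/* Usage sketch (illustrative aside): every arithmetic intrinsic in this
 * family, like the masked or/xor ops earlier in the file, comes in a
 * merge-masking (_mask_) and a zero-masking (_maskz_) form:
 *
 *   __m128d w = _mm_set1_pd(9.0);
 *   __m128d a = _mm_setr_pd(1.0, 2.0), b = _mm_setr_pd(10.0, 20.0);
 *   _mm_mask_add_pd (w, 0x1, a, b);   // { 11.0, 9.0 }  lane 1 kept from w
 *   _mm_maskz_add_pd(   0x1, a, b);   // { 11.0, 0.0 }  lane 1 zeroed
 */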
__DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) { + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __W, + (__v8si) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) { + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __W, + (__v2df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) { + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __W, + (__v4df) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) { + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __W, + (__v4sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) { + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __W, + (__v8sf) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) { + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __W, + (__v2di) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) { + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __W, + (__v4di) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 
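/* Illustrative note (aside): _mm*_mask_blend_* is a pure per-lane select
 * with no arithmetic; a set mask bit picks the second vector (__W), a clear
 * bit the first (__A), exactly as the select builtins above encode it:
 *
 *   __m256d a = _mm256_set1_pd(0.0), w = _mm256_set1_pd(1.0);
 *   _mm256_mask_blend_pd(0x5, a, w);   // { 1.0, 0.0, 1.0, 0.0 }
 */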
__DEFAULT_FN_ATTRS128 +_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) { + __builtin_ia32_compressstoredf128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) { + __builtin_ia32_compressstoredf256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) { + __builtin_ia32_compressstoredi128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) { + __builtin_ia32_compressstoredi256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) { + __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) { + __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) { + __builtin_ia32_compressstoresi128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) { + __builtin_ia32_compressstoresi256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepi32_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d 
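/* Semantics sketch (illustrative aside): compress packs the mask-selected
 * lanes contiguously into the low end of the result, with the upper lanes
 * taken from __W (merge form) or zeroed (_maskz_ form); compressstoreu
 * writes only the selected lanes, back to back, to an unaligned address:
 *
 *   __m128i a = _mm_setr_epi32(10, 11, 12, 13);
 *   _mm_maskz_compress_epi32(0xA, a);            // { 11, 13, 0, 0 }
 *   int out[4] = {0};
 *   _mm_mask_compressstoreu_epi32(out, 0xA, a);  // out = { 11, 13, 0, 0 }
 */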
__DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepi32_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepi32_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepi32_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepi32_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepi32_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepi32_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepi32_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvtpd_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvtpd_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) { + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) { + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtpd_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtpd_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtpd_epu32 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + 
_mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtpd_epu32 (__m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtps_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtps_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtps_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtps_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtps_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtps_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtps_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtps_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtps_epu32 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) { + return (__m128i) 
__builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtps_epu32 (__m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvttpd_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvttpd_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttpd_epu32 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvttpd_epu32 (__m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvttps_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvttps_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
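/* Illustrative note (aside): the cvtt* conversions truncate toward zero,
 * while the cvt* forms round according to the current MXCSR rounding mode
 * (round to nearest even by default); the *_epu32 variants are the
 * unsigned-range conversions these AVX-512VL forms add over SSE/AVX:
 *
 *   __m128 x = _mm_set1_ps(2.7f);
 *   _mm_cvttps_epu32(x);   // { 2, 2, 2, 2 }  truncated
 *   _mm_cvtps_epu32(x);    // { 3, 3, 3, 3 }  rounded (default mode)
 */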
+_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvttps_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvttps_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttps_epu32 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttps_epu32 (__m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtepu32_pd (__m128i __A) { + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepu32_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepu32_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_pd (__m128i __A) { + return (__m256d)__builtin_convertvector((__v4su)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepu32_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepu32_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtepu32_ps (__m128i __A) { + return (__m128)__builtin_convertvector((__v4su)__A, __v4sf); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepu32_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) { + return 
(__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepu32_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_ps (__m256i __A) { + return (__m256)__builtin_convertvector((__v8su)__A, __v8sf); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepu32_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepu32_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i 
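/* Usage sketch (illustrative aside): _mm*_cvtepu32_p{s,d} treat the integer
 * lanes as unsigned, which matters for values with the top bit set:
 *
 *   __m128i v = _mm_set1_epi32(-1);   // bit pattern 0xFFFFFFFF
 *   _mm_cvtepi32_ps(v);   // { -1.0f, ... }          signed view
 *   _mm_cvtepu32_ps(v);   // { 4294967296.0f, ... }  unsigned view, rounded
 */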
__DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) { + return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P, + (__v2df) __W, + (__mmask8) + __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { + return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) { + return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P, + (__v4df) __W, + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { + return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U, + void const *__P) { + return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { + return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) { + return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { + return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) + __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) { + return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P, + (__v8sf) 
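/* Semantics sketch (illustrative aside): expand is the inverse of compress.
 * It reads source lanes consecutively from the low end and scatters them to
 * the mask-selected positions; expandloadu does the same from unaligned
 * memory:
 *
 *   __m128i a = _mm_setr_epi32(10, 11, 12, 13);
 *   _mm_maskz_expand_epi32(0xA, a);   // { 0, 10, 0, 11 }
 */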
__W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { + return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U, + void const *__P) { + return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { + return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_getexp_pd (__m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_pd 
(__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_getexp_pd (__m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_getexp_ps (__m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_getexp_ps (__m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_max_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_max_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_max_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_max_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_max_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_max_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 
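/* Illustrative note (aside): vgetexp extracts the unbiased exponent as a
 * floating-point value, i.e. floor(log2(|x|)) for finite non-zero x:
 *
 *   __m128d x = _mm_setr_pd(8.0, 0.5);
 *   _mm_getexp_pd(x);   // { 3.0, -1.0 }
 */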
+_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_max_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_max_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_min_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_min_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_min_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_min_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_min_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_min_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_min_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_min_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 
__DEFAULT_FN_ATTRS128 +_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_abs_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_abs_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_abs_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_abs_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_abs_epi64 (__m128i __A) { + return (__m128i)__builtin_ia32_pabsq128((__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_abs_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_abs_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi64 (__m256i __A) { + return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_abs_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_abs_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ 
__m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_max_epi64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epu32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epu32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epu32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epu32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_max_epu64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epu64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epu64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + 
(__v4di)_mm256_max_epu64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epu64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_min_epi64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epu32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epu32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epu32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return 
(__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epu32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_min_epu64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epu64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epu64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epu64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epu64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +#define _mm_roundscale_pd(A, imm) \ + (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1) + + +#define _mm_mask_roundscale_pd(W, U, A, imm) \ + (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)) + + +#define _mm_maskz_roundscale_pd(U, A, imm) \ + (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)) + + +#define _mm256_roundscale_pd(A, imm) \ + (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1) + + +#define _mm256_mask_roundscale_pd(W, U, A, imm) \ + (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)) + + +#define _mm256_maskz_roundscale_pd(U, A, imm) \ + (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm_roundscale_ps(A, imm) \ + (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1) + + +#define _mm_mask_roundscale_ps(W, U, A, imm) \ + (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)) + + +#define _mm_maskz_roundscale_ps(U, A, imm) \ + (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm256_roundscale_ps(A, imm) \ + (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1) + +#define _mm256_mask_roundscale_ps(W, U, A, imm) \ + (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)) + + +#define _mm256_maskz_roundscale_ps(U, A, imm) \ + (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + 
(__mmask8)(U)) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_scalef_pd (__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_scalef_pd (__m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_scalef_ps (__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_scalef_ps (__m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_i64scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_i64scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define 
_mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm256_i64scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_i64scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm_i64scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_i64scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm256_i64scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm256_i64scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm_i32scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_i32scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm256_i32scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define 
_mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_i32scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm_i32scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_i32scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm256_i32scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \ + (int)(scale)) + +#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \ + (int)(scale)) + +#define _mm256_i32scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + +#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sqrt_pd(__A), + (__v2df)__W); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sqrt_pd(__A), + (__v2df)_mm_setzero_pd()); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sqrt_pd(__A), + (__v4df)__W); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sqrt_pd(__A), + (__v4df)_mm256_setzero_pd()); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sqrt_ps(__A), + (__v4sf)__W); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sqrt_ps(__A), + (__v4sf)_mm_setzero_ps()); + } + + static __inline__ __m256 
__DEFAULT_FN_ATTRS256 + _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sqrt_ps(__A), + (__v8sf)__W); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sqrt_ps(__A), + (__v8sf)_mm256_setzero_ps()); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)__W); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)_mm_setzero_pd()); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)__W); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)__W); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)__W); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) { + return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I, + (__v4si)__B); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_permutex2var_epi32(__A, __I, __B), + (__v4si)__A); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_permutex2var_epi32(__A, __I, __B), + (__v4si)__I); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_permutex2var_epi32(__A, __I, __B), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) { + return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I, + 
(__v8si) __B); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_permutex2var_epi32(__A, __I, __B), + (__v8si)__A); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, + __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_permutex2var_epi32(__A, __I, __B), + (__v8si)__I); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_permutex2var_epi32(__A, __I, __B), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) { + return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I, + (__v2df)__B); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128(__U, + (__v2df)_mm_permutex2var_pd(__A, __I, __B), + (__v2df)__A); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128(__U, + (__v2df)_mm_permutex2var_pd(__A, __I, __B), + (__v2df)(__m128d)__I); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128(__U, + (__v2df)_mm_permutex2var_pd(__A, __I, __B), + (__v2df)_mm_setzero_pd()); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) { + return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I, + (__v4df)__B); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, + __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256(__U, + (__v4df)_mm256_permutex2var_pd(__A, __I, __B), + (__v4df)__A); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, + __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256(__U, + (__v4df)_mm256_permutex2var_pd(__A, __I, __B), + (__v4df)(__m256d)__I); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, + __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256(__U, + (__v4df)_mm256_permutex2var_pd(__A, __I, __B), + (__v4df)_mm256_setzero_pd()); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) { + return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I, + (__v4sf)__B); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128(__U, + (__v4sf)_mm_permutex2var_ps(__A, __I, __B), + (__v4sf)__A); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128(__U, + (__v4sf)_mm_permutex2var_ps(__A, __I, __B), + (__v4sf)(__m128)__I); + } + + 
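For readers skimming the vpermi2var family above, the following standalone sketch (illustrative only, not part of the header being added; it assumes a toolchain invoked with something like -mavx512f -mavx512vl) shows the selection rule: for 4-lane vectors, each index selects from the 8-element concatenation of the two sources, and the _mask/_mask2/_maskz variants differ only in what fills the lanes whose mask bit is clear (__A, __I, or zero, respectively).

/* Illustrative sketch only; not part of avx512vlintrin.h. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);     /* elements 0..3 */
  __m128 b = _mm_setr_ps(10.0f, 11.0f, 12.0f, 13.0f); /* elements 4..7 */
  __m128i idx = _mm_setr_epi32(0, 5, 2, 7); /* 0..3 pick from a, 4..7 from b */

  __m128 r  = _mm_permutex2var_ps(a, idx, b);            /* {0, 11, 2, 13} */
  /* Zero-masking: mask 0b0101 keeps lanes 0 and 2, zeroes lanes 1 and 3. */
  __m128 rz = _mm_maskz_permutex2var_ps(0x5, a, idx, b); /* {0, 0, 2, 0} */

  float o[4], oz[4];
  _mm_storeu_ps(o, r);
  _mm_storeu_ps(oz, rz);
  printf("%g %g %g %g | %g %g %g %g\n",
         o[0], o[1], o[2], o[3], oz[0], oz[1], oz[2], oz[3]);
  return 0;
}

The same pattern carries over to the 256-bit and integer variants below; only the element count, and hence the number of significant index bits, changes.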
static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128(__U, + (__v4sf)_mm_permutex2var_ps(__A, __I, __B), + (__v4sf)_mm_setzero_ps()); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) { + return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I, + (__v8sf) __B); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256(__U, + (__v8sf)_mm256_permutex2var_ps(__A, __I, __B), + (__v8sf)__A); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, + __m256 __B) { + return (__m256)__builtin_ia32_selectps_256(__U, + (__v8sf)_mm256_permutex2var_ps(__A, __I, __B), + (__v8sf)(__m256)__I); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, + __m256 __B) { + return (__m256)__builtin_ia32_selectps_256(__U, + (__v8sf)_mm256_permutex2var_ps(__A, __I, __B), + (__v8sf)_mm256_setzero_ps()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) { + return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I, + (__v2di)__B); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_permutex2var_epi64(__A, __I, __B), + (__v2di)__A); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_permutex2var_epi64(__A, __I, __B), + (__v2di)__I); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_permutex2var_epi64(__A, __I, __B), + (__v2di)_mm_setzero_si128()); + } + + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) { + return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I, + (__v4di) __B); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_permutex2var_epi64(__A, __I, __B), + (__v4di)__A); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, + __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_permutex2var_epi64(__A, __I, __B), + (__v4di)__I); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_permutex2var_epi64(__A, __I, __B), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi8_epi32(__A), + (__v4si)__W); + } + + static __inline__ 
__m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi8_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi8_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi8_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi8_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi8_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi8_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi8_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi32_epi64(__X), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi32_epi64(__X), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi32_epi64(__X), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi32_epi64(__X), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi16_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi16_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi16_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + 
(__v8si)_mm256_cvtepi16_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi16_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi16_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi16_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi16_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu8_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu8_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu8_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu8_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu8_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu8_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu8_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu8_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu32_epi64(__X), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu32_epi64(__X), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu32_epi64(__m256i __W, 
__mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu32_epi64(__X), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu32_epi64(__X), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu16_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu16_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu16_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu16_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu16_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu16_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu16_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu16_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + +#define _mm_rol_epi32(a, b) \ + (__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)) + +#define _mm_mask_rol_epi32(w, u, a, b) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_rol_epi32((a), (b)), \ + (__v4si)(__m128i)(w)) + +#define _mm_maskz_rol_epi32(u, a, b) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_rol_epi32((a), (b)), \ + (__v4si)_mm_setzero_si128()) + +#define _mm256_rol_epi32(a, b) \ + (__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)) + +#define _mm256_mask_rol_epi32(w, u, a, b) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_rol_epi32((a), (b)), \ + (__v8si)(__m256i)(w)) + +#define _mm256_maskz_rol_epi32(u, a, b) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_rol_epi32((a), (b)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_rol_epi64(a, b) \ + (__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)) + +#define _mm_mask_rol_epi64(w, u, a, b) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_rol_epi64((a), (b)), \ + (__v2di)(__m128i)(w)) + +#define _mm_maskz_rol_epi64(u, a, b) \ + 
(__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_rol_epi64((a), (b)), \ + (__v2di)_mm_setzero_si128()) + +#define _mm256_rol_epi64(a, b) \ + (__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)) + +#define _mm256_mask_rol_epi64(w, u, a, b) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_rol_epi64((a), (b)), \ + (__v4di)(__m256i)(w)) + +#define _mm256_maskz_rol_epi64(u, a, b) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_rol_epi64((a), (b)), \ + (__v4di)_mm256_setzero_si256()) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rolv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rolv_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rolv_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rolv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rolv_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rolv_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rolv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rolv_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rolv_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rolv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rolv_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rolv_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +#define _mm_ror_epi32(a, b) \ + (__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)) + +#define _mm_mask_ror_epi32(w, u, a, b) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_ror_epi32((a), (b)), \ + (__v4si)(__m128i)(w)) + +#define _mm_maskz_ror_epi32(u, a, b) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_ror_epi32((a), 
(b)), \ + (__v4si)_mm_setzero_si128()) + +#define _mm256_ror_epi32(a, b) \ + (__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)) + +#define _mm256_mask_ror_epi32(w, u, a, b) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_ror_epi32((a), (b)), \ + (__v8si)(__m256i)(w)) + +#define _mm256_maskz_ror_epi32(u, a, b) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_ror_epi32((a), (b)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_ror_epi64(a, b) \ + (__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)) + +#define _mm_mask_ror_epi64(w, u, a, b) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_ror_epi64((a), (b)), \ + (__v2di)(__m128i)(w)) + +#define _mm_maskz_ror_epi64(u, a, b) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_ror_epi64((a), (b)), \ + (__v2di)_mm_setzero_si128()) + +#define _mm256_ror_epi64(a, b) \ + (__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)) + +#define _mm256_mask_ror_epi64(w, u, a, b) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_ror_epi64((a), (b)), \ + (__v4di)(__m256i)(w)) + +#define _mm256_maskz_ror_epi64(u, a, b) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_ror_epi64((a), (b)), \ + (__v4di)_mm256_setzero_si256()) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sll_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sll_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sll_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sll_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_slli_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_slli_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_slli_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_slli_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + 
(__v2di)_mm_sll_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sll_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sll_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sll_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_slli_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_slli_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_slli_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_slli_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rorv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rorv_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rorv_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rorv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rorv_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rorv_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rorv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rorv_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rorv_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rorv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rorv_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rorv_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sllv_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sllv_epi64(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sllv_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sllv_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sllv_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sllv_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sllv_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sllv_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srlv_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srlv_epi64(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + 
+      (__v4di)_mm256_srlv_epi64(__X, __Y), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srlv_epi64(__X, __Y), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srlv_epi32(__X, __Y), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srlv_epi32(__X, __Y), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srlv_epi32(__X, __Y), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srlv_epi32(__X, __Y), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srl_epi32(__A, __B), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srl_epi32(__A, __B), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srl_epi32(__A, __B), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srl_epi32(__A, __B), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srli_epi32(__A, __B), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srli_epi32(__A, __B), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srli_epi32(__A, __B), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srli_epi32(__A, __B), (__v8si)_mm256_setzero_si256());
+}
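/* Editorial note -- not part of the upstream header or of this patch.
 * The right-shift flavours above differ only in where the count comes
 * from: _mm*_srl_epi32 takes one count from the low 64 bits of a vector,
 * _mm*_srli_epi32 takes an immediate, and _mm*_srlv_epi32 shifts each
 * lane by its own count. A hypothetical sketch, assuming AVX-512VL: */
#if 0 /* illustration only */
#include <immintrin.h>
__m128i shift_examples(__m128i v)
{
  __m128i per_lane = _mm_srlv_epi32(v, _mm_set_epi32(3, 2, 1, 0));
  /* Zero-masked immediate shift: lanes 0..1 = v >> 3, lanes 2..3 = 0. */
  __m128i masked   = _mm_maskz_srli_epi32(0x3, v, 3);
  return _mm_add_epi32(per_lane, masked);
}
#endif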
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srl_epi64(__A, __B), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srl_epi64(__A, __B), (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srl_epi64(__A, __B), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srl_epi64(__A, __B), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srli_epi64(__A, __B), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srli_epi64(__A, __B), (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srli_epi64(__A, __B), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srli_epi64(__A, __B), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srav_epi32(__X, __Y), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srav_epi32(__X, __Y), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srav_epi32(__X, __Y), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srav_epi32(__X, __Y), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srav_epi64(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srav_epi64(__X, __Y), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srav_epi64(__X, __Y), (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srav_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srav_epi64(__X, __Y), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srav_epi64(__X, __Y), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+      (__v4si) __A, (__v4si) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+      (__v4si) __A, (__v4si) _mm_setzero_si128 ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+      (__v8si) __A, (__v8si) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+      (__v8si) __A, (__v8si) _mm256_setzero_si256 ());
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_load_epi32 (void const *__P)
+{
+  return *(const __m128i *) __P;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
+      (__v4si) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
+      (__v4si) _mm_setzero_si128 (), (__mmask8) __U);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_load_epi32 (void const *__P)
+{
+  return *(const __m256i *) __P;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
+      (__v8si) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
+      (__v8si) _mm256_setzero_si256 (), (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_store_epi32 (void *__P, __m128i __A)
+{
+  *(__m128i *) __P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P, (__v4si) __A,
+      (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_store_epi32 (void *__P, __m256i __A)
+{
+  *(__m256i *) __P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P, (__v8si) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+      (__v2di) __A, (__v2di) __W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+      (__v2di) __A, (__v2di) _mm_setzero_si128 ());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+      (__v4di) __A, (__v4di) __W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+      (__v4di) __A, (__v4di) _mm256_setzero_si256 ());
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_load_epi64 (void const *__P)
+{
+  return *(const __m128i *) __P;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
+      (__v2di) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
+      (__v2di) _mm_setzero_si128 (), (__mmask8) __U);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_load_epi64 (void const *__P)
+{
+  return *(const __m256i *) __P;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
+      (__v4di) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
+      (__v4di) _mm256_setzero_si256 (), (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_store_epi64 (void *__P, __m128i __A)
+{
+  *(__m128i *) __P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P, (__v2di) __A,
+      (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_store_epi64 (void *__P, __m256i __A)
+{
+  *(__m256i *) __P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P, (__v4di) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_movedup_pd(__A), (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_movedup_pd(__A), (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_movedup_pd(__A), (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_movedup_pd(__A), (__v4df)_mm256_setzero_pd());
+}
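/* Editorial note -- not part of the upstream header or of this patch.
 * Every mask/maskz pair above follows the same select pattern:
 * merge-masking blends the new result into __W, zero-masking blends it
 * into zero. A hypothetical sketch, assuming AVX-512VL: */
#if 0 /* illustration only */
#include <immintrin.h>
__m128d duplicate_even_lane(__m128d w, __m128d a)
{
  /* Lane 1 <- a[0] (the movedup result); lane 0 keeps w's value. */
  return _mm_mask_movedup_pd(w, 0x2, a);
}
#endif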
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
+{
+  return (__m128i)__builtin_ia32_selectd_128(__M,
+      (__v4si) _mm_set1_epi32(__A), (__v4si)__O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_set1_epi32(__mmask8 __M, int __A)
+{
+  return (__m128i)__builtin_ia32_selectd_128(__M,
+      (__v4si) _mm_set1_epi32(__A), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256(__M,
+      (__v8si) _mm256_set1_epi32(__A), (__v8si)__O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_set1_epi32(__mmask8 __M, int __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256(__M,
+      (__v8si) _mm256_set1_epi32(__A), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
+{
+  return (__m128i) __builtin_ia32_selectq_128(__M,
+      (__v2di) _mm_set1_epi64x(__A), (__v2di) __O);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+  return (__m128i) __builtin_ia32_selectq_128(__M,
+      (__v2di) _mm_set1_epi64x(__A), (__v2di) _mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
+{
+  return (__m256i) __builtin_ia32_selectq_256(__M,
+      (__v4di) _mm256_set1_epi64x(__A), (__v4di) __O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+  return (__m256i) __builtin_ia32_selectq_256(__M,
+      (__v4di) _mm256_set1_epi64x(__A), (__v4di) _mm256_setzero_si256());
+}
+
+#define _mm_fixupimm_pd(A, B, C, imm) \
+  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+      (__v2df)(__m128d)(B), (__v2di)(__m128i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
+  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+      (__v2df)(__m128d)(B), (__v2di)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
+  (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
+      (__v2df)(__m128d)(B), (__v2di)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_fixupimm_pd(A, B, C, imm) \
+  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+      (__v4df)(__m256d)(B), (__v4di)(__m256i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
+  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+      (__v4df)(__m256d)(B), (__v4di)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
+  (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
+      (__v4df)(__m256d)(B), (__v4di)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm_fixupimm_ps(A, B, C, imm) \
+  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+      (__v4sf)(__m128)(B), (__v4si)(__m128i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
+  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+      (__v4sf)(__m128)(B), (__v4si)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
+  (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+      (__v4sf)(__m128)(B), (__v4si)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_fixupimm_ps(A, B, C, imm) \
+  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+      (__v8sf)(__m256)(B), (__v8si)(__m256i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
+  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+      (__v8sf)(__m256)(B), (__v8si)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
+  (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+      (__v8sf)(__m256)(B), (__v8si)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
+      (__v2df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
+      (__v2df) _mm_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
+      (__v4df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
+      (__v4df) _mm256_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
+      (__v4sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
+      (__v4sf) _mm_setzero_ps (), (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
+      (__v8sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
+      (__v8sf) _mm256_setzero_ps (), (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi64 (void const *__P)
+{
+  struct __loadu_epi64 {
+    __m128i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  return ((const struct __loadu_epi64*)__P)->__v;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
+      (__v2di) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
+      (__v2di) _mm_setzero_si128 (), (__mmask8) __U);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi64 (void const *__P)
+{
+  struct __loadu_epi64 {
+    __m256i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  return ((const struct __loadu_epi64*)__P)->__v;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
+      (__v4di) __W,
+      (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
+      (__v4di) _mm256_setzero_si256 (), (__mmask8) __U);
+}
+
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi32 (void const *__P)
+{
+  struct __loadu_epi32 {
+    __m128i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  return ((const struct __loadu_epi32*)__P)->__v;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
+      (__v4si) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
+      (__v4si) _mm_setzero_si128 (), (__mmask8) __U);
+}
+
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi32 (void const *__P)
+{
+  struct __loadu_epi32 {
+    __m256i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  return ((const struct __loadu_epi32*)__P)->__v;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
+      (__v8si) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
+      (__v8si) _mm256_setzero_si256 (), (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
+      (__v2df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
+      (__v2df) _mm_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
+      (__v4df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
+      (__v4df) _mm256_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
+      (__v4sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
+      (__v4sf) _mm_setzero_ps (), (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
+      (__v8sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
+      (__v8sf) _mm256_setzero_ps (), (__mmask8) __U);
+}
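/* Editorial note -- not part of the upstream header or of this patch.
 * The masked unaligned loads above are the usual way to handle a loop
 * tail without a scalar epilogue: build a mask covering the n live
 * elements and let the zero-masked load suppress faults on the rest.
 * A hypothetical sketch, assuming AVX-512VL: */
#if 0 /* illustration only */
#include <immintrin.h>
__m256i load_tail_epi32(const int *p, unsigned n) /* n < 8 */
{
  __mmask8 k = (__mmask8)((1u << n) - 1u);
  return _mm256_maskz_loadu_epi32(k, p); /* lanes >= n read as 0 */
}
#endif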
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_storeapd128_mask ((__v2df *) __P, (__v2df) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_storeapd256_mask ((__v4df *) __P, (__v4df) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_storeaps128_mask ((__v4sf *) __P, (__v4sf) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_storeaps256_mask ((__v8sf *) __P, (__v8sf) __A,
+      (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi64 (void *__P, __m128i __A)
+{
+  struct __storeu_epi64 {
+    __m128i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedqudi128_mask ((__v2di *) __P, (__v2di) __A,
+      (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi64 (void *__P, __m256i __A)
+{
+  struct __storeu_epi64 {
+    __m256i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_storedqudi256_mask ((__v4di *) __P, (__v4di) __A,
+      (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi32 (void *__P, __m128i __A)
+{
+  struct __storeu_epi32 {
+    __m128i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedqusi128_mask ((__v4si *) __P, (__v4si) __A,
+      (__mmask8) __U);
+}
+
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi32 (void *__P, __m256i __A)
+{
+  struct __storeu_epi32 {
+    __m256i_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_storedqusi256_mask ((__v8si *) __P, (__v8si) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_storeupd128_mask ((__v2df *) __P, (__v2df) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_storeupd256_mask ((__v4df *) __P, (__v4df) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_storeups128_mask ((__v4sf *) __P, (__v4sf) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_storeups256_mask ((__v8sf *) __P, (__v8sf) __A,
+      (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_unpackhi_pd(__A, __B), (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_unpackhi_pd(__A, __B), (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_unpackhi_pd(__A, __B), (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_unpackhi_pd(__A, __B), (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+      (__v4sf)_mm_unpackhi_ps(__A, __B), (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+      (__v4sf)_mm_unpackhi_ps(__A, __B), (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+      (__v8sf)_mm256_unpackhi_ps(__A, __B), (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+      (__v8sf)_mm256_unpackhi_ps(__A, __B), (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_unpacklo_pd(__A, __B), (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_unpacklo_pd(__A, __B), (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_unpacklo_pd(__A, __B), (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_unpacklo_pd(__A, __B), (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+      (__v4sf)_mm_unpacklo_ps(__A, __B), (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+      (__v4sf)_mm_unpacklo_ps(__A, __B), (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+      (__v8sf)_mm256_unpacklo_ps(__A, __B), (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+      (__v8sf)_mm256_unpacklo_ps(__A, __B), (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_rcp14_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+      (__v2df) _mm_setzero_pd (), (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+      (__v2df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+      (__v2df) _mm_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_rcp14_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+      (__v4df) _mm256_setzero_pd (), (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+      (__v4df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+      (__v4df) _mm256_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_rcp14_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+      (__v4sf) _mm_setzero_ps (), (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+      (__v4sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+      (__v4sf) _mm_setzero_ps (), (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_rcp14_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+      (__v8sf) _mm256_setzero_ps (), (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+      (__v8sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+      (__v8sf) _mm256_setzero_ps (), (__mmask8) __U);
+}
+
+#define _mm_mask_permute_pd(W, U, X, C) \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+      (__v2df)_mm_permute_pd((X), (C)), (__v2df)(__m128d)(W))
+
+#define _mm_maskz_permute_pd(U, X, C) \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+      (__v2df)_mm_permute_pd((X), (C)), (__v2df)_mm_setzero_pd())
+
+#define _mm256_mask_permute_pd(W, U, X, C) \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+      (__v4df)_mm256_permute_pd((X), (C)), (__v4df)(__m256d)(W))
+
+#define _mm256_maskz_permute_pd(U, X, C) \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+      (__v4df)_mm256_permute_pd((X), (C)), (__v4df)_mm256_setzero_pd())
+
+#define _mm_mask_permute_ps(W, U, X, C) \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+      (__v4sf)_mm_permute_ps((X), (C)), (__v4sf)(__m128)(W))
+
+#define _mm_maskz_permute_ps(U, X, C) \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+      (__v4sf)_mm_permute_ps((X), (C)), (__v4sf)_mm_setzero_ps())
+
+#define _mm256_mask_permute_ps(W, U, X, C) \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+      (__v8sf)_mm256_permute_ps((X), (C)), (__v8sf)(__m256)(W))
+
+#define _mm256_maskz_permute_ps(U, X, C) \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+      (__v8sf)_mm256_permute_ps((X), (C)), (__v8sf)_mm256_setzero_ps())
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_permutevar_pd(__A, __C), (__v2df)__W);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
+{
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+      (__v2df)_mm_permutevar_pd(__A, __C), (__v2df)_mm_setzero_pd());
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_permutevar_pd(__A, __C), (__v4df)__W);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
+{
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+      (__v4df)_mm256_permutevar_pd(__A, __C), (__v4df)_mm256_setzero_pd());
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+      (__v4sf)_mm_permutevar_ps(__A, __C), (__v4sf)__W);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
+{
+  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+      (__v4sf)_mm_permutevar_ps(__A, __C), (__v4sf)_mm_setzero_ps());
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+      (__v8sf)_mm256_permutevar_ps(__A, __C), (__v8sf)__W);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+      (__v8sf)_mm256_permutevar_ps(__A, __C), (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_test_epi32_mask (__m128i __A, __m128i __B)
+{
+  return _mm_cmpneq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return _mm_mask_cmpneq_epi32_mask (__U, _mm_and_si128 (__A, __B),
+      _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_test_epi32_mask (__m256i __A, __m256i __B)
+{
+  return _mm256_cmpneq_epi32_mask (_mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return _mm256_mask_cmpneq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
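/* Editorial note -- not part of the upstream header or of this patch.
 * As the definitions above show, test/testn are just compares of
 * (__A & __B) against zero: `test` sets a mask bit where the AND is
 * non-zero, `testn` where it is zero. A hypothetical sketch, assuming
 * AVX-512VL: */
#if 0 /* illustration only */
#include <immintrin.h>
__mmask8 low_bit_set(__m128i v)
{
  /* Bit i of the result is 1 iff lane i of v has bit 0 set. */
  return _mm_test_epi32_mask(v, _mm_set1_epi32(1));
}
#endif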
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_test_epi64_mask (__m128i __A, __m128i __B)
+{
+  return _mm_cmpneq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return _mm_mask_cmpneq_epi64_mask (__U, _mm_and_si128 (__A, __B),
+      _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_test_epi64_mask (__m256i __A, __m256i __B)
+{
+  return _mm256_cmpneq_epi64_mask (_mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return _mm256_mask_cmpneq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_testn_epi32_mask (__m128i __A, __m128i __B)
+{
+  return _mm_cmpeq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return _mm_mask_cmpeq_epi32_mask (__U, _mm_and_si128 (__A, __B),
+      _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
+{
+  return _mm256_cmpeq_epi32_mask (_mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return _mm256_mask_cmpeq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_testn_epi64_mask (__m128i __A, __m128i __B)
+{
+  return _mm_cmpeq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
+_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return _mm_mask_cmpeq_epi64_mask (__U, _mm_and_si128 (__A, __B),
+      _mm_setzero_si128());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
+{
+  return _mm256_cmpeq_epi64_mask (_mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
+_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return _mm256_mask_cmpeq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
+      _mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_unpackhi_epi32(__A, __B), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_unpackhi_epi32(__A, __B), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_unpackhi_epi32(__A, __B), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_unpackhi_epi32(__A, __B), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_unpackhi_epi64(__A, __B), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_unpackhi_epi64(__A, __B), (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_unpackhi_epi64(__A, __B), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_unpackhi_epi64(__A, __B), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_unpacklo_epi32(__A, __B), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_unpacklo_epi32(__A, __B), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_unpacklo_epi32(__A, __B), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_unpacklo_epi32(__A, __B), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_unpacklo_epi64(__A, __B), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_unpacklo_epi64(__A, __B), (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_unpacklo_epi64(__A, __B), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_unpacklo_epi64(__A, __B), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_sra_epi32(__A, __B), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_sra_epi32(__A, __B), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_sra_epi32(__A, __B), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_sra_epi32(__A, __B), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srai_epi32(__A, __B), (__v4si)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
+{
+  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+      (__v4si)_mm_srai_epi32(__A, __B), (__v4si)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srai_epi32(__A, __B), (__v8si)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+      (__v8si)_mm256_srai_epi32(__A, __B), (__v8si)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_sra_epi64(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_sra_epi64(__A, __B), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_sra_epi64(__A, __B), (__v2di)_mm_setzero_si128());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sra_epi64(__m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_sra_epi64(__A, __B), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_sra_epi64(__A, __B), (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srai_epi64(__m128i __A, unsigned int __imm)
+{
+  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srai_epi64(__A, __imm), (__v2di)__W);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
+{
+  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+      (__v2di)_mm_srai_epi64(__A, __imm), (__v2di)_mm_setzero_si128());
+}
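/* Editorial note -- not part of the upstream header or of this patch.
 * _mm_sra_epi64/_mm_srai_epi64 above fill a long-standing gap: SSE2 and
 * AVX2 provide arithmetic right shifts only up to 32-bit lanes, so the
 * 64-bit forms exist only with AVX-512VL. A hypothetical sketch: */
#if 0 /* illustration only */
#include <immintrin.h>
__m128i halve_signed64(__m128i v)
{
  return _mm_srai_epi64(v, 1); /* sign-extending shift in both lanes */
}
#endif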
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srai_epi64(__m256i __A, unsigned int __imm)
+{
+  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+                       unsigned int __imm)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srai_epi64(__A, __imm), (__v4di)__W);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
+{
+  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+      (__v4di)_mm256_srai_epi64(__A, __imm), (__v4di)_mm256_setzero_si256());
+}
+
+#define _mm_ternarylogic_epi32(A, B, C, imm) \
+  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+      (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
+  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+      (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+  (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
+      (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_ternarylogic_epi32(A, B, C, imm) \
+  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+      (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
+  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+      (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+  (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
+      (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm_ternarylogic_epi64(A, B, C, imm) \
+  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+      (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
+  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+      (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+  (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
+      (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_ternarylogic_epi64(A, B, C, imm) \
+  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+      (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), (int)(imm), (__mmask8)-1)
+
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
+  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+      (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), (int)(imm), (__mmask8)(U))
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+  (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
+      (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), (int)(imm), (__mmask8)(U))
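/* Editorial note -- not part of the upstream header or of this patch.
 * The ternarylogic imm8 is a 3-input truth table: bit (a<<2 | b<<1 | c)
 * of the immediate gives the result for inputs (a, b, c) taken from
 * operands A, B, C. E.g. 0x96 is A ^ B ^ C and 0xE8 is the bitwise
 * majority function. A hypothetical sketch, assuming AVX-512VL: */
#if 0 /* illustration only */
#include <immintrin.h>
__m256i xor3(__m256i a, __m256i b, __m256i c)
{
  return _mm256_ternarylogic_epi32(a, b, c, 0x96); /* one op, not two XORs */
}
#endif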
+
+#define _mm256_shuffle_f32x4(A, B, imm) \
+  (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
+      (__v8sf)(__m256)(B), (int)(imm))
+
+#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), (__v8sf)(__m256)(W))
+
+#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), (__v8sf)_mm256_setzero_ps())
+
+#define _mm256_shuffle_f64x2(A, B, imm) \
+  (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
+      (__v4df)(__m256d)(B), (int)(imm))
+
+#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+      (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), (__v4df)(__m256d)(W))
+
+#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+      (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), (__v4df)_mm256_setzero_pd())
+
+#define _mm256_shuffle_i32x4(A, B, imm) \
+  (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
+      (__v8si)(__m256i)(B), (int)(imm))
+
+#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
+  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), (__v8si)(__m256i)(W))
+
+#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
+  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), (__v8si)_mm256_setzero_si256())
+
+#define _mm256_shuffle_i64x2(A, B, imm) \
+  (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
+      (__v4di)(__m256i)(B), (int)(imm))
+
+#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
+  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), (__v4di)(__m256i)(W))
+
+#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
+  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), (__v4di)_mm256_setzero_si256())
+
+#define _mm_mask_shuffle_pd(W, U, A, B, M) \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+      (__v2df)_mm_shuffle_pd((A), (B), (M)), (__v2df)(__m128d)(W))
+
+#define _mm_maskz_shuffle_pd(U, A, B, M) \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+      (__v2df)_mm_shuffle_pd((A), (B), (M)), (__v2df)_mm_setzero_pd())
+
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+      (__v4df)_mm256_shuffle_pd((A), (B), (M)), (__v4df)(__m256d)(W))
+
+#define _mm256_maskz_shuffle_pd(U, A, B, M) \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+      (__v4df)_mm256_shuffle_pd((A), (B), (M)), (__v4df)_mm256_setzero_pd())
+
+#define _mm_mask_shuffle_ps(W, U, A, B, M) \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+      (__v4sf)_mm_shuffle_ps((A), (B), (M)), (__v4sf)(__m128)(W))
+
+#define _mm_maskz_shuffle_ps(U, A, B, M) \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+      (__v4sf)_mm_shuffle_ps((A), (B), (M)), (__v4sf)_mm_setzero_ps())
+
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), (__v8sf)(__m256)(W))
+
+#define _mm256_maskz_shuffle_ps(U, A, B, M) \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), (__v8sf)_mm256_setzero_ps())
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_rsqrt14_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+      (__v2df) _mm_setzero_pd (), (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+      (__v2df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+      (__v2df) _mm_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_rsqrt14_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+      (__v4df) _mm256_setzero_pd (), (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+      (__v4df) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+      (__v4df) _mm256_setzero_pd (), (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_rsqrt14_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+      (__v4sf) _mm_setzero_ps (), (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+      (__v4sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+      (__v4sf) _mm_setzero_ps (), (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_rsqrt14_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+      (__v8sf) _mm256_setzero_ps (), (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+      (__v8sf) __W, (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+      (__v8sf) _mm256_setzero_ps (), (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_broadcast_f32x4(__m128 __A)
+{
+  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
+                                         0, 1, 2, 3, 0, 1, 2, 3);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
+      (__v8sf)_mm256_broadcast_f32x4(__A), (__v8sf)__O);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
+{
+  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
+      (__v8sf)_mm256_broadcast_f32x4(__A), (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcast_i32x4(__m128i __A)
+{
+  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
+                                          0, 1, 2, 3, 0, 1, 2, 3);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+      (__v8si)_mm256_broadcast_i32x4(__A), (__v8si)__O);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
+{
+  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+      (__v8si)_mm256_broadcast_i32x4(__A), (__v8si)_mm256_setzero_si256());
+}
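/* Editorial note -- not part of the upstream header or of this patch.
 * _mm256_broadcast_f32x4/_mm256_broadcast_i32x4 above are expressed as a
 * generic shufflevector repeating lanes 0..3 into both 128-bit halves,
 * which the backend may lower to a single broadcast load or shuffle.
 * A hypothetical sketch, assuming AVX-512VL: */
#if 0 /* illustration only */
#include <immintrin.h>
__m256 tile_coeffs(__m128 c)
{
  /* Both 128-bit halves of the result hold c = {c0, c1, c2, c3}. */
  return _mm256_broadcast_f32x4(c);
}
#endif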
(__m256d)__builtin_ia32_selectpd_256(__M, + (__v4df) _mm256_broadcastsd_pd(__A), + (__v4df) __O); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256(__M, + (__v4df) _mm256_broadcastsd_pd(__A), + (__v4df) _mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128(__M, + (__v4sf) _mm_broadcastss_ps(__A), + (__v4sf) __O); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128(__M, + (__v4sf) _mm_broadcastss_ps(__A), + (__v4sf) _mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256(__M, + (__v8sf) _mm256_broadcastss_ps(__A), + (__v8sf) __O); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256(__M, + (__v8sf) _mm256_broadcastss_ps(__A), + (__v8sf) _mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_broadcastd_epi32(__A), + (__v4si) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_broadcastd_epi32(__A), + (__v4si) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_broadcastd_epi32(__A), + (__v8si) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_broadcastd_epi32(__A), + (__v8si) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di) _mm_broadcastq_epi64(__A), + (__v2di) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di) _mm_broadcastq_epi64(__A), + (__v2di) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di) _mm256_broadcastq_epi64(__A), + (__v4di) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di) _mm256_broadcastq_epi64(__A), + (__v4di) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} 
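+// The cvtsepi* (VPMOVS*) intrinsics in this block narrow each element with
+// signed saturation: values outside the destination's signed range clamp to
+// its minimum/maximum. As elsewhere in this header, the _mask_ form keeps the
+// pass-through lanes from __O where the mask bit is 0, while the _maskz_ form
+// zeroes them. A minimal usage sketch (values are illustrative only, not part
+// of the upstream header):
+//   __m128i v = _mm_set_epi32(300, -300, 5, -5);
+//   __m128i r = _mm_cvtsepi32_epi8(v);
+//   // low 4 bytes of r, from element 0: -5, 5, -128, 127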
+ +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)_mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + 
(__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + 
__builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi32_epi16 
(__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + 
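+// The cvtusepi* (VPMOVUS*) forms narrow with unsigned saturation: source
+// lanes are treated as unsigned and clamp to the destination's unsigned
+// maximum, so a signed -1 input produces an all-ones result. A sketch with
+// illustrative values (not part of the upstream header):
+//   __m128i v = _mm_set_epi64x(-1, 70000);  // element 1 = -1, element 0 = 70000
+//   __m128i r = _mm_cvtusepi64_epi32(v);
+//   // low dwords of r, from element 0: 70000, 0xFFFFFFFF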
+static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi32_epi8 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi8 (__m256i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v8si)__A, __v8qi), + (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 
+_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi32_epi16 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi16 (__m256i __A) +{ + return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_epi8 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_epi8 (__m256i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) 
__O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_epi32 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_epi32 (__m256i __A) +{ + return (__m128i)__builtin_convertvector((__v4di)__A, __v4si); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm256_cvtepi64_epi32(__A), + (__v4si)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm256_cvtepi64_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_epi16 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3, + 3, 3, 3, 3); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_epi16 (__m256i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) 
__builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +#define _mm256_extractf32x4_ps(A, imm) \ + (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \ + (int)(imm), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1) + +#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \ + (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \ + (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_extractf32x4_ps(U, A, imm) \ + (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \ + (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm256_extracti32x4_epi32(A, imm) \ + (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \ + (int)(imm), \ + (__v4si)_mm_undefined_si128(), \ + (__mmask8)-1) + +#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \ + (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \ + (int)(imm), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \ + (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \ + (int)(imm), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(U)) + +#define _mm256_insertf32x4(A, B, imm) \ + (__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \ + (__v4sf)(__m128)(B), (int)(imm)) + +#define _mm256_mask_insertf32x4(W, U, A, B, imm) \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \ + (__v8sf)(__m256)(W)) + +#define _mm256_maskz_insertf32x4(U, A, B, imm) \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \ + (__v8sf)_mm256_setzero_ps()) + +#define _mm256_inserti32x4(A, B, imm) \ + (__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \ + (__v4si)(__m128i)(B), (int)(imm)) + +#define _mm256_mask_inserti32x4(W, U, A, B, imm) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_inserti32x4((A), (B), (imm)), \ + (__v8si)(__m256i)(W)) + +#define _mm256_maskz_inserti32x4(U, A, B, imm) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_inserti32x4((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_getmant_pd(A, B, C) \ + (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1) + +#define _mm_mask_getmant_pd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)) + +#define _mm_maskz_getmant_pd(U, A, B, C) \ + (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)) + +#define _mm256_getmant_pd(A, B, C) \ + (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1) + +#define _mm256_mask_getmant_pd(W, U, A, B, C) \ + (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_getmant_pd(U, A, B, C) \ + (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)) + 
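+// In the getmant macros, the immediate packs both enum operands as
+// ((C)<<2) | (B): B (bits 1:0) selects the normalization interval
+// (_MM_MANT_NORM_*) and C (bits 3:2) the sign control (_MM_MANT_SIGN_*),
+// both enums being defined in avx512fintrin.h. For example (x is any __m128d):
+//   __m128d m = _mm_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
+//   // each element's mantissa, normalized into [1, 2)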
+#define _mm_getmant_ps(A, B, C) \ + (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1) + +#define _mm_mask_getmant_ps(W, U, A, B, C) \ + (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)) + +#define _mm_maskz_getmant_ps(U, A, B, C) \ + (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm256_getmant_ps(A, B, C) \ + (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1) + +#define _mm256_mask_getmant_ps(W, U, A, B, C) \ + (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_getmant_ps(U, A, B, C) \ + (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)) + +#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \ + (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \ + (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \ + (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \ + (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \ + (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \ + (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \ + (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \ + (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \ + (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \ + (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \ + 
(__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \ + (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \ + (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \ + (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \ + (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \ + (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)) + +#define _mm256_permutex_pd(X, C) \ + (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)) + +#define _mm256_mask_permutex_pd(W, U, X, C) \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permutex_pd((X), (C)), \ + (__v4df)(__m256d)(W)) + +#define _mm256_maskz_permutex_pd(U, X, C) \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permutex_pd((X), (C)), \ + (__v4df)_mm256_setzero_pd()) + +#define _mm256_permutex_epi64(X, C) \ + (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)) + +#define _mm256_mask_permutex_epi64(W, U, X, C) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_permutex_epi64((X), (C)), \ + (__v4di)(__m256i)(W)) + +#define _mm256_maskz_permutex_epi64(U, X, C) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_permutex_epi64((X), (C)), \ + (__v4di)_mm256_setzero_si256()) + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_pd (__m256i __X, __m256d __Y) +{ + return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X, + __m256d __Y) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutexvar_pd(__X, __Y), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutexvar_pd(__X, __Y), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_permvardi256((__v4di) __Y, (__v4di) __X); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_permutexvar_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return 
(__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_permutexvar_epi64(__X, __Y), + (__v4di)__W); +} + +#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A)) + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutexvar_ps(__X, __Y), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutexvar_ps(__X, __Y), + (__v8sf)_mm256_setzero_ps()); +} + +#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_permutexvar_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_permutexvar_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +#define _mm_alignr_epi32(A, B, imm) \ + (__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(imm)) + +#define _mm_mask_alignr_epi32(W, U, A, B, imm) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_alignr_epi32((A), (B), (imm)), \ + (__v4si)(__m128i)(W)) + +#define _mm_maskz_alignr_epi32(U, A, B, imm) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_alignr_epi32((A), (B), (imm)), \ + (__v4si)_mm_setzero_si128()) + +#define _mm256_alignr_epi32(A, B, imm) \ + (__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(imm)) + +#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \ + (__v8si)(__m256i)(W)) + +#define _mm256_maskz_alignr_epi32(U, A, B, imm) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_alignr_epi64(A, B, imm) \ + (__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (int)(imm)) + +#define _mm_mask_alignr_epi64(W, U, A, B, imm) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_alignr_epi64((A), (B), (imm)), \ + (__v2di)(__m128i)(W)) + +#define _mm_maskz_alignr_epi64(U, A, B, imm) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_alignr_epi64((A), (B), (imm)), \ + (__v2di)_mm_setzero_si128()) + +#define _mm256_alignr_epi64(A, B, imm) \ + (__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(imm)) + +#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \ + (__v4di)(__m256i)(W)) + +#define _mm256_maskz_alignr_epi64(U, A, B, imm) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256()) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_movehdup_ps(__A), + 
(__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_movehdup_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_movehdup_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_movehdup_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_moveldup_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_moveldup_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_moveldup_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_moveldup_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +#define _mm256_mask_shuffle_epi32(W, U, A, I) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_epi32((A), (I)), \ + (__v8si)(__m256i)(W)) + +#define _mm256_maskz_shuffle_epi32(U, A, I) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_epi32((A), (I)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_mask_shuffle_epi32(W, U, A, I) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shuffle_epi32((A), (I)), \ + (__v4si)(__m128i)(W)) + +#define _mm_maskz_shuffle_epi32(U, A, I) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shuffle_epi32((A), (I)), \ + (__v4si)_mm_setzero_si128()) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __A, + (__v2df) __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __A, + (__v2df) _mm_setzero_pd ()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __A, + (__v4df) __W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __A, + (__v4df) _mm256_setzero_pd ()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __A, + (__v4sf) __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) 
__U, + (__v4sf) __A, + (__v4sf) _mm_setzero_ps ()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __A, + (__v8sf) __W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __A, + (__v8sf) _mm256_setzero_ps ()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_mask_cvt_roundps_ph(W, U, A, I) \ + (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U)) + +#define _mm_maskz_cvt_roundps_ph(U, A, I) \ + (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U)) + +#define _mm_mask_cvtps_ph _mm_mask_cvt_roundps_ph +#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph + +#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \ + (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U)) + +#define _mm256_maskz_cvt_roundps_ph(U, A, I) \ + (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U)) + +#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph +#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __AVX512VLINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvbmi2intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvbmi2intrin.h new file mode 100644 index 0000000..a40f926 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvbmi2intrin.h @@ -0,0 +1,689 @@ +/*===------------- avx512vlvbmi2intrin.h - VBMI2 intrinsics -----------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use <avx512vlvbmi2intrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __AVX512VLVBMI2INTRIN_H +#define __AVX512VLVBMI2INTRIN_H + +/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D, + (__v8hi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D, + (__v8hi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D, + (__v16qi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D, + (__v16qi) _mm_setzero_si128(), + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D) +{ + __builtin_ia32_compressstorehi128_mask ((__v8hi *) __P, (__v8hi) __D, + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D) +{ + __builtin_ia32_compressstoreqi128_mask ((__v16qi *) __P, (__v16qi) __D, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D, + (__v8hi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D, + (__v8hi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D, + (__v16qi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D, + (__v16qi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P, + (__v8hi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi16(__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P, + (__v8hi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P, + (__v16qi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi8(__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P, + (__v16qi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D, + (__v16hi) 
__S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D, + (__v16hi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D, + (__v32qi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D, + (__v32qi) _mm256_setzero_si256(), + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D) +{ + __builtin_ia32_compressstorehi256_mask ((__v16hi *) __P, (__v16hi) __D, + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D) +{ + __builtin_ia32_compressstoreqi256_mask ((__v32qi *) __P, (__v32qi) __D, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D, + (__v16hi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D, + (__v16hi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D, + (__v32qi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D, + (__v32qi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P, + (__v16hi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P, + (__v16hi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P, + (__v32qi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P, + (__v32qi) _mm256_setzero_si256(), + __U); +} + +#define _mm256_shldi_epi64(A, B, I) \ + (__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(I)) + +#define _mm256_mask_shldi_epi64(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shldi_epi64((A), (B), (I)), \ + (__v4di)(__m256i)(S)) + +#define _mm256_maskz_shldi_epi64(U, A, B, I) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shldi_epi64((A), (B), (I)), \ + (__v4di)_mm256_setzero_si256()) + +#define _mm_shldi_epi64(A, B, I) \ + (__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \ + 
(__v2di)(__m128i)(B), (int)(I)) + +#define _mm_mask_shldi_epi64(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shldi_epi64((A), (B), (I)), \ + (__v2di)(__m128i)(S)) + +#define _mm_maskz_shldi_epi64(U, A, B, I) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shldi_epi64((A), (B), (I)), \ + (__v2di)_mm_setzero_si128()) + +#define _mm256_shldi_epi32(A, B, I) \ + (__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(I)) + +#define _mm256_mask_shldi_epi32(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shldi_epi32((A), (B), (I)), \ + (__v8si)(__m256i)(S)) + +#define _mm256_maskz_shldi_epi32(U, A, B, I) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shldi_epi32((A), (B), (I)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_shldi_epi32(A, B, I) \ + (__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(I)) + +#define _mm_mask_shldi_epi32(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shldi_epi32((A), (B), (I)), \ + (__v4si)(__m128i)(S)) + +#define _mm_maskz_shldi_epi32(U, A, B, I) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shldi_epi32((A), (B), (I)), \ + (__v4si)_mm_setzero_si128()) + +#define _mm256_shldi_epi16(A, B, I) \ + (__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \ + (__v16hi)(__m256i)(B), (int)(I)) + +#define _mm256_mask_shldi_epi16(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \ + (__v16hi)(__m256i)(S)) + +#define _mm256_maskz_shldi_epi16(U, A, B, I) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \ + (__v16hi)_mm256_setzero_si256()) + +#define _mm_shldi_epi16(A, B, I) \ + (__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (int)(I)) + +#define _mm_mask_shldi_epi16(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shldi_epi16((A), (B), (I)), \ + (__v8hi)(__m128i)(S)) + +#define _mm_maskz_shldi_epi16(U, A, B, I) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shldi_epi16((A), (B), (I)), \ + (__v8hi)_mm_setzero_si128()) + +#define _mm256_shrdi_epi64(A, B, I) \ + (__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(I)) + +#define _mm256_mask_shrdi_epi64(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \ + (__v4di)(__m256i)(S)) + +#define _mm256_maskz_shrdi_epi64(U, A, B, I) \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \ + (__v4di)_mm256_setzero_si256()) + +#define _mm_shrdi_epi64(A, B, I) \ + (__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (int)(I)) + +#define _mm_mask_shrdi_epi64(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shrdi_epi64((A), (B), (I)), \ + (__v2di)(__m128i)(S)) + +#define _mm_maskz_shrdi_epi64(U, A, B, I) \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shrdi_epi64((A), (B), (I)), \ + (__v2di)_mm_setzero_si128()) + +#define _mm256_shrdi_epi32(A, B, I) \ + (__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(I)) + +#define _mm256_mask_shrdi_epi32(S, U, A, B, I) \ + 
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \ + (__v8si)(__m256i)(S)) + +#define _mm256_maskz_shrdi_epi32(U, A, B, I) \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \ + (__v8si)_mm256_setzero_si256()) + +#define _mm_shrdi_epi32(A, B, I) \ + (__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(I)) + +#define _mm_mask_shrdi_epi32(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shrdi_epi32((A), (B), (I)), \ + (__v4si)(__m128i)(S)) + +#define _mm_maskz_shrdi_epi32(U, A, B, I) \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shrdi_epi32((A), (B), (I)), \ + (__v4si)_mm_setzero_si128()) + +#define _mm256_shrdi_epi16(A, B, I) \ + (__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \ + (__v16hi)(__m256i)(B), (int)(I)) + +#define _mm256_mask_shrdi_epi16(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \ + (__v16hi)(__m256i)(S)) + +#define _mm256_maskz_shrdi_epi16(U, A, B, I) \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \ + (__v16hi)_mm256_setzero_si256()) + +#define _mm_shrdi_epi16(A, B, I) \ + (__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (int)(I)) + +#define _mm_mask_shrdi_epi16(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \ + (__v8hi)(__m128i)(S)) + +#define _mm_maskz_shrdi_epi16(U, A, B, I) \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \ + (__v8hi)_mm_setzero_si128()) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshldvq256((__v4di)__A, (__v4di)__B, + (__v4di)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shldv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shldv_epi64(__A, __B, __C), + (__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shldv_epi64(__A, __B, __C), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshldvq128((__v2di)__A, (__v2di)__B, + (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shldv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shldv_epi64(__A, __B, __C), + (__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shldv_epi64(__A, __B, __C), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshldvd256((__v8si)__A, (__v8si)__B, + (__v8si)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shldv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return 
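// Usage sketch for the VBMI2 funnel-shift intrinsics above (a minimal
// example, assuming AVX512VL and AVX512_VBMI2 support, e.g. -mavx512vl
// -mavx512vbmi2): _mm256_shldi_epi32 shifts the concatenation A:B left and
// keeps the high half, so passing the same vector twice yields a
// per-element rotate left:
//   __m256i rotl5(__m256i v) { return _mm256_shldi_epi32(v, v, 5); }
// rotl5 is a hypothetical helper, not part of this header.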
(__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shldv_epi32(__A, __B, __C), + (__v8si)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shldv_epi32(__A, __B, __C), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshldvd128((__v4si)__A, (__v4si)__B, + (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shldv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shldv_epi32(__A, __B, __C), + (__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shldv_epi32(__A, __B, __C), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshldvw256((__v16hi)__A, (__v16hi)__B, + (__v16hi)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shldv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shldv_epi16(__A, __B, __C), + (__v16hi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shldv_epi16(__A, __B, __C), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshldvw128((__v8hi)__A, (__v8hi)__B, + (__v8hi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shldv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shldv_epi16(__A, __B, __C), + (__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shldv_epi16(__A, __B, __C), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshrdvq256((__v4di)__A, (__v4di)__B, + (__v4di)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shrdv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shrdv_epi64(__A, __B, __C), + (__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shrdv_epi64(__A, __B, __C), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshrdvq128((__v2di)__A, (__v2di)__B, + (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shrdv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return 
(__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shrdv_epi64(__A, __B, __C), + (__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shrdv_epi64(__A, __B, __C), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshrdvd256((__v8si)__A, (__v8si)__B, + (__v8si)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shrdv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shrdv_epi32(__A, __B, __C), + (__v8si)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shrdv_epi32(__A, __B, __C), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshrdvd128((__v4si)__A, (__v4si)__B, + (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shrdv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shrdv_epi32(__A, __B, __C), + (__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shrdv_epi32(__A, __B, __C), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshrdvw256((__v16hi)__A, (__v16hi)__B, + (__v16hi)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shrdv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shrdv_epi16(__A, __B, __C), + (__v16hi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shrdv_epi16(__A, __B, __C), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshrdvw128((__v8hi)__A, (__v8hi)__B, + (__v8hi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shrdv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shrdv_epi16(__A, __B, __C), + (__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shrdv_epi16(__A, __B, __C), + (__v8hi)_mm_setzero_si128()); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvnniintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvnniintrin.h new file mode 100644 index 0000000..71ac1b4 --- /dev/null +++ 
b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvnniintrin.h @@ -0,0 +1,304 @@ +/*===------------- avx512vlvnniintrin.h - VNNI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLVNNIINTRIN_H +#define __AVX512VLVNNIINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(256))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endoperation +#define _mm256_dpbusd_epi32(S, A, B) \ + (__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endoperation +#define _mm256_dpbusds_epi32(S, A, B) \ + (__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S, +/// and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSD instructions. 
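// Usage sketch for the VNNI dot-product macros above (assumes AVX512VL and
// AVX512VNNI support, e.g. -mavx512vl -mavx512vnni; u8, s8 and n are
// hypothetical): an int8 dot-product kernel accumulating into 32-bit lanes.
//   __m256i acc = _mm256_setzero_si256();
//   for (int i = 0; i < n; i += 32)
//     acc = _mm256_dpbusd_epi32(acc,
//               _mm256_loadu_si256((const __m256i *)(u8 + i)),   // unsigned bytes
//               _mm256_loadu_si256((const __m256i *)(s8 + i)));  // signed bytes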
+/// +/// \operation +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endoperation +#define _mm256_dpwssd_epi32(S, A, B) \ + (__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S +/// using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSDS instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endoperation +#define _mm256_dpwssds_epi32(S, A, B) \ + (__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. +/// +/// \operation +/// FOR j := 0 to 3 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endoperation +#define _mm_dpbusd_epi32(S, A, B) \ + (__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. +/// +/// \operation +/// FOR j := 0 to 3 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endoperation +#define _mm_dpbusds_epi32(S, A, B) \ + (__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S, +/// and store the packed 32-bit results in DST. 
+/// +/// This intrinsic corresponds to the VPDPWSSD instructions. +/// +/// \operation +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endoperation +#define _mm_dpwssd_epi32(S, A, B) \ + (__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S +/// using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSDS instructions. +/// +/// \operation +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endoperation +#define _mm_dpwssds_epi32(S, A, B) \ + (__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusd_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpbusd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusd_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpbusds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusds_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpbusds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusds_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpwssd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssd_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpwssd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssd_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpwssds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssds_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpwssds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssds_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + 
(__v4si)_mm_dpbusd_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusd_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusds_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusds_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssd_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssd_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssds_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpwssds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssds_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvp2intersectintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvp2intersectintrin.h new file mode 100644 index 0000000..3e0815e --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vlvp2intersectintrin.h @@ -0,0 +1,121 @@ +/*===------ avx512vlvp2intersectintrin.h - VL VP2INTERSECT intrinsics ------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512vlvp2intersectintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512VLVP2INTERSECT_H
+#define _AVX512VLVP2INTERSECT_H
+
+#define __DEFAULT_FN_ATTRS128 \
+  __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vp2intersect"), \
+                 __min_vector_width__(128)))
+
+#define __DEFAULT_FN_ATTRS256 \
+  __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vp2intersect"), \
+                 __min_vector_width__(256)))
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between dwords in operands __a and __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the VP2INTERSECTD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \param __m0
+/// A pointer to an 8-bit mask that receives the indicators for __a.
+/// \param __m1
+/// A pointer to an 8-bit mask that receives the indicators for __b.
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_2intersect_epi32(__m256i __a, __m256i __b, __mmask8 *__m0, __mmask8 *__m1) {
+  __builtin_ia32_vp2intersect_d_256((__v8si)__a, (__v8si)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between quadwords in operands __a and __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the VP2INTERSECTQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64].
+/// \param __b
+/// A 256-bit vector of [4 x i64].
+/// \param __m0
+/// A pointer to an 8-bit mask that receives the indicators for __a.
+/// \param __m1
+/// A pointer to an 8-bit mask that receives the indicators for __b.
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_2intersect_epi64(__m256i __a, __m256i __b, __mmask8 *__m0, __mmask8 *__m1) {
+  __builtin_ia32_vp2intersect_q_256((__v4di)__a, (__v4di)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between dwords in operands __a and __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the VP2INTERSECTD instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32].
+/// \param __b
+/// A 128-bit vector of [4 x i32].
+/// \param __m0
+/// A pointer to an 8-bit mask that receives the indicators for __a.
+/// \param __m1
+/// A pointer to an 8-bit mask that receives the indicators for __b.
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_2intersect_epi32(__m128i __a, __m128i __b, __mmask8 *__m0, __mmask8 *__m1) {
+  __builtin_ia32_vp2intersect_d_128((__v4si)__a, (__v4si)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between quadwords in operands __a and __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the VP2INTERSECTQ instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x i64].
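// Usage sketch for the VP2INTERSECT intrinsics above (assumes AVX512VL and
// AVX512VP2INTERSECT support; a and b are hypothetical __m256i values):
//   __mmask8 m0, m1;
//   _mm256_2intersect_epi32(a, b, &m0, &m1);
//   // bit i of m0: dword i of a equals some dword of b
//   // bit j of m1: dword j of b equals some dword of a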
+/// \param __b +/// A 128-bit vector of [2 x i64] +/// \param __m0 +/// A pointer point to 8-bit mask +/// \param __m1 +/// A pointer point to 8-bit mask +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_2intersect_epi64(__m128i __a, __m128i __b, __mmask8 *__m0, __mmask8 *__m1) { + __builtin_ia32_vp2intersect_q_128((__v2di)__a, (__v2di)__b, __m0, __m1); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vnniintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vnniintrin.h new file mode 100644 index 0000000..9935a11 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vnniintrin.h @@ -0,0 +1,115 @@ +/*===------------- avx512vnniintrin.h - VNNI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VNNIINTRIN_H +#define __AVX512VNNIINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vnni"), __min_vector_width__(512))) + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpbusd_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpbusd512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpbusd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusd_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpbusd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusd_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpbusds_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpbusds512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpbusds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusds_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpbusds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusds_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpwssd_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpwssd512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpwssd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssd_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpwssd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return 
(__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssd_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpwssds_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpwssds512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpwssds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssds_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpwssds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssds_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vp2intersectintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vp2intersectintrin.h new file mode 100644 index 0000000..5d3cb48 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vp2intersectintrin.h @@ -0,0 +1,77 @@ +/*===------- avx512vpintersectintrin.h - VP2INTERSECT intrinsics ------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VP2INTERSECT_H +#define _AVX512VP2INTERSECT_H + +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("avx512vp2intersect"), \ + __min_vector_width__(512))) + +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between dwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTD instruction. +/// +/// \param __a +/// A 512-bit vector of [16 x i32]. 
+/// \param __b
+/// A 512-bit vector of [16 x i32].
+/// \param __m0
+/// A pointer to a 16-bit mask that receives the indicators for __a.
+/// \param __m1
+/// A pointer to a 16-bit mask that receives the indicators for __b.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_2intersect_epi32(__m512i __a, __m512i __b, __mmask16 *__m0, __mmask16 *__m1) {
+  __builtin_ia32_vp2intersect_d_512((__v16si)__a, (__v16si)__b, __m0, __m1);
+}
+
+/// Store, in an even/odd pair of mask registers, the indicators of the
+/// locations of value matches between quadwords in operands __a and __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the VP2INTERSECTQ instruction.
+///
+/// \param __a
+/// A 512-bit vector of [8 x i64].
+/// \param __b
+/// A 512-bit vector of [8 x i64].
+/// \param __m0
+/// A pointer to an 8-bit mask that receives the indicators for __a.
+/// \param __m1
+/// A pointer to an 8-bit mask that receives the indicators for __b.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_2intersect_epi64(__m512i __a, __m512i __b, __mmask8 *__m0, __mmask8 *__m1) {
+  __builtin_ia32_vp2intersect_q_512((__v8di)__a, (__v8di)__b, __m0, __m1);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vpopcntdqintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vpopcntdqintrin.h
new file mode 100644
index 0000000..bb435e6
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vpopcntdqintrin.h
@@ -0,0 +1,54 @@
+/*===----- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics-------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error \
+    "Never use <avx512vpopcntdqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VPOPCNTDQINTRIN_H
+#define __AVX512VPOPCNTDQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq"), __min_vector_width__(512)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi64(__m512i __A) {
+  return (__m512i)__builtin_ia32_vpopcntq_512((__v8di)__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_popcnt_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
+  return (__m512i)__builtin_ia32_selectq_512(
+      (__mmask8)__U, (__v8di)_mm512_popcnt_epi64(__A), (__v8di)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_popcnt_epi64(__mmask8 __U, __m512i __A) {
+  return _mm512_mask_popcnt_epi64((__m512i)_mm512_setzero_si512(), __U, __A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi32(__m512i __A) {
+  return (__m512i)__builtin_ia32_vpopcntd_512((__v16si)__A);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_popcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
+  return (__m512i)__builtin_ia32_selectd_512(
+      (__mmask16)__U, (__v16si)_mm512_popcnt_epi32(__A), (__v16si)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_popcnt_epi32(__mmask16 __U, __m512i __A) {
+  return _mm512_mask_popcnt_epi32((__m512i)_mm512_setzero_si512(), __U, __A);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vpopcntdqvlintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vpopcntdqvlintrin.h
new file mode 100644
index 0000000..a3cb9b6
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avx512vpopcntdqvlintrin.h
@@ -0,0 +1,91 @@
+/*===---- avx512vpopcntdqvlintrin.h - AVX512VPOPCNTDQ intrinsics -----------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error \
+    "Never use <avx512vpopcntdqvlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512VPOPCNTDQVLINTRIN_H
+#define __AVX512VPOPCNTDQVLINTRIN_H
+
+/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl"), __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi64(__m128i __A) { + return (__m128i)__builtin_ia32_vpopcntq_128((__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectq_128( + (__mmask8)__U, (__v2di)_mm_popcnt_epi64(__A), (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) { + return _mm_mask_popcnt_epi64((__m128i)_mm_setzero_si128(), __U, __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi32(__m128i __A) { + return (__m128i)__builtin_ia32_vpopcntd_128((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128( + (__mmask8)__U, (__v4si)_mm_popcnt_epi32(__A), (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) { + return _mm_mask_popcnt_epi32((__m128i)_mm_setzero_si128(), __U, __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi64(__m256i __A) { + return (__m256i)__builtin_ia32_vpopcntq_256((__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectq_256( + (__mmask8)__U, (__v4di)_mm256_popcnt_epi64(__A), (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) { + return _mm256_mask_popcnt_epi64((__m256i)_mm256_setzero_si256(), __U, __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi32(__m256i __A) { + return (__m256i)__builtin_ia32_vpopcntd_256((__v8si)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256( + (__mmask8)__U, (__v8si)_mm256_popcnt_epi32(__A), (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) { + return _mm256_mask_popcnt_epi32((__m256i)_mm256_setzero_si256(), __U, __A); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avxintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avxintrin.h new file mode 100644 index 0000000..382b621 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avxintrin.h @@ -0,0 +1,5053 @@ +/*===---- avxintrin.h - AVX intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." 
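// Usage sketch for the VPOPCNTDQ intrinsics above (assumes AVX512VPOPCNTDQ
// and AVX512VL support); it also illustrates the mask_/maskz_ convention
// used throughout these headers: mask_ takes unselected lanes from __W,
// maskz_ zeroes them. v and w are hypothetical __m256i values.
//   __m256i c0 = _mm256_popcnt_epi32(v);                // per-lane popcount
//   __m256i c1 = _mm256_mask_popcnt_epi32(w, 0x0F, v);  // lanes 4..7 copied from w
//   __m256i c2 = _mm256_maskz_popcnt_epi32(0x0F, v);    // lanes 4..7 zeroed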
+#endif + +#ifndef __AVXINTRIN_H +#define __AVXINTRIN_H + +typedef double __v4df __attribute__ ((__vector_size__ (32))); +typedef float __v8sf __attribute__ ((__vector_size__ (32))); +typedef long long __v4di __attribute__ ((__vector_size__ (32))); +typedef int __v8si __attribute__ ((__vector_size__ (32))); +typedef short __v16hi __attribute__ ((__vector_size__ (32))); +typedef char __v32qi __attribute__ ((__vector_size__ (32))); + +/* Unsigned types */ +typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); +typedef unsigned int __v8su __attribute__ ((__vector_size__ (32))); +typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32))); +typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32))); + +/* We need an explicitly signed variant for char. Note that this shouldn't + * appear in the interface though. */ +typedef signed char __v32qs __attribute__((__vector_size__(32))); + +typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32))); +typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32))); +typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32))); + +typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1))); +typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1))); +typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1))); + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128))) + +/* Arithmetic */ +/// Adds two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the sums of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_add_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a+(__v4df)__b); +} + +/// Adds two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the sums of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_add_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a+(__v8sf)__b); +} + +/// Subtracts two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the minuend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the differences between +/// both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_sub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a-(__v4df)__b); +} + +/// Subtracts two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS instruction. 
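// Usage sketch for the basic AVX arithmetic above (assumes AVX support,
// e.g. -mavx; x, y and out are hypothetical double arrays of length 4):
//   __m256d a = _mm256_loadu_pd(x);
//   __m256d b = _mm256_loadu_pd(y);
//   _mm256_storeu_pd(out, _mm256_add_pd(a, b));  // out[i] = x[i] + y[i]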
+/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the minuend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the differences between +/// both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_sub_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a-(__v8sf)__b); +} + +/// Adds the even-indexed values and subtracts the odd-indexed values of +/// two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the left source operand. +/// \param __b +/// A 256-bit vector of [4 x double] containing the right source operand. +/// \returns A 256-bit vector of [4 x double] containing the alternating sums +/// and differences between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_addsub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); +} + +/// Adds the even-indexed values and subtracts the odd-indexed values of +/// two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the left source operand. +/// \param __b +/// A 256-bit vector of [8 x float] containing the right source operand. +/// \returns A 256-bit vector of [8 x float] containing the alternating sums and +/// differences between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_addsub_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b); +} + +/// Divides two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the dividend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the divisor. +/// \returns A 256-bit vector of [4 x double] containing the quotients of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_div_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a/(__v4df)__b); +} + +/// Divides two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the dividend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the divisor. +/// \returns A 256-bit vector of [8 x float] containing the quotients of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_div_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a/(__v8sf)__b); +} + +/// Compares two 256-bit vectors of [4 x double] and returns the greater +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the maximum values +/// between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_max_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b); +} + +/// Compares two 256-bit vectors of [8 x float] and returns the greater +/// of each pair of values. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the maximum values +/// between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_max_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b); +} + +/// Compares two 256-bit vectors of [4 x double] and returns the lesser +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the minimum values +/// between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_min_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b); +} + +/// Compares two 256-bit vectors of [8 x float] and returns the lesser +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the minimum values +/// between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_min_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b); +} + +/// Multiplies two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the products of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_mul_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a * (__v4df)__b); +} + +/// Multiplies two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the products of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_mul_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a * (__v8sf)__b); +} + +/// Calculates the square roots of the values in a 256-bit vector of +/// [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the square roots of the +/// values in the operand. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_sqrt_pd(__m256d __a) +{ + return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a); +} + +/// Calculates the square roots of the values in a 256-bit vector of +/// [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPS instruction. 
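// Usage sketch combining the min/max intrinsics above (assumes AVX): clamp
// every lane of a hypothetical vector v into the range [lo, hi]:
//   __m256d clamped = _mm256_min_pd(_mm256_max_pd(v, _mm256_set1_pd(lo)),
//                                   _mm256_set1_pd(hi));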
+/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the square roots of the +/// values in the operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_sqrt_ps(__m256 __a) +{ + return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a); +} + +/// Calculates the reciprocal square roots of the values in a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the reciprocal square +/// roots of the values in the operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_rsqrt_ps(__m256 __a) +{ + return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a); +} + +/// Calculates the reciprocals of the values in a 256-bit vector of +/// [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the reciprocals of the +/// values in the operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_rcp_ps(__m256 __a) +{ + return (__m256)__builtin_ia32_rcpps256((__v8sf)__a); +} + +/// Rounds the values in a 256-bit vector of [4 x double] as specified +/// by the byte operand. The source values are rounded to integer values and +/// returned as 64-bit double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_round_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used. \n +/// 1: The PE field is not updated. \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M. \n +/// 1: Use the current MXCSR setting. \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest. \n +/// 01: Downward (toward negative infinity). \n +/// 10: Upward (toward positive infinity). \n +/// 11: Truncated. +/// \returns A 256-bit vector of [4 x double] containing the rounded values. +#define _mm256_round_pd(V, M) \ + (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)) + +/// Rounds the values stored in a 256-bit vector of [8 x float] as +/// specified by the byte operand. The source values are rounded to integer +/// values and returned as floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_round_ps(__m256 V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used. \n +/// 1: The PE field is not updated. \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M. \n +/// 1: Use the current MXCSR setting. \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest. \n +/// 01: Downward (toward negative infinity). \n +/// 10: Upward (toward positive infinity). \n +/// 11: Truncated. +/// \returns A 256-bit vector of [8 x float] containing the rounded values. 
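// Usage sketch for the rounding macros above (assumes AVX): select
// round-to-nearest via bits [1:0] and suppress the precision exception via
// bit [3], using the standard _MM_FROUND_* constants:
//   __m256d r = _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);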
+#define _mm256_round_ps(V, M) \ + (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)) + +/// Rounds up the values stored in a 256-bit vector of [4 x double]. The +/// source values are rounded up to integer values and returned as 64-bit +/// double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_ceil_pd(__m256d V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the rounded up values. +#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL) + +/// Rounds down the values stored in a 256-bit vector of [4 x double]. +/// The source values are rounded down to integer values and returned as +/// 64-bit double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_floor_pd(__m256d V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the rounded down +/// values. +#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR) + +/// Rounds up the values stored in a 256-bit vector of [8 x float]. The +/// source values are rounded up to integer values and returned as +/// floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_ceil_ps(__m256 V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the rounded up values. +#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL) + +/// Rounds down the values stored in a 256-bit vector of [8 x float]. The +/// source values are rounded down to integer values and returned as +/// floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_floor_ps(__m256 V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the rounded down values. +#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR) + +/* Logical */ +/// Performs a bitwise AND of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the +/// values between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_and_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4du)__a & (__v4du)__b); +} + +/// Performs a bitwise AND of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the +/// values between both operands. 
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_and_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8su)__a & (__v8su)__b); +} + +/// Performs a bitwise AND of two 256-bit vectors of [4 x double], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 256-bit vector of [4 x double] containing the right source operand. +/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the +/// values of the second operand and the one's complement of the first +/// operand. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_andnot_pd(__m256d __a, __m256d __b) +{ + return (__m256d)(~(__v4du)__a & (__v4du)__b); +} + +/// Performs a bitwise AND of two 256-bit vectors of [8 x float], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 256-bit vector of [8 x float] containing the right source operand. +/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the +/// values of the second operand and the one's complement of the first +/// operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_andnot_ps(__m256 __a, __m256 __b) +{ + return (__m256)(~(__v8su)__a & (__v8su)__b); +} + +/// Performs a bitwise OR of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the +/// values between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_or_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4du)__a | (__v4du)__b); +} + +/// Performs a bitwise OR of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the +/// values between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_or_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8su)__a | (__v8su)__b); +} + +/// Performs a bitwise XOR of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the +/// values between both operands. 
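+///
+/// As an illustrative sketch (an editorial example, not upstream text):
+/// XORing with a vector of -0.0 flips only the sign bits, negating every
+/// element, where \a v is assumed to be an existing __m256d value:
+/// \code
+/// __m256d negated = _mm256_xor_pd(v, _mm256_set1_pd(-0.0));
+/// \endcode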
+static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_xor_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4du)__a ^ (__v4du)__b); +} + +/// Performs a bitwise XOR of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the +/// values between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_xor_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8su)__a ^ (__v8su)__b); +} + +/* Horizontal arithmetic */ +/// Horizontally adds the adjacent pairs of values contained in two +/// 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal sums of the values are returned in the even-indexed +/// elements of a vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal sums of the values are returned in the odd-indexed +/// elements of a vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of +/// both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_hadd_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in two +/// 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal sums of the values are returned in the elements with +/// index 0, 1, 4, 5 of a vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal sums of the values are returned in the elements with +/// index 2, 3, 6, 7 of a vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of +/// both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_hadd_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in two +/// 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// even-indexed elements of a vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// odd-indexed elements of a vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the horizontal +/// differences of both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_hsub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in two +/// 256-bit vectors of [8 x float]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// elements with index 0, 1, 4, 5 of a vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// elements with index 2, 3, 6, 7 of a vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the horizontal +/// differences of both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_hsub_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b); +} + +/* Vector permutations */ +/// Copies the values in a 128-bit vector of [2 x double] as specified +/// by the 128-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __c +/// A 128-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [65]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. +/// \returns A 128-bit vector of [2 x double] containing the copied values. +static __inline __m128d __DEFAULT_FN_ATTRS128 +_mm_permutevar_pd(__m128d __a, __m128i __c) +{ + return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c); +} + +/// Copies the values in a 256-bit vector of [4 x double] as specified +/// by the 256-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [65]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// Bit [129]: \n +/// 0: Bits [191:128] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// Bit [193]: \n +/// 0: Bits [191:128] of the source are copied to bits [255:192] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [255:192] of the +/// returned vector. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_permutevar_pd(__m256d __a, __m256i __c) +{ + return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c); +} + +/// Copies the values stored in a 128-bit vector of [4 x float] as +/// specified by the 128-bit integer vector operand. +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS instruction. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __c +/// A 128-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [33:32]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [65:64]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [97:96]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. +/// \returns A 128-bit vector of [4 x float] containing the copied values. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_permutevar_ps(__m128 __a, __m128i __c) +{ + return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c); +} + +/// Copies the values stored in a 256-bit vector of [8 x float] as +/// specified by the 256-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __c +/// A 256-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [33:32]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [65:64]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. 
\n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [97:96]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// Bits [129:128]: \n +/// 00: Bits [159:128] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// Bits [161:160]: \n +/// 00: Bits [159:128] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// Bits [193:192]: \n +/// 00: Bits [159:128] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// Bits [225:224]: \n +/// 00: Bits [159:128] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [255:224] of the +/// returned vector. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_permutevar_ps(__m256 __a, __m256i __c) +{ + return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c); +} + +/// Copies the values in a 128-bit vector of [2 x double] as specified +/// by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_permute_pd(__m128d A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param A +/// A 128-bit vector of [2 x double]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bit [0]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. +/// \returns A 128-bit vector of [2 x double] containing the copied values. 
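+///
+/// As an illustrative sketch (an editorial example, not upstream text):
+/// an immediate of 1 (bit [0] set, bit [1] clear) swaps the two elements,
+/// where \a v is assumed to be an existing __m128d value:
+/// \code
+/// __m128d swapped = _mm_permute_pd(v, 1); // yields { v[1], v[0] }
+/// \endcode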
+#define _mm_permute_pd(A, C) \ + (__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C)) + +/// Copies the values in a 256-bit vector of [4 x double] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_permute_pd(__m256d A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param A +/// A 256-bit vector of [4 x double]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bit [0]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// Bit [2]: \n +/// 0: Bits [191:128] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// Bit [3]: \n +/// 0: Bits [191:128] of the source are copied to bits [255:192] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [255:192] of the +/// returned vector. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +#define _mm256_permute_pd(A, C) \ + (__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C)) + +/// Copies the values in a 128-bit vector of [4 x float] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_permute_ps(__m128 A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param A +/// A 128-bit vector of [4 x float]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. 
\n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. +/// \returns A 128-bit vector of [4 x float] containing the copied values. +#define _mm_permute_ps(A, C) \ + (__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C)) + +/// Copies the values in a 256-bit vector of [8 x float] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_permute_ps(__m256 A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param A +/// A 256-bit vector of [8 x float]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// Bits [1:0]: \n +/// 00: Bits [159:128] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [159:128] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [159:128] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [223:192] of the +/// returned vector. 
\n
+/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// Bits [7:6]: \n
+/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
+/// returned vector.
+/// \returns A 256-bit vector of [8 x float] containing the copied values.
+#define _mm256_permute_ps(A, C) \
+ (__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C))
+
+/// Permutes 128-bit data values stored in two 256-bit vectors of
+/// [4 x double], as specified by the immediate integer operand.
+///
+/// \headerfile
+///
+/// \code
+/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPERM2F128 instruction.
+///
+/// \param V1
+/// A 256-bit vector of [4 x double].
+/// \param V2
+/// A 256-bit vector of [4 x double].
+/// \param M
+/// An immediate integer operand specifying how the values are to be
+/// permuted. \n
+/// Bits [1:0]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// Bits [5:4]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
+/// destination.
+/// \returns A 256-bit vector of [4 x double] containing the copied values.
+#define _mm256_permute2f128_pd(V1, V2, M) \
+ (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (int)(M))
+
+/// Permutes 128-bit data values stored in two 256-bit vectors of
+/// [8 x float], as specified by the immediate integer operand.
+///
+/// \headerfile
+///
+/// \code
+/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPERM2F128 instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x float].
+/// \param V2
+/// A 256-bit vector of [8 x float].
+/// \param M
+/// An immediate integer operand specifying how the values are to be
+/// permuted. \n
+/// Bits [1:0]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// Bits [5:4]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
+/// destination. 
\n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +#define _mm256_permute2f128_ps(V1, V2, M) \ + (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (int)(M)) + +/// Permutes 128-bit data values stored in two 256-bit integer vectors, +/// as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector. +/// \param V2 +/// A 256-bit integer vector. +/// \param M +/// An immediate integer operand specifying how the values are to be copied. +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit integer vector containing the copied values. +#define _mm256_permute2f128_si256(V1, V2, M) \ + (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \ + (__v8si)(__m256i)(V2), (int)(M)) + +/* Vector Blend */ +/// Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPD instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. +/// \param V2 +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer operand, with mask bits [3:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 64-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 64-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +#define _mm256_blend_pd(V1, V2, M) \ + (__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \ + (__v4df)(__m256d)(V2), (int)(M)) + +/// Merges 32-bit single-precision data values stored in either of the +/// two 256-bit vectors of [8 x float], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPS instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. +/// \param V2 +/// A 256-bit vector of [8 x float]. 
+/// \param M +/// An immediate integer operand, with mask bits [7:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 32-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 32-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +#define _mm256_blend_ps(V1, V2, M) \ + (__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (int)(M)) + +/// Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 64-bit element in operand \a __a is copied to the same +/// position in the destination. When a mask bit is 1, the corresponding +/// 64-bit element in operand \a __b is copied to the same position in the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c) +{ + return (__m256d)__builtin_ia32_blendvpd256( + (__v4df)__a, (__v4df)__b, (__v4df)__c); +} + +/// Merges 32-bit single-precision data values stored in either of the +/// two 256-bit vectors of [8 x float], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63, +/// and 31 specifying how the values are to be copied. The position of the +/// mask bit corresponds to the most significant bit of a copied value. When +/// a mask bit is 0, the corresponding 32-bit element in operand \a __a is +/// copied to the same position in the destination. When a mask bit is 1, the +/// corresponding 32-bit element in operand \a __b is copied to the same +/// position in the destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) +{ + return (__m256)__builtin_ia32_blendvps256( + (__v8sf)__a, (__v8sf)__b, (__v8sf)__c); +} + +/* Vector Dot Product */ +/// Computes two dot products in parallel, using the lower and upper +/// halves of two [8 x float] vectors as input to the two computations, and +/// returning the two dot products in the lower and upper halves of the +/// [8 x float] result. +/// +/// The immediate integer operand controls which input elements will +/// contribute to the dot product, and where the final results are returned. 
+/// In general, for each dot product, the four corresponding elements of the +/// input vectors are multiplied; the first two and second two products are +/// summed, then the two sums are added to form the final result. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPS instruction. +/// +/// \param V1 +/// A vector of [8 x float] values, treated as two [4 x float] vectors. +/// \param V2 +/// A vector of [8 x float] values, treated as two [4 x float] vectors. +/// \param M +/// An immediate integer argument. Bits [7:4] determine which elements of +/// the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [7] corresponding to the highest element of each [4 x +/// float] subvector. If a bit is set, the corresponding elements from the +/// two input vectors are used as an input for dot product; otherwise that +/// input is treated as zero. Bits [3:0] determine which elements of the +/// result will receive a copy of the final dot product, with bit [0] +/// corresponding to the lowest element and bit [3] corresponding to the +/// highest element of each [4 x float] subvector. If a bit is set, the dot +/// product is returned in the corresponding element; otherwise that element +/// is set to zero. The bitmask is applied in the same way to each of the +/// two parallel dot product computations. +/// \returns A 256-bit vector of [8 x float] containing the two dot products. +#define _mm256_dp_ps(V1, V2, M) \ + (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (M)) + +/* Vector shuffle */ +/// Selects 8 float values from the 256-bit operands of [8 x float], as +/// specified by the immediate value operand. +/// +/// The four selected elements in each operand are copied to the destination +/// according to the bits specified in the immediate operand. The selected +/// elements from the first 256-bit operand are copied to bits [63:0] and +/// bits [191:128] of the destination, and the selected elements from the +/// second 256-bit operand are copied to bits [127:64] and bits [255:192] of +/// the destination. For example, if bits [7:0] of the immediate operand +/// contain a value of 0xFF, the 256-bit destination vector would contain the +/// following values: b[7], b[7], a[7], a[7], b[3], b[3], a[3], a[3]. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. The four selected elements in this +/// operand are copied to bits [63:0] and bits [191:128] in the destination, +/// according to the bits specified in the immediate operand. +/// \param b +/// A 256-bit vector of [8 x float]. The four selected elements in this +/// operand are copied to bits [127:64] and bits [255:192] in the +/// destination, according to the bits specified in the immediate operand. +/// \param mask +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a and \a b \n. +/// Bits [3:0] specify the values copied from operand \a a. \n +/// Bits [7:4] specify the values copied from operand \a b. 
\n +/// The destinations within the 256-bit destination are assigned values as +/// follows, according to the bit value assignments described below: \n +/// Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [127:96] and [255:224] in +/// the destination. \n +/// Bit value assignments: \n +/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \n +/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \n +/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n +/// 11: Bits [127:96] and [255:224] are copied from the selected operand. +/// \returns A 256-bit vector of [8 x float] containing the shuffled values. +#define _mm256_shuffle_ps(a, b, mask) \ + (__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(mask)) + +/// Selects four double-precision values from the 256-bit operands of +/// [4 x double], as specified by the immediate value operand. +/// +/// The selected elements from the first 256-bit operand are copied to bits +/// [63:0] and bits [191:128] in the destination, and the selected elements +/// from the second 256-bit operand are copied to bits [127:64] and bits +/// [255:192] in the destination. For example, if bits [3:0] of the immediate +/// operand contain a value of 0xF, the 256-bit destination vector would +/// contain the following values: b[3], a[3], b[1], a[1]. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. +/// \param mask +/// An immediate value containing 8-bit values specifying which elements to +/// copy from \a a and \a b: \n +/// Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the +/// destination. \n +/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the shuffled values. 
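+///
+/// As an illustrative sketch (an editorial example, not upstream text):
+/// a mask of 0x5 (binary 0101) selects the upper element of each 128-bit
+/// half of \a a and the lower element of each half of \a b, where \a a and
+/// \a b are assumed to be existing __m256d values:
+/// \code
+/// __m256d r = _mm256_shuffle_pd(a, b, 0x5); // { a[1], b[0], a[3], b[2] }
+/// \endcode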
+#define _mm256_shuffle_pd(a, b, mask) \ + (__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(mask)) + +/* Compare */ +#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ +#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ +#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ +#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */ +#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */ +#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */ +#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */ +#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */ +#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */ +#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */ +#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */ +#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */ +#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */ +#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */ +#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ +#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */ +#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */ +#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */ +#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */ +#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */ +#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */ +#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */ +#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unordered, non-signaling) */ +#define _CMP_ORD_S 0x17 /* Ordered (signaling) */ +#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */ +#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */ +#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */ +#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */ +#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */ +#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */ +#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */ +#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */ + +/// Compares each of the corresponding double-precision values of two +/// 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Returns a [2 x double] vector consisting of two doubles corresponding to +/// the two comparison results: zero if the comparison is false, and all 1's +/// if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +#define _mm_cmp_pd(a, b, c) \ + (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (c)) + +/// Compares each of the corresponding values of two 128-bit vectors of +/// [4 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Returns a [4 x float] vector consisting of four floats corresponding to +/// the four comparison results: zero if the comparison is false, and all 1's +/// if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +#define _mm_cmp_ps(a, b, c) \ + (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (c)) + +/// Compares each of the corresponding double-precision values of two +/// 256-bit vectors of [4 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Returns a [4 x double] vector consisting of four doubles corresponding to +/// the four comparison results: zero if the comparison is false, and all 1's +/// if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 256-bit vector of [4 x double] containing the comparison results. +#define _mm256_cmp_pd(a, b, c) \ + (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (c)) + +/// Compares each of the corresponding values of two 256-bit vectors of +/// [8 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Returns a [8 x float] vector consisting of eight floats corresponding to +/// the eight comparison results: zero if the comparison is false, and all +/// 1's if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. +/// \param b +/// A 256-bit vector of [8 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 256-bit vector of [8 x float] containing the comparison results. +#define _mm256_cmp_ps(a, b, c) \ + (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (c)) + +/// Compares each of the corresponding scalar double-precision values of +/// two 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// If the result is true, all 64 bits of the destination vector are set; +/// otherwise they are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPSD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +#define _mm_cmp_sd(a, b, c) \ + (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (c)) + +/// Compares each of the corresponding scalar values of two 128-bit +/// vectors of [4 x float], using the operation specified by the immediate +/// integer operand. +/// +/// If the result is true, all 32 bits of the destination vector are set; +/// otherwise they are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPSS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +#define _mm_cmp_ss(a, b, c) \ + (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (c)) + +/// Takes a [8 x i32] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __imm +/// An immediate integer operand with bits [2:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 32 bits of extended +/// packed data. +#define _mm256_extract_epi32(X, N) \ + (int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N)) + +/// Takes a [16 x i16] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit integer vector of [16 x i16]. +/// \param __imm +/// An immediate integer operand with bits [3:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 16 bits of zero extended +/// packed data. +#define _mm256_extract_epi16(X, N) \ + (int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \ + (int)(N)) + +/// Takes a [32 x i8] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit integer vector of [32 x i8]. 
+/// \param __imm +/// An immediate integer operand with bits [4:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 8 bits of zero extended +/// packed data. +#define _mm256_extract_epi8(X, N) \ + (int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \ + (int)(N)) + +#ifdef __x86_64__ +/// Takes a [4 x i64] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit integer vector of [4 x i64]. +/// \param __imm +/// An immediate integer operand with bits [1:0] determining which vector +/// element is extracted and returned. +/// \returns A 64-bit integer containing the extracted 64 bits of extended +/// packed data. +#define _mm256_extract_epi64(X, N) \ + (long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N)) +#endif + +/// Takes a [8 x i32] vector and replaces the vector element value +/// indexed by the immediate constant operand by a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [8 x i32] to be used by the insert operation. +/// \param __b +/// An integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. +#define _mm256_insert_epi32(X, I, N) \ + (__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \ + (int)(I), (int)(N)) + + +/// Takes a [16 x i16] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [16 x i16] to be used by the insert operation. +/// \param __b +/// An i16 integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. +#define _mm256_insert_epi16(X, I, N) \ + (__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \ + (int)(I), (int)(N)) + +/// Takes a [32 x i8] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [32 x i8] to be used by the insert operation. +/// \param __b +/// An i8 integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. +#define _mm256_insert_epi8(X, I, N) \ + (__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \ + (int)(I), (int)(N)) + +#ifdef __x86_64__ +/// Takes a [4 x i64] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. 
Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [4 x i64] to be used by the insert operation. +/// \param __b +/// A 64-bit integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. +#define _mm256_insert_epi64(X, I, N) \ + (__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \ + (long long)(I), (int)(N)) +#endif + +/* Conversion */ +/// Converts a vector of [4 x i32] into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PD instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. +/// \returns A 256-bit vector of [4 x double] containing the converted values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_cvtepi32_pd(__m128i __a) +{ + return (__m256d)__builtin_convertvector((__v4si)__a, __v4df); +} + +/// Converts a vector of [8 x i32] into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PS instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit vector of [8 x float] containing the converted values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_cvtepi32_ps(__m256i __a) +{ + return (__m256)__builtin_convertvector((__v8si)__a, __v8sf); +} + +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2PS instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit vector of [4 x float] containing the converted values. +static __inline __m128 __DEFAULT_FN_ATTRS +_mm256_cvtpd_ps(__m256d __a) +{ + return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a); +} + +/// Converts a vector of [8 x float] into a vector of [8 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit integer vector containing the converted values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_cvtps_epi32(__m256 __a) +{ + return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a); +} + +/// Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 +/// x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2PD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit vector of [4 x double] containing the converted values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_cvtps_pd(__m128 __a) +{ + return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); +} + +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 +/// x i32], truncating the result by rounding towards zero when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPD2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit integer vector containing the converted values. +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_cvttpd_epi32(__m256d __a) +{ + return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a); +} + +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 +/// x i32]. 
When a conversion is inexact, the value returned is rounded +/// according to the rounding control bits in the MXCSR register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit integer vector containing the converted values. +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_cvtpd_epi32(__m256d __a) +{ + return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a); +} + +/// Converts a vector of [8 x float] into a vector of [8 x i32], +/// truncating the result by rounding towards zero when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPS2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit integer vector containing the converted values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_cvttps_epi32(__m256 __a) +{ + return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a); +} + +/// Returns the first element of the input vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 64 bit double containing the first element of the input vector. +static __inline double __DEFAULT_FN_ATTRS +_mm256_cvtsd_f64(__m256d __a) +{ + return __a[0]; +} + +/// Returns the first element of the input vector of [8 x i32]. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \returns A 32 bit integer containing the first element of the input vector. +static __inline int __DEFAULT_FN_ATTRS +_mm256_cvtsi256_si32(__m256i __a) +{ + __v8si __b = (__v8si)__a; + return __b[0]; +} + +/// Returns the first element of the input vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 32 bit float containing the first element of the input vector. +static __inline float __DEFAULT_FN_ATTRS +_mm256_cvtss_f32(__m256 __a) +{ + return __a[0]; +} + +/* Vector replicate */ +/// Moves and duplicates odd-indexed values from a 256-bit vector of +/// [8 x float] to float values in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSHDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [255:224] of \a __a are written to bits [255:224] and [223:192] of +/// the return value. \n +/// Bits [191:160] of \a __a are written to bits [191:160] and [159:128] of +/// the return value. \n +/// Bits [127:96] of \a __a are written to bits [127:96] and [95:64] of the +/// return value. \n +/// Bits [63:32] of \a __a are written to bits [63:32] and [31:0] of the +/// return value. +/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated +/// values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_movehdup_ps(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7); +} + +/// Moves and duplicates even-indexed values from a 256-bit vector of +/// [8 x float] to float values in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSLDUP instruction. 
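+// Example for the conversion and duplicate operations above (an illustrative
+// sketch, not part of the upstream header; assumes an AVX-enabled build that
+// includes <immintrin.h>):
+//   __m256 v = _mm256_set_ps(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
+//   float first = _mm256_cvtss_f32(v);   // 0.0f (element 0)
+//   __m256 odd  = _mm256_movehdup_ps(v); // {1, 1, 3, 3, 5, 5, 7, 7}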
+/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [223:192] of \a __a are written to bits [255:224] and [223:192] of +/// the return value. \n +/// Bits [159:128] of \a __a are written to bits [191:160] and [159:128] of +/// the return value. \n +/// Bits [95:64] of \a __a are written to bits [127:96] and [95:64] of the +/// return value. \n +/// Bits [31:0] of \a __a are written to bits [63:32] and [31:0] of the +/// return value. +/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated +/// values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_moveldup_ps(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6); +} + +/// Moves and duplicates double-precision floating point values from a +/// 256-bit vector of [4 x double] to double-precision values in a 256-bit +/// vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. \n +/// Bits [63:0] of \a __a are written to bits [127:64] and [63:0] of the +/// return value. \n +/// Bits [191:128] of \a __a are written to bits [255:192] and [191:128] of +/// the return value. +/// \returns A 256-bit vector of [4 x double] containing the moved and +/// duplicated values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_movedup_pd(__m256d __a) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2); +} + +/* Unpack and Interleave */ +/// Unpacks the odd-indexed vector elements from two 256-bit vectors of +/// [4 x double] and interleaves them into a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [127:64] are written to bits [63:0] of the return value. \n +/// Bits [255:192] are written to bits [191:128] of the return value. \n +/// \param __b +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the return value. \n +/// Bits [255:192] are written to bits [255:192] of the return value. \n +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_unpackhi_pd(__m256d __a, __m256d __b) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2); +} + +/// Unpacks the even-indexed vector elements from two 256-bit vectors of +/// [4 x double] and interleaves them into a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the return value. \n +/// Bits [191:128] are written to bits [191:128] of the return value. +/// \param __b +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [63:0] are written to bits [127:64] of the return value. \n +/// Bits [191:128] are written to bits [255:192] of the return value. \n +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. 
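+// Example for the unpack operations documented above (an illustrative sketch;
+// assumes AVX via <immintrin.h>; _mm256_unpacklo_pd is defined just below):
+//   __m256d a = _mm256_set_pd(3.0, 2.0, 1.0, 0.0); // elements {0, 1, 2, 3}
+//   __m256d b = _mm256_set_pd(7.0, 6.0, 5.0, 4.0); // elements {4, 5, 6, 7}
+//   __m256d hi = _mm256_unpackhi_pd(a, b);         // {1, 5, 3, 7}
+//   __m256d lo = _mm256_unpacklo_pd(a, b);         // {0, 4, 2, 6}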
+static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_unpacklo_pd(__m256d __a, __m256d __b) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2); +} + +/// Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the +/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [95:64] are written to bits [31:0] of the return value. \n +/// Bits [127:96] are written to bits [95:64] of the return value. \n +/// Bits [223:192] are written to bits [159:128] of the return value. \n +/// Bits [255:224] are written to bits [223:192] of the return value. +/// \param __b +/// A 256-bit vector of [8 x float]. \n +/// Bits [95:64] are written to bits [63:32] of the return value. \n +/// Bits [127:96] are written to bits [127:96] of the return value. \n +/// Bits [223:192] are written to bits [191:160] of the return value. \n +/// Bits [255:224] are written to bits [255:224] of the return value. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_unpackhi_ps(__m256 __a, __m256 __b) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1); +} + +/// Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the +/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [31:0] are written to bits [31:0] of the return value. \n +/// Bits [63:32] are written to bits [95:64] of the return value. \n +/// Bits [159:128] are written to bits [159:128] of the return value. \n +/// Bits [191:160] are written to bits [223:192] of the return value. +/// \param __b +/// A 256-bit vector of [8 x float]. \n +/// Bits [31:0] are written to bits [63:32] of the return value. \n +/// Bits [63:32] are written to bits [127:96] of the return value. \n +/// Bits [159:128] are written to bits [191:160] of the return value. \n +/// Bits [191:160] are written to bits [255:224] of the return value. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_unpacklo_ps(__m256 __a, __m256 __b) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1); +} + +/* Bit Test */ +/// Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. 
+/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns the ZF flag in the EFLAGS register. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testz_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b); +} + +/// Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns the CF flag in the EFLAGS register. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testc_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b); +} + +/// Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testnzc_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b); +} + +/// Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. 
+/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testz_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b); +} + +/// Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testc_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b); +} + +/// Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testnzc_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b); +} + +/// Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. 
+/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testz_pd(__m256d __a, __m256d __b) +{ + return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b); +} + +/// Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testc_pd(__m256d __a, __m256d __b) +{ + return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b); +} + +/// Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testnzc_pd(__m256d __a, __m256d __b) +{ + return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b); +} + +/// Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns the ZF flag. 
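+// Example for the VTEST-style predicates above (an illustrative sketch;
+// assumes AVX via <immintrin.h>):
+//   __m256d a = _mm256_set_pd(-1.0, 1.0, 1.0, 1.0);
+//   __m256d b = _mm256_set_pd(-1.0, -1.0, 1.0, 1.0);
+//   int zf = _mm256_testz_pd(a, b); // 0: element 3 is negative in both
+//   int cf = _mm256_testc_pd(a, b); // 0: element 2 is negative only in b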
+static __inline int __DEFAULT_FN_ATTRS +_mm256_testz_ps(__m256 __a, __m256 __b) +{ + return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b); +} + +/// Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testc_ps(__m256 __a, __m256 __b) +{ + return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b); +} + +/// Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testnzc_ps(__m256 __a, __m256 __b) +{ + return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b); +} + +/// Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testz_si256(__m256i __a, __m256i __b) +{ + return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b); +} + +/// Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors. 
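+// A common idiom for the bit test documented above (an illustrative sketch;
+// assumes AVX via <immintrin.h>):
+//   __m256i m = _mm256_set1_epi32(1);
+//   int is_zero = _mm256_testz_si256(m, m); // 0 here; 1 only when m is all zeros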
+/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testc_si256(__m256i __a, __m256i __b) +{ + return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b); +} + +/// Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testnzc_si256(__m256i __a, __m256i __b) +{ + return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b); +} + +/* Vector extract sign mask */ +/// Extracts the sign bits of double-precision floating point elements +/// in a 256-bit vector of [4 x double] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the double-precision +/// floating point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [3:0]. +static __inline int __DEFAULT_FN_ATTRS +_mm256_movemask_pd(__m256d __a) +{ + return __builtin_ia32_movmskpd256((__v4df)__a); +} + +/// Extracts the sign bits of single-precision floating point elements +/// in a 256-bit vector of [8 x float] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the single-precision floating +/// point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [7:0]. +static __inline int __DEFAULT_FN_ATTRS +_mm256_movemask_ps(__m256 __a) +{ + return __builtin_ia32_movmskps256((__v8sf)__a); +} + +/* Vector zero */ +/// Zeroes the contents of all XMM or YMM registers. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VZEROALL instruction. +static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx"))) +_mm256_zeroall(void) +{ + __builtin_ia32_vzeroall(); +} + +/// Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
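+// Example for the sign-mask extraction above (an illustrative sketch; assumes
+// AVX via <immintrin.h>):
+//   __m256d v = _mm256_set_pd(-4.0, 3.0, -2.0, 1.0);
+//   int mask = _mm256_movemask_pd(v); // 0xA: sign bits of elements 1 and 3
+// _mm256_zeroupper() is conventionally issued before calling legacy SSE code
+// to avoid AVX-to-SSE transition penalties.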
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VZEROUPPER instruction. +static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx"))) +_mm256_zeroupper(void) +{ + __builtin_ia32_vzeroupper(); +} + +/* Vector load with broadcast */ +/// Loads a scalar single-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [4 x float] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS instruction. +/// +/// \param __a +/// The single-precision floating point value to be broadcast. +/// \returns A 128-bit vector of [4 x float] whose 32-bit elements are set +/// equal to the broadcast value. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_broadcast_ss(float const *__a) +{ + float __f = *__a; + return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f }; +} + +/// Loads a scalar double-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [4 x double] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSD instruction. +/// +/// \param __a +/// The double-precision floating point value to be broadcast. +/// \returns A 256-bit vector of [4 x double] whose 64-bit elements are set +/// equal to the broadcast value. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_broadcast_sd(double const *__a) +{ + double __d = *__a; + return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d }; +} + +/// Loads a scalar single-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [8 x float] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS instruction. +/// +/// \param __a +/// The single-precision floating point value to be broadcast. +/// \returns A 256-bit vector of [8 x float] whose 32-bit elements are set +/// equal to the broadcast value. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_broadcast_ss(float const *__a) +{ + float __f = *__a; + return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f }; +} + +/// Loads the data from a 128-bit vector of [2 x double] from the +/// specified address pointed to by \a __a and broadcasts it to 128-bit +/// elements in a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTF128 instruction. +/// +/// \param __a +/// The 128-bit vector of [2 x double] to be broadcast. +/// \returns A 256-bit vector of [4 x double] whose 128-bit elements are set +/// equal to the broadcast value. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_broadcast_pd(__m128d const *__a) +{ + __m128d __b = _mm_loadu_pd((const double *)__a); + return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b, + 0, 1, 0, 1); +} + +/// Loads the data from a 128-bit vector of [4 x float] from the +/// specified address pointed to by \a __a and broadcasts it to 128-bit +/// elements in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTF128 instruction. +/// +/// \param __a +/// The 128-bit vector of [4 x float] to be broadcast. +/// \returns A 256-bit vector of [8 x float] whose 128-bit elements are set +/// equal to the broadcast value. 
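+// Example for the broadcast loads above (an illustrative sketch; assumes AVX
+// via <immintrin.h>; _mm256_broadcast_ps is defined just below):
+//   float f = 2.5f;
+//   __m256 v = _mm256_broadcast_ss(&f); // all eight elements equal 2.5f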
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_broadcast_ps(__m128 const *__a) +{ + __m128 __b = _mm_loadu_ps((const float *)__a); + return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +/* SIMD load ops */ +/// Loads 4 double-precision floating point values from a 32-byte aligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location containing +/// double-precision floating point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_load_pd(double const *__p) +{ + return *(const __m256d *)__p; +} + +/// Loads 8 single-precision floating point values from a 32-byte aligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location containing float values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_load_ps(float const *__p) +{ + return *(const __m256 *)__p; +} + +/// Loads 4 double-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location containing double-precision floating +/// point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_loadu_pd(double const *__p) +{ + struct __loadu_pd { + __m256d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd*)__p)->__v; +} + +/// Loads 8 single-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location containing single-precision floating +/// point values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_loadu_ps(float const *__p) +{ + struct __loadu_ps { + __m256_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +/// Loads 256 bits of integer data from a 32-byte aligned memory +/// location pointed to by \a __p into elements of a 256-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a 256-bit integer vector containing integer +/// values. +/// \returns A 256-bit integer vector containing the moved values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_load_si256(__m256i const *__p) +{ + return *__p; +} + +/// Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. 
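+// Example for the aligned and unaligned loads above (an illustrative sketch;
+// assumes AVX via <immintrin.h>):
+//   double buf[4] __attribute__((aligned(32))) = {1.0, 2.0, 3.0, 4.0};
+//   __m256d a = _mm256_load_pd(buf);  // requires a 32-byte aligned address
+//   __m256d u = _mm256_loadu_pd(buf); // no alignment requirement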
+static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_loadu_si256(__m256i_u const *__p) +{ + struct __loadu_si256 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si256*)__p)->__v; +} + +/// Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may +/// perform better than \c _mm256_loadu_si256 when the data crosses a cache +/// line boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_lddqu_si256(__m256i const *__p) +{ + return (__m256i)__builtin_ia32_lddqu256((char const *)__p); +} + +/* SIMD store ops */ +/// Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to a 32-byte aligned memory location pointed to by +/// \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// double-precision floating point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_store_pd(double *__p, __m256d __a) +{ + *(__m256d *)__p = __a; +} + +/// Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to a 32-byte aligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_store_ps(float *__p, __m256 __a) +{ + *(__m256 *)__p = __a; +} + +/// Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the double-precision +/// floating point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_pd(double *__p, __m256d __a) +{ + struct __storeu_pd { + __m256d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__p)->__v = __a; +} + +/// Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_ps(float *__p, __m256 __a) +{ + struct __storeu_ps { + __m256_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; +} + +/// Stores integer values from a 256-bit integer vector to a 32-byte +/// aligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA instruction.
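+// Example for the stores above (an illustrative sketch; assumes AVX via
+// <immintrin.h>):
+//   float out[8]; // storeu has no alignment requirement
+//   _mm256_storeu_ps(out, _mm256_set1_ps(1.0f)); // out[0..7] == 1.0f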
+/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_store_si256(__m256i *__p, __m256i __a) +{ + *__p = __a; +} + +/// Stores integer values from a 256-bit integer vector to an unaligned +/// memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_si256(__m256i_u *__p, __m256i __a) +{ + struct __storeu_si256 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si256*)__p)->__v = __a; +} + +/* Conditional load ops */ +/// Conditionally loads double-precision floating point elements from a +/// memory location pointed to by \a __p into a 128-bit vector of +/// [2 x double], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the double-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each data element represents the mask bits. If a mask bit is zero, the +/// corresponding value in the memory location is not loaded and the +/// corresponding field in the return value is set to zero. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. +static __inline __m128d __DEFAULT_FN_ATTRS128 +_mm_maskload_pd(double const *__p, __m128i __m) +{ + return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m); +} + +/// Conditionally loads double-precision floating point elements from a +/// memory location pointed to by \a __p into a 256-bit vector of +/// [4 x double], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the double-precision +/// floating point values. +/// \param __m +/// A 256-bit integer vector of [4 x quadword] containing the mask. The most +/// significant bit of each quadword element represents the mask bits. If a +/// mask bit is zero, the corresponding value in the memory location is not +/// loaded and the corresponding field in the return value is set to zero. +/// \returns A 256-bit vector of [4 x double] containing the loaded values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_maskload_pd(double const *__p, __m256i __m) +{ + return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p, + (__v4di)__m); +} + +/// Conditionally loads single-precision floating point elements from a +/// memory location pointed to by \a __p into a 128-bit vector of +/// [4 x float], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the single-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each data element represents the mask bits. 
+/// If a mask bit is zero, the +/// corresponding value in the memory location is not loaded and the +/// corresponding field in the return value is set to zero. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_maskload_ps(float const *__p, __m128i __m) +{ + return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m); +} + +/// Conditionally loads single-precision floating point elements from a +/// memory location pointed to by \a __p into a 256-bit vector of +/// [8 x float], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the single-precision +/// floating point values. +/// \param __m +/// A 256-bit integer vector of [8 x dword] containing the mask. The most +/// significant bit of each dword element represents the mask bits. If a mask +/// bit is zero, the corresponding value in the memory location is not loaded +/// and the corresponding field in the return value is set to zero. +/// \returns A 256-bit vector of [8 x float] containing the loaded values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_maskload_ps(float const *__p, __m256i __m) +{ + return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m); +} + +/* Conditional store ops */ +/// Moves single-precision floating point values from a 256-bit vector +/// of [8 x float] to a memory location pointed to by \a __p, according to +/// the specified mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 256-bit integer vector of [8 x dword] containing the mask. The most +/// significant bit of each dword element in the mask vector represents the +/// mask bits. If a mask bit is zero, the corresponding value from vector +/// \a __a is not stored and the corresponding field in the memory location +/// pointed to by \a __p is not changed. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS +_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a) +{ + __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a); +} + +/// Moves double-precision values from a 128-bit vector of [2 x double] +/// to a memory location pointed to by \a __p, according to the specified +/// mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the double-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each field in the mask vector represents the mask bits. If a mask bit is +/// zero, the corresponding value from vector \a __a is not stored and the +/// corresponding field in the memory location pointed to by \a __p is not +/// changed. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS128 +_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a) +{ + __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a); +} + +/// Moves double-precision values from a 256-bit vector of [4 x double] +/// to a memory location pointed to by \a __p, according to the specified +/// mask.
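+// Example for the masked loads and stores above (an illustrative sketch;
+// assumes AVX via <immintrin.h>):
+//   double src[4] = {1.0, 2.0, 3.0, 4.0};
+//   __m256i m = _mm256_set_epi64x(0, -1, 0, -1); // select elements 0 and 2
+//   __m256d v = _mm256_maskload_pd(src, m);      // {1.0, 0.0, 3.0, 0.0}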
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the double-precision +/// floating point values. +/// \param __m +/// A 256-bit integer vector of [4 x quadword] containing the mask. The most +/// significant bit of each quadword element in the mask vector represents +/// the mask bits. If a mask bit is zero, the corresponding value from vector +/// \a __a is not stored and the corresponding field in the memory location +/// pointed to by \a __p is not changed. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS +_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a) +{ + __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a); +} + +/// Moves single-precision floating point values from a 128-bit vector +/// of [4 x float] to a memory location pointed to by \a __p, according to +/// the specified mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each field in the mask vector represents the mask bits. If a mask bit is +/// zero, the corresponding value from vector \a __a is not stored and the +/// corresponding field in the memory location pointed to by \a __p is not +/// changed. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS128 +_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a) +{ + __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a); +} + +/* Cacheability support ops */ +/// Moves integer data from a 256-bit integer vector to a 32-byte +/// aligned memory location. To minimize caching, the data is flagged as +/// non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTDQ instruction. +/// +/// \param __a +/// A pointer to a 32-byte aligned memory location that will receive the +/// integer values. +/// \param __b +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_stream_si256(__m256i *__a, __m256i __b) +{ + typedef __v4di __v4di_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a); +} + +/// Moves double-precision values from a 256-bit vector of [4 x double] +/// to a 32-byte aligned memory location. To minimize caching, the data is +/// flagged as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPD instruction. +/// +/// \param __a +/// A pointer to a 32-byte aligned memory location that will receive the +/// double-precision floating-point values. +/// \param __b +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_stream_pd(double *__a, __m256d __b) +{ + typedef __v4df __v4df_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a); +} + +/// Moves single-precision floating point values from a 256-bit vector +/// of [8 x float] to a 32-byte aligned memory location. To minimize +/// caching, the data is flagged as non-temporal (unlikely to be used again +/// soon).
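+// Example for the non-temporal stores above (an illustrative sketch; assumes
+// AVX via <immintrin.h>):
+//   double dst[4] __attribute__((aligned(32)));
+//   _mm256_stream_pd(dst, _mm256_set1_pd(0.0)); // bypasses the cache hierarchy
+//   _mm_sfence(); // order the streaming store before other threads read dst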
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS instruction. +/// +/// \param __p +/// A pointer to a 32-byte aligned memory location that will receive the +/// single-precision floating point values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_stream_ps(float *__p, __m256 __a) +{ + typedef __v8sf __v8sf_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p); +} + +/* Create vectors */ +/// Create a 256-bit vector of [4 x double] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit vector of [4 x double] containing undefined values. +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_undefined_pd(void) +{ + return (__m256d)__builtin_ia32_undef256(); +} + +/// Create a 256-bit vector of [8 x float] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit vector of [8 x float] containing undefined values. +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_undefined_ps(void) +{ + return (__m256)__builtin_ia32_undef256(); +} + +/// Create a 256-bit integer vector with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit integer vector containing undefined values. +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_undefined_si256(void) +{ + return (__m256i)__builtin_ia32_undef256(); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] +/// initialized with the specified double-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set_pd(double __a, double __b, double __c, double __d) +{ + return __extension__ (__m256d){ __d, __c, __b, __a }; +} + +/// Constructs a 256-bit floating-point vector of [8 x float] initialized +/// with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __c +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. 
+/// \param __g +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set_ps(float __a, float __b, float __c, float __d, + float __e, float __f, float __g, float __h) +{ + return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, + int __i4, int __i5, int __i6, int __i7) +{ + return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w15 +/// A 16-bit integral value used to initialize bits [255:240] of the result. +/// \param __w14 +/// A 16-bit integral value used to initialize bits [239:224] of the result. +/// \param __w13 +/// A 16-bit integral value used to initialize bits [223:208] of the result. +/// \param __w12 +/// A 16-bit integral value used to initialize bits [207:192] of the result. +/// \param __w11 +/// A 16-bit integral value used to initialize bits [191:176] of the result. +/// \param __w10 +/// A 16-bit integral value used to initialize bits [175:160] of the result. +/// \param __w09 +/// A 16-bit integral value used to initialize bits [159:144] of the result. +/// \param __w08 +/// A 16-bit integral value used to initialize bits [143:128] of the result. +/// \param __w07 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \param __w06 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w05 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w04 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w03 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w02 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w01 +/// A 16-bit integral value used to initialize bits [31:16] of the result. 
+/// \param __w00 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, + short __w11, short __w10, short __w09, short __w08, + short __w07, short __w06, short __w05, short __w04, + short __w03, short __w02, short __w01, short __w00) +{ + return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06, + __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b31 +/// An 8-bit integral value used to initialize bits [255:248] of the result. +/// \param __b30 +/// An 8-bit integral value used to initialize bits [247:240] of the result. +/// \param __b29 +/// An 8-bit integral value used to initialize bits [239:232] of the result. +/// \param __b28 +/// An 8-bit integral value used to initialize bits [231:224] of the result. +/// \param __b27 +/// An 8-bit integral value used to initialize bits [223:216] of the result. +/// \param __b26 +/// An 8-bit integral value used to initialize bits [215:208] of the result. +/// \param __b25 +/// An 8-bit integral value used to initialize bits [207:200] of the result. +/// \param __b24 +/// An 8-bit integral value used to initialize bits [199:192] of the result. +/// \param __b23 +/// An 8-bit integral value used to initialize bits [191:184] of the result. +/// \param __b22 +/// An 8-bit integral value used to initialize bits [183:176] of the result. +/// \param __b21 +/// An 8-bit integral value used to initialize bits [175:168] of the result. +/// \param __b20 +/// An 8-bit integral value used to initialize bits [167:160] of the result. +/// \param __b19 +/// An 8-bit integral value used to initialize bits [159:152] of the result. +/// \param __b18 +/// An 8-bit integral value used to initialize bits [151:144] of the result. +/// \param __b17 +/// An 8-bit integral value used to initialize bits [143:136] of the result. +/// \param __b16 +/// An 8-bit integral value used to initialize bits [135:128] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b09 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b08 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b07 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b06 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b05 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b04 +/// An 8-bit integral value used to initialize bits [39:32] of the result. 
+/// \param __b03 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b02 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b01 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b00 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, + char __b27, char __b26, char __b25, char __b24, + char __b23, char __b22, char __b21, char __b20, + char __b19, char __b18, char __b17, char __b16, + char __b15, char __b14, char __b13, char __b12, + char __b11, char __b10, char __b09, char __b08, + char __b07, char __b06, char __b05, char __b04, + char __b03, char __b02, char __b01, char __b00) +{ + return __extension__ (__m256i)(__v32qi){ + __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07, + __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15, + __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23, + __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31 + }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [191:128] of the result. +/// \param __c +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d) +{ + return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a }; +} + +/* Create vectors with elements in reverse order */ +/// Constructs a 256-bit floating-point vector of [4 x double], +/// initialized in reverse order with the specified double-precision +/// floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_setr_pd(double __a, double __b, double __c, double __d) +{ + return _mm256_set_pd(__d, __c, __b, __a); +} + +/// Constructs a 256-bit floating-point vector of [8 x float], +/// initialized in reverse order with the specified single-precision +/// float-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. 
+/// \param __c +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __g +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_setr_ps(float __a, float __b, float __c, float __d, + float __e, float __f, float __g, float __h) +{ + return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, + int __i4, int __i5, int __i6, int __i7) +{ + return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w15 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \param __w14 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w13 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w12 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w11 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w10 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w09 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w08 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \param __w07 +/// A 16-bit integral value used to initialize bits [143:128] of the result. +/// \param __w06 +/// A 16-bit integral value used to initialize bits [159:144] of the result. 
+/// \param __w05 +/// A 16-bit integral value used to initialize bits [175:160] of the result. +/// \param __w04 +/// A 16-bit integral value used to initialize bits [191:176] of the result. +/// \param __w03 +/// A 16-bit integral value used to initialize bits [207:192] of the result. +/// \param __w02 +/// A 16-bit integral value used to initialize bits [223:208] of the result. +/// \param __w01 +/// A 16-bit integral value used to initialize bits [239:224] of the result. +/// \param __w00 +/// A 16-bit integral value used to initialize bits [255:240] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, + short __w11, short __w10, short __w09, short __w08, + short __w07, short __w06, short __w05, short __w04, + short __w03, short __w02, short __w01, short __w00) +{ + return _mm256_set_epi16(__w00, __w01, __w02, __w03, + __w04, __w05, __w06, __w07, + __w08, __w09, __w10, __w11, + __w12, __w13, __w14, __w15); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b31 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \param __b30 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b29 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b28 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b27 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b26 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b25 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b24 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b23 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b22 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b21 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b20 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b19 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b18 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b17 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b16 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [135:128] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [143:136] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [151:144] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [159:152] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [167:160] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [175:168] of the result. +/// \param __b09 +/// An 8-bit integral value used to initialize bits [183:176] of the result. 
+/// \param __b08 +/// An 8-bit integral value used to initialize bits [191:184] of the result. +/// \param __b07 +/// An 8-bit integral value used to initialize bits [199:192] of the result. +/// \param __b06 +/// An 8-bit integral value used to initialize bits [207:200] of the result. +/// \param __b05 +/// An 8-bit integral value used to initialize bits [215:208] of the result. +/// \param __b04 +/// An 8-bit integral value used to initialize bits [223:216] of the result. +/// \param __b03 +/// An 8-bit integral value used to initialize bits [231:224] of the result. +/// \param __b02 +/// An 8-bit integral value used to initialize bits [239:232] of the result. +/// \param __b01 +/// An 8-bit integral value used to initialize bits [247:240] of the result. +/// \param __b00 +/// An 8-bit integral value used to initialize bits [255:248] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, + char __b27, char __b26, char __b25, char __b24, + char __b23, char __b22, char __b21, char __b20, + char __b19, char __b18, char __b17, char __b16, + char __b15, char __b14, char __b13, char __b12, + char __b11, char __b10, char __b09, char __b08, + char __b07, char __b06, char __b05, char __b04, + char __b03, char __b02, char __b01, char __b00) +{ + return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07, + __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15, + __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23, + __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __c +/// A 64-bit integral value used to initialize bits [191:128] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d) +{ + return _mm256_set_epi64x(__d, __c, __b, __a); +} + +/* Create vectors with repeated elements */ +/// Constructs a 256-bit floating-point vector of [4 x double], with each +/// of the four double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set1_pd(double __w) +{ + return _mm256_set_pd(__w, __w, __w, __w); +} + +/// Constructs a 256-bit floating-point vector of [8 x float], with each +/// of the eight single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. 
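+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// \code
+///   __m256 ones = _mm256_set1_ps(1.0f); // all eight lanes hold 1.0f
+/// \endcode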
+/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set1_ps(float __w) +{ + return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Constructs a 256-bit integer vector of [8 x i32], with each of the +/// 32-bit integral vector elements set to the specified 32-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __i +/// A 32-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [8 x i32]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi32(int __i) +{ + return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i); +} + +/// Constructs a 256-bit integer vector of [16 x i16], with each of the +/// 16-bit integral vector elements set to the specified 16-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction. +/// +/// \param __w +/// A 16-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [16 x i16]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi16(short __w) +{ + return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Constructs a 256-bit integer vector of [32 x i8], with each of the +/// 8-bit integral vector elements set to the specified 8-bit integral value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction. +/// +/// \param __b +/// An 8-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [32 x i8]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi8(char __b) +{ + return _mm256_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b, __b, __b, __b); +} + +/// Constructs a 256-bit integer vector of [4 x i64], with each of the +/// 64-bit integral vector elements set to the specified 64-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __q +/// A 64-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [4 x i64]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi64x(long long __q) +{ + return _mm256_set_epi64x(__q, __q, __q, __q); +} + +/* Create __zeroed vectors */ +/// Constructs a 256-bit floating-point vector of [4 x double] with all +/// vector elements initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit vector of [4 x double] with all elements set to zero. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_setzero_pd(void) +{ + return __extension__ (__m256d){ 0, 0, 0, 0 }; +} + +/// Constructs a 256-bit floating-point vector of [8 x float] with all +/// vector elements initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit vector of [8 x float] with all elements set to zero. 
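+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// a zeroed vector is the usual starting value for an accumulator.
+/// \code
+///   __m256 acc = _mm256_setzero_ps();
+///   acc = _mm256_add_ps(acc, _mm256_set1_ps(2.0f)); // acc is now all 2.0f
+/// \endcode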
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_setzero_ps(void) +{ + return __extension__ (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 }; +} + +/// Constructs a 256-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit integer vector initialized to zero. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setzero_si256(void) +{ + return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 }; +} + +/* Cast between vector types */ +/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// floating-point vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castpd_ps(__m256d __a) +{ + return (__m256)__a; +} + +/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castpd_si256(__m256d __a) +{ + return (__m256i)__a; +} + +/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// floating-point vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castps_pd(__m256 __a) +{ + return (__m256d)__a; +} + +/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castps_si256(__m256 __a) +{ + return (__m256i)__a; +} + +/// Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castsi256_ps(__m256i __a) +{ + return (__m256)__a; +} + +/// Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castsi256_pd(__m256i __a) +{ + return (__m256d)__a; +} + +/// Returns the lower 128 bits of a 256-bit floating-point vector of +/// [4 x double] as a 128-bit floating-point vector of [2 x double]. 
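+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// the cast reinterprets registers and typically generates no instructions.
+/// \code
+///   __m256d v  = _mm256_set1_pd(2.0);
+///   __m128d lo = _mm256_castpd256_pd128(v); // the low two doubles of v
+/// \endcode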
+/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 128-bit floating-point vector of [2 x double] containing the +/// lower 128 bits of the parameter. +static __inline __m128d __DEFAULT_FN_ATTRS +_mm256_castpd256_pd128(__m256d __a) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1); +} + +/// Returns the lower 128 bits of a 256-bit floating-point vector of +/// [8 x float] as a 128-bit floating-point vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 128-bit floating-point vector of [4 x float] containing the +/// lower 128 bits of the parameter. +static __inline __m128 __DEFAULT_FN_ATTRS +_mm256_castps256_ps128(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3); +} + +/// Truncates a 256-bit integer vector into a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 128-bit integer vector containing the lower 128 bits of the +/// parameter. +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_castsi256_si128(__m256i __a) +{ + return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] from a +/// 128-bit floating-point vector of [2 x double]. +/// +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits +/// contain the value of the parameter. The contents of the upper 128 bits +/// are undefined. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castpd128_pd256(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] from a +/// 128-bit floating-point vector of [4 x float]. +/// +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits +/// contain the value of the parameter. The contents of the upper 128 bits +/// are undefined. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castps128_ps256(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1); +} + +/// Constructs a 256-bit integer vector from a 128-bit integer vector. +/// +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 256-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The contents of the upper 128 bits are undefined. 
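+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// because the upper bits are undefined, prefer _mm256_zextsi128_si256 below
+/// when zeroed upper bits are required.
+/// \code
+///   __m128i x = _mm_set1_epi32(7);
+///   __m256i y = _mm256_castsi128_si256(x); // bits [127:0] = x; upper undefined
+/// \endcode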
+static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castsi128_si256(__m128i __a) +{ + return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] from a +/// 128-bit floating-point vector of [2 x double]. The lower 128 bits +/// contain the value of the source vector. The upper 128 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits +/// contain the value of the parameter. The upper 128 bits are set to zero. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_zextpd128_pd256(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] from a +/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain +/// the value of the source vector. The upper 128 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits +/// contain the value of the parameter. The upper 128 bits are set to zero. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_zextps128_ps256(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 256-bit integer vector from a 128-bit integer vector. +/// The lower 128 bits contain the value of the source vector. The upper +/// 128 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 256-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The upper 128 bits are set to zero. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_zextsi128_si256(__m128i __a) +{ + return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3); +} + +/* + Vector insert. + We use macros rather than inlines because we only want to accept + invocations where the immediate M is a constant expression. +*/ +/// Constructs a new 256-bit vector of [8 x float] by first duplicating +/// a 256-bit vector of [8 x float] given in the first parameter, and then +/// replacing either the upper or the lower 128 bits with the contents of a +/// 128-bit vector of [4 x float] in the second parameter. +/// +/// The immediate integer parameter determines between the upper or the lower +/// 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_insertf128_ps(__m256 V1, __m128 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. This vector is copied to the result +/// first, and then either the upper or the lower 128 bits of the result will +/// be replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit vector of [4 x float]. The contents of this parameter are +/// written to either the upper or the lower 128 bits of the result depending +/// on the value of parameter \a M. +/// \param M +/// An immediate integer. 
The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. +#define _mm256_insertf128_ps(V1, V2, M) \ + (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \ + (__v4sf)(__m128)(V2), (int)(M)) + +/// Constructs a new 256-bit vector of [4 x double] by first duplicating +/// a 256-bit vector of [4 x double] given in the first parameter, and then +/// replacing either the upper or the lower 128 bits with the contents of a +/// 128-bit vector of [2 x double] in the second parameter. +/// +/// The immediate integer parameter determines between the upper or the lower +/// 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_insertf128_pd(__m256d V1, __m128d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. This vector is copied to the result +/// first, and then either the upper or the lower 128 bits of the result will +/// be replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit vector of [2 x double]. The contents of this parameter are +/// written to either the upper or the lower 128 bits of the result depending +/// on the value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. +#define _mm256_insertf128_pd(V1, V2, M) \ + (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \ + (__v2df)(__m128d)(V2), (int)(M)) + +/// Constructs a new 256-bit integer vector by first duplicating a +/// 256-bit integer vector given in the first parameter, and then replacing +/// either the upper or the lower 128 bits with the contents of a 128-bit +/// integer vector in the second parameter. +/// +/// The immediate integer parameter determines between the upper or the lower +/// 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insertf128_si256(__m256i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector. This vector is copied to the result first, and +/// then either the upper or the lower 128 bits of the result will be +/// replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit integer vector. The contents of this parameter are written to +/// either the upper or the lower 128 bits of the result depending on the +/// value of parameter \a M. +/// \param M +/// An immediate integer. 
The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit integer vector containing the interleaved values. +#define _mm256_insertf128_si256(V1, V2, M) \ + (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \ + (__v4si)(__m128i)(V2), (int)(M)) + +/* + Vector extract. + We use macros rather than inlines because we only want to accept + invocations where the immediate M is a constant expression. +*/ +/// Extracts either the upper or the lower 128 bits from a 256-bit vector +/// of [8 x float], as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm256_extractf128_ps(__m256 V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit vector of [4 x float] containing the extracted bits. +#define _mm256_extractf128_ps(V, M) \ + (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M)) + +/// Extracts either the upper or the lower 128 bits from a 256-bit vector +/// of [4 x double], as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm256_extractf128_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit vector of [2 x double] containing the extracted bits. +#define _mm256_extractf128_pd(V, M) \ + (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M)) + +/// Extracts either the upper or the lower 128 bits from a 256-bit +/// integer vector, as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit integer vector. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_extractf128_si256(__m256i V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit integer vector. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit integer vector containing the extracted bits. 
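+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// \a M must be a compile-time constant, which is why this is a macro.
+/// \code
+///   __m256i v  = _mm256_set1_epi32(9);
+///   __m128i hi = _mm256_extractf128_si256(v, 1); // bits [255:128] of v
+/// \endcode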
+#define _mm256_extractf128_si256(V, M) \ + (__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M)) + +/* SIMD load ops (unaligned) */ +/// Loads two 128-bit floating-point vectors of [4 x float] from +/// unaligned memory locations and constructs a 256-bit floating-point vector +/// of [8 x float] by concatenating the two 128-bit vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing 4 consecutive +/// single-precision floating-point values. These values are to be copied to +/// bits[255:128] of the result. The address of the memory location does not +/// have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing 4 consecutive +/// single-precision floating-point values. These values are to be copied to +/// bits[127:0] of the result. The address of the memory location does not +/// have to be aligned. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo) +{ + __m256 __v256 = _mm256_castps128_ps256(_mm_loadu_ps(__addr_lo)); + return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1); +} + +/// Loads two 128-bit floating-point vectors of [2 x double] from +/// unaligned memory locations and constructs a 256-bit floating-point vector +/// of [4 x double] by concatenating the two 128-bit vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing two consecutive +/// double-precision floating-point values. These values are to be copied to +/// bits[255:128] of the result. The address of the memory location does not +/// have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing two consecutive +/// double-precision floating-point values. These values are to be copied to +/// bits[127:0] of the result. The address of the memory location does not +/// have to be aligned. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo) +{ + __m256d __v256 = _mm256_castpd128_pd256(_mm_loadu_pd(__addr_lo)); + return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1); +} + +/// Loads two 128-bit integer vectors from unaligned memory locations and +/// constructs a 256-bit integer vector by concatenating the two 128-bit +/// vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing a 128-bit integer +/// vector. This vector is to be copied to bits[255:128] of the result. The +/// address of the memory location does not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing a 128-bit integer +/// vector. This vector is to be copied to bits[127:0] of the result. The +/// address of the memory location does not have to be aligned. +/// \returns A 256-bit integer vector containing the concatenated result. 
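+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// \code
+///   int data[8] = {0, 1, 2, 3, 4, 5, 6, 7}; // no alignment required
+///   __m256i v = _mm256_loadu2_m128i((const __m128i_u *)(data + 4),
+///                                   (const __m128i_u *)data);
+/// \endcode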
+static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo) +{ + __m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo)); + return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1); +} + +/* SIMD store ops (unaligned) */ +/// Stores the upper and lower 128 bits of a 256-bit floating-point +/// vector of [8 x float] into two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a) +{ + __m128 __v128; + + __v128 = _mm256_castps256_ps128(__a); + _mm_storeu_ps(__addr_lo, __v128); + __v128 = _mm256_extractf128_ps(__a, 1); + _mm_storeu_ps(__addr_hi, __v128); +} + +/// Stores the upper and lower 128 bits of a 256-bit floating-point +/// vector of [4 x double] into two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a) +{ + __m128d __v128; + + __v128 = _mm256_castpd256_pd128(__a); + _mm_storeu_pd(__addr_lo, __v128); + __v128 = _mm256_extractf128_pd(__a, 1); + _mm_storeu_pd(__addr_hi, __v128); +} + +/// Stores the upper and lower 128 bits of a 256-bit integer vector into +/// two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit integer vector. 
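+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// \code
+///   int out[8]; // no alignment required
+///   __m256i v = _mm256_set1_epi32(3);
+///   _mm256_storeu2_m128i((__m128i_u *)(out + 4), (__m128i_u *)out, v);
+/// \endcode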
+static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a) +{ + __m128i __v128; + + __v128 = _mm256_castsi256_si128(__a); + _mm_storeu_si128(__addr_lo, __v128); + __v128 = _mm256_extractf128_si256(__a, 1); + _mm_storeu_si128(__addr_hi, __v128); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] by +/// concatenating two 128-bit floating-point vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit floating-point vector of [4 x float] to be copied to the upper +/// 128 bits of the result. +/// \param __lo +/// A 128-bit floating-point vector of [4 x float] to be copied to the lower +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set_m128 (__m128 __hi, __m128 __lo) +{ + return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] by +/// concatenating two 128-bit floating-point vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit floating-point vector of [2 x double] to be copied to the upper +/// 128 bits of the result. +/// \param __lo +/// A 128-bit floating-point vector of [2 x double] to be copied to the lower +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set_m128d (__m128d __hi, __m128d __lo) +{ + return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3); +} + +/// Constructs a 256-bit integer vector by concatenating two 128-bit +/// integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit integer vector to be copied to the upper 128 bits of the +/// result. +/// \param __lo +/// A 128-bit integer vector to be copied to the lower 128 bits of the +/// result. +/// \returns A 256-bit integer vector containing the concatenated result. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_m128i (__m128i __hi, __m128i __lo) +{ + return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] by +/// concatenating two 128-bit floating-point vectors of [4 x float]. This is +/// similar to _mm256_set_m128, but the order of the input parameters is +/// swapped. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit floating-point vector of [4 x float] to be copied to the lower +/// 128 bits of the result. +/// \param __hi +/// A 128-bit floating-point vector of [4 x float] to be copied to the upper +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_setr_m128 (__m128 __lo, __m128 __hi) +{ + return _mm256_set_m128(__hi, __lo); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] by +/// concatenating two 128-bit floating-point vectors of [2 x double]. This is +/// similar to _mm256_set_m128d, but the order of the input parameters is +/// swapped. 
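+///
+/// Illustrative usage (an editorial example, not upstream documentation):
+/// \code
+///   __m128d lo = _mm_set1_pd(1.0), hi = _mm_set1_pd(2.0);
+///   __m256d v  = _mm256_setr_m128d(lo, hi); // equals _mm256_set_m128d(hi, lo)
+/// \endcode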
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VINSERTF128 instruction.
+///
+/// \param __lo
+/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
+/// 128 bits of the result.
+/// \param __hi
+/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the
+/// concatenated result.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_m128d (__m128d __lo, __m128d __hi)
+{
+  return (__m256d)_mm256_set_m128d(__hi, __lo);
+}
+
+/// Constructs a 256-bit integer vector by concatenating two 128-bit
+/// integer vectors. This is similar to _mm256_set_m128i, but the order of
+/// the input parameters is swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VINSERTF128 instruction.
+///
+/// \param __lo
+/// A 128-bit integer vector to be copied to the lower 128 bits of the
+/// result.
+/// \param __hi
+/// A 128-bit integer vector to be copied to the upper 128 bits of the
+/// result.
+/// \returns A 256-bit integer vector containing the concatenated result.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_m128i (__m128i __lo, __m128i __hi)
+{
+  return (__m256i)_mm256_set_m128i(__hi, __lo);
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+
+#endif /* __AVXINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avxvnniintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avxvnniintrin.h
new file mode 100644
index 0000000..ad45cb7
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/avxvnniintrin.h
@@ -0,0 +1,225 @@
+/*===--------------- avxvnniintrin.h - VNNI intrinsics --------------------===
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avxvnniintrin.h> directly; include <immintrin.h> instead."
+#endif + +#ifndef __AVXVNNIINTRIN_H +#define __AVXVNNIINTRIN_H + +/* Below intrinsics defined in avx512vlvnniintrin.h can be used for AVXVNNI */ +/// \fn __m256i _mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m256i _mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m256i _mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m256i _mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m128i _mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B) +/// \fn __m128i _mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B) +/// \fn __m128i _mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B) +/// \fn __m128i _mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B) + +/* Intrinsics with _avx_ prefix are for compatibility with msvc. */ +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avxvnni"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avxvnni"), __min_vector_width__(128))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a __S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])) +/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endoperation +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpdpbusd256((__v8si)__S, (__v8si)__A, (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a __S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. +/// +/// \operation +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endoperation +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpdpbusds256((__v8si)__S, (__v8si)__A, (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit +/// results. 
+/// Sum these 2 results with the corresponding 32-bit integer in \a __S,
+/// and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the VPDPWSSD instructions.
+///
+/// \operation
+///  FOR j := 0 to 7
+///   tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+///   tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+///   DST.dword[j] := __S.dword[j] + tmp1 + tmp2
+///  ENDFOR
+///  DST[MAX:256] := 0
+/// \endoperation
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_vpdpwssd256((__v8si)__S, (__v8si)__A, (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S
+/// using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the VPDPWSSDS instructions.
+///
+/// \operation
+///  FOR j := 0 to 7
+///   tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+///   tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+///   DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
+///  ENDFOR
+///  DST[MAX:256] := 0
+/// \endoperation
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+  return (__m256i)__builtin_ia32_vpdpwssds256((__v8si)__S, (__v8si)__A, (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a __S, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the VPDPBUSD instructions.
+///
+/// \operation
+///  FOR j := 0 to 3
+///   tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
+///   tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
+///   tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))
+///   tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))
+///   DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+///  ENDFOR
+///  DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpdpbusd128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed
+/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in \a __S using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the VPDPBUSDS instructions.
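+///
+/// A minimal usage sketch, assuming __u8s and __s8s hold packed unsigned and
+/// signed bytes respectively:
+/// \code
+///   __m128i __acc = _mm_setzero_si128();
+///   __acc = _mm_dpbusds_avx_epi32(__acc, __u8s, __s8s);
+/// \endcode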
+///
+/// \operation
+///  FOR j := 0 to 3
+///   tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
+///   tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
+///   tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))
+///   tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))
+///   DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+///  ENDFOR
+///  DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpdpbusds128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S,
+/// and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the VPDPWSSD instructions.
+///
+/// \operation
+///  FOR j := 0 to 3
+///   tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+///   tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+///   DST.dword[j] := __S.dword[j] + tmp1 + tmp2
+///  ENDFOR
+///  DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpdpwssd128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit
+/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S
+/// using signed saturation, and store the packed 32-bit results in DST.
+///
+/// This intrinsic corresponds to the VPDPWSSDS instructions.
+///
+/// \operation
+///  FOR j := 0 to 3
+///   tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+///   tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+///   DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
+///  ENDFOR
+///  DST[MAX:128] := 0
+/// \endoperation
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpdpwssds128((__v4si)__S, (__v4si)__A, (__v4si)__B);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXVNNIINTRIN_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/bmi2intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/bmi2intrin.h
new file mode 100644
index 0000000..0b56aed
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/bmi2intrin.h
@@ -0,0 +1,81 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_pext_si(__X, __Y);
+}
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_pext_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mulx_u64 (unsigned long long __X, unsigned long long __Y,
+           unsigned long long *__P)
+{
+  unsigned __int128 __res = (unsigned __int128) __X * __Y;
+  *__P = (unsigned long long) (__res >> 64);
+  return (unsigned long long) __res;
+}
+
+#else /* !__x86_64__ */
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+  unsigned long long __res = (unsigned long long) __X * __Y;
+  *__P = (unsigned int) (__res >> 32);
+  return (unsigned int) __res;
+}
+
+#endif /* !__x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __BMI2INTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/bmiintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/bmiintrin.h
new file mode 100644
index 0000000..f583c21
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/bmiintrin.h
@@ -0,0 +1,427 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
+   instruction behaves as BSF on non-BMI targets, there is code that expects
+   to use it as a potentially faster version of BSF. */
+#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 16-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned short __RELAXED_FN_ATTRS
+__tzcnt_u16(unsigned short __X)
+{
+  return __builtin_ia32_tzcnt_u16(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the TZCNT instruction.
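+///
+/// For example, __tzcnt_u32(0x00000008) returns 3, and __tzcnt_u32(0) returns
+/// 32 (the operand width).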
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned int __RELAXED_FN_ATTRS
+__tzcnt_u32(unsigned int __X)
+{
+  return __builtin_ia32_tzcnt_u32(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns A 32-bit integer containing the number of trailing zero bits in
+///    the operand.
+static __inline__ int __RELAXED_FN_ATTRS
+_mm_tzcnt_32(unsigned int __X)
+{
+  return __builtin_ia32_tzcnt_u32(__X);
+}
+
+#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
+
+#ifdef __x86_64__
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
+__tzcnt_u64(unsigned long long __X)
+{
+  return __builtin_ia32_tzcnt_u64(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the TZCNT instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns A 64-bit integer containing the number of trailing zero bits in
+///    the operand.
+static __inline__ long long __RELAXED_FN_ATTRS
+_mm_tzcnt_64(unsigned long long __X)
+{
+  return __builtin_ia32_tzcnt_u64(__X);
+}
+
+#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
+
+#endif /* __x86_64__ */
+
+#undef __RELAXED_FN_ATTRS
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__BMI__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+#define _andn_u32(a, b)   (__andn_u32((a), (b)))
+
+/* _bextr_u32 != __bextr_u32 */
+#define _blsi_u32(a)      (__blsi_u32((a)))
+
+#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
+
+#define _blsr_u32(a)      (__blsr_u32((a)))
+
+/// Performs a bitwise AND of the second operand with the one's
+///    complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the ANDN instruction.
+///
+/// \param __X
+///    An unsigned integer containing one of the operands.
+/// \param __Y
+///    An unsigned integer containing one of the operands.
+/// \returns An unsigned integer containing the bitwise AND of the second
+///    operand with the one's complement of the first operand.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+  return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BEXTR instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
+///    specify the index of the least significant bit. Bits [15:8] specify the
+///    number of bits to be extracted.
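+///    For example, a __Y value of 0x0404 selects a 4-bit field starting at
+///    bit 4 of __X.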
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
+/// \see _bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BEXTR instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify the index of the least significant
+///    bit for the bits to be extracted. Bits [7:0] specify the index.
+/// \param __Z
+///    An unsigned integer used to specify the number of bits to be extracted.
+///    Bits [7:0] specify the number of bits.
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
+{
+  return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BEXTR instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
+///    specify the index of the least significant bit. Bits [15:8] specify the
+///    number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr2_u32(unsigned int __X, unsigned int __Y) {
+  return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+/// Clears all bits in the source except for the least significant bit
+///    containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BLSI instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be cleared.
+/// \returns An unsigned integer containing the result of clearing the bits from
+///    the source operand.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsi_u32(unsigned int __X)
+{
+  return __X & -__X;
+}
+
+/// Creates a mask whose bits are set to 1, using bit 0 up to and
+///    including the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BLSMSK instruction.
+///
+/// \param __X
+///    An unsigned integer used to create the mask.
+/// \returns An unsigned integer containing the newly created mask.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsmsk_u32(unsigned int __X)
+{
+  return __X ^ (__X - 1);
+}
+
+/// Clears the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BLSR instruction.
+///
+/// \param __X
+///    An unsigned integer containing the operand to be cleared.
+/// \returns An unsigned integer containing the result of clearing the source
+///    operand.
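+///
+/// For example, __blsr_u32(0x0000000C) returns 0x00000008.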
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsr_u32(unsigned int __X)
+{
+  return __X & (__X - 1);
+}
+
+#ifdef __x86_64__
+
+#define _andn_u64(a, b)   (__andn_u64((a), (b)))
+
+/* _bextr_u64 != __bextr_u64 */
+#define _blsi_u64(a)      (__blsi_u64((a)))
+
+#define _blsmsk_u64(a)    (__blsmsk_u64((a)))
+
+#define _blsr_u64(a)      (__blsr_u64((a)))
+
+/// Performs a bitwise AND of the second operand with the one's
+///    complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the ANDN instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer containing one of the operands.
+/// \param __Y
+///    An unsigned 64-bit integer containing one of the operands.
+/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
+///    operand with the one's complement of the first operand.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+  return ~__X & __Y;
+}
+
+/* AMD-specified, double-leading-underscore version of BEXTR */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BEXTR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
+///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
+///    the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
+/// \see _bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BEXTR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify the index of the least significant
+///    bit for the bits to be extracted. Bits [7:0] specify the index.
+/// \param __Z
+///    An unsigned integer used to specify the number of bits to be extracted.
+///    Bits [7:0] specify the number of bits.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
+{
+  return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BEXTR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
+///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
+///    the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr2_u64(unsigned long long __X, unsigned long long __Y) {
+  return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+/// Clears all bits in the source except for the least significant bit
+///    containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BLSI instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+///    bits from the source operand.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsi_u64(unsigned long long __X)
+{
+  return __X & -__X;
+}
+
+/// Creates a mask whose bits are set to 1, using bit 0 up to and
+///    including the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BLSMSK instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer used to create the mask.
+/// \returns An unsigned 64-bit integer containing the newly created mask.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsmsk_u64(unsigned long long __X)
+{
+  return __X ^ (__X - 1);
+}
+
+/// Clears the least significant bit that is set to 1 in the source
+///    operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the BLSR instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer containing the operand to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+///    source operand.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsr_u64(unsigned long long __X)
+{
+  return __X & (__X - 1);
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+          || defined(__BMI__) */
+
+#endif /* __BMIINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/builtins.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/builtins.h
new file mode 100644
index 0000000..6509586
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/builtins.h
@@ -0,0 +1,16 @@
+/*===---- builtins.h - Standard header for extra builtins -----------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+/// Some legacy compilers have builtin definitions in a file named builtins.h.
+/// This header file has been added to allow compatibility with code that was
+/// written for those compilers. Code may have an include line for this file
+/// and to avoid an error an empty file with this name is provided.
+#ifndef __BUILTINS_H
+#define __BUILTINS_H
+
+#endif /* __BUILTINS_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cet.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cet.h
new file mode 100644
index 0000000..ffb19de
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cet.h
@@ -0,0 +1,66 @@
+/*===------ cet.h -Control-flow Enforcement Technology feature ------------===
+ * Add x86 feature with IBT and/or SHSTK bits to ELF program property if they
+ * are enabled.
+ * Otherwise, contents in this header file are unused. This file is mainly
+ * designed for assembly source code that wants to enable CET.
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CET_H
+#define __CET_H
+
+#ifdef __ASSEMBLER__
+
+#ifndef __CET__
+# define _CET_ENDBR
+#endif
+
+#ifdef __CET__
+
+# ifdef __LP64__
+#  if __CET__ & 0x1
+#    define _CET_ENDBR endbr64
+#  else
+#    define _CET_ENDBR
+#  endif
+# else
+#  if __CET__ & 0x1
+#    define _CET_ENDBR endbr32
+#  else
+#    define _CET_ENDBR
+#  endif
+# endif
+
+
+#  ifdef __LP64__
+#   define __PROPERTY_ALIGN 3
+#  else
+#   define __PROPERTY_ALIGN 2
+#  endif
+
+	.pushsection ".note.gnu.property", "a"
+	.p2align __PROPERTY_ALIGN
+	.long 1f - 0f		/* name length.  */
+	.long 4f - 1f		/* data length.  */
+	/* NT_GNU_PROPERTY_TYPE_0.  */
+	.long 5			/* note type.  */
+0:
+	.asciz "GNU"		/* vendor name.  */
+1:
+	.p2align __PROPERTY_ALIGN
+	/* GNU_PROPERTY_X86_FEATURE_1_AND.  */
+	.long 0xc0000002	/* pr_type.  */
+	.long 3f - 2f		/* pr_datasz.  */
+2:
+	/* GNU_PROPERTY_X86_FEATURE_1_XXX.  */
+	.long __CET__
+3:
+	.p2align __PROPERTY_ALIGN
+4:
+	.popsection
+#endif
+#endif
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cetintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cetintrin.h
new file mode 100644
index 0000000..4290e9d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cetintrin.h
@@ -0,0 +1,99 @@
+/*===---- cetintrin.h - CET intrinsic --------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <cetintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __CETINTRIN_H
+#define __CETINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("shstk")))
+
+static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
+  __builtin_ia32_incsspd(__a);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS _incsspq(unsigned long long __a) {
+  __builtin_ia32_incsspq(__a);
+}
+#endif /* __x86_64__ */
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
+  __builtin_ia32_incsspq(__a);
+}
+#else /* __x86_64__ */
+static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
+  __builtin_ia32_incsspd((int)__a);
+}
+#endif /* __x86_64__ */
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
+  return __builtin_ia32_rdsspd(__a);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
+  return __builtin_ia32_rdsspq(__a);
+}
+#endif /* __x86_64__ */
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _get_ssp(void) {
+  return __builtin_ia32_rdsspq(0);
+}
+#else /* __x86_64__ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {
+  return __builtin_ia32_rdsspd(0);
+}
+#endif /* __x86_64__ */
+
+static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp() {
+  __builtin_ia32_saveprevssp();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS _rstorssp(void * __p) {
+  __builtin_ia32_rstorssp(__p);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS _wrssd(unsigned int __a, void * __p) {
+  __builtin_ia32_wrssd(__a, __p);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS _wrssq(unsigned long long __a, void * __p) {
+  __builtin_ia32_wrssq(__a, __p);
+}
+#endif /* __x86_64__ */
+
+static __inline__ void __DEFAULT_FN_ATTRS _wrussd(unsigned int __a, void * __p) {
+  __builtin_ia32_wrussd(__a, __p);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long __a, void * __p) {
+  __builtin_ia32_wrussq(__a, __p);
+}
+#endif /* __x86_64__ */
+
+static __inline__ void __DEFAULT_FN_ATTRS _setssbsy() {
+  __builtin_ia32_setssbsy();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS _clrssbsy(void * __p) {
+  __builtin_ia32_clrssbsy(__p);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __CETINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cldemoteintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cldemoteintrin.h
new file mode 100644
index 0000000..cfb951c1
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cldemoteintrin.h
@@ -0,0 +1,36 @@
+/*===---- cldemoteintrin.h - CLDEMOTE intrinsic ----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <cldemoteintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __CLDEMOTEINTRIN_H
+#define __CLDEMOTEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("cldemote")))
+
+/// Hint to hardware that the cache line that contains \p __P should be demoted
+/// from the cache closest to the processor core to a level more distant from
+/// the processor core.
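+/// The demotion is only a performance hint and may be ignored by the
+/// processor; program correctness never depends on it. A typical
+/// producer/consumer sketch (buf is an assumed shared buffer):
+/// \code
+///   buf[i] = value;       // producer fills the cache line
+///   _cldemote(&buf[i]);   // push the line toward the shared cache
+/// \endcode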
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the CLDEMOTE instruction.
+static __inline__ void __DEFAULT_FN_ATTRS
+_cldemote(const void * __P) {
+  __builtin_ia32_cldemote(__P);
+}
+
+#define _mm_cldemote(p) _cldemote(p)
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clflushoptintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clflushoptintrin.h
new file mode 100644
index 0000000..060eb36
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clflushoptintrin.h
@@ -0,0 +1,27 @@
+/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <clflushoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __CLFLUSHOPTINTRIN_H
+#define __CLFLUSHOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clflushopt(void const * __m) {
+  __builtin_ia32_clflushopt(__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clwbintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clwbintrin.h
new file mode 100644
index 0000000..3360d20
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clwbintrin.h
@@ -0,0 +1,38 @@
+/*===---- clwbintrin.h - CLWB intrinsic ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <clwbintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __CLWBINTRIN_H
+#define __CLWBINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clwb")))
+
+/// Writes back to memory the cache line (if modified) that contains the
+/// linear address specified in \a __p from any level of the cache hierarchy in
+/// the cache coherence domain.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the CLWB instruction.
+///
+/// \param __p
+///    A pointer to the memory location used to identify the cache line to be
+///    written back.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clwb(void const *__p) {
+  __builtin_ia32_clwb(__p);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clzerointrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clzerointrin.h
new file mode 100644
index 0000000..a180984
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/clzerointrin.h
@@ -0,0 +1,36 @@
+/*===----------------------- clzerointrin.h - CLZERO ----------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <clzerointrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __CLZEROINTRIN_H
+#define __CLZEROINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("clzero")))
+
+/// Loads the cache line address and zeroes out the cache line
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the CLZERO instruction.
+///
+/// \param __line
+///    A pointer to a cache line which needs to be zeroed out.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_clzero (void * __line)
+{
+  __builtin_ia32_clzero ((void *)__line);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __CLZEROINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cpuid.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cpuid.h
new file mode 100644
index 0000000..34f0e76
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cpuid.h
@@ -0,0 +1,319 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CPUID_H
+#define __CPUID_H
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+/* Responses identification request with %eax 0 */
+/* AMD:     "AuthenticAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+/* CENTAUR: "CentaurHauls" */
+#define signature_CENTAUR_ebx 0x746e6543
+#define signature_CENTAUR_edx 0x48727561
+#define signature_CENTAUR_ecx 0x736c7561
+/* CYRIX:   "CyrixInstead" */
+#define signature_CYRIX_ebx 0x69727943
+#define signature_CYRIX_edx 0x736e4978
+#define signature_CYRIX_ecx 0x64616574
+/* HYGON:   "HygonGenuine" */
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
+/* INTEL:   "GenuineIntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+/* TM1:     "TransmetaCPU" */
+#define signature_TM1_ebx 0x6e617254
+#define signature_TM1_edx 0x74656d73
+#define signature_TM1_ecx 0x55504361
+/* TM2:     "GenuineTMx86" */
+#define signature_TM2_ebx 0x756e6547
+#define signature_TM2_edx 0x54656e69
+#define signature_TM2_ecx 0x3638784d
+/* NSC:     "Geode by NSC" */
+#define signature_NSC_ebx 0x646f6547
+#define signature_NSC_edx 0x79622065
+#define signature_NSC_ecx 0x43534e20
+/* NEXGEN:  "NexGenDriven" */
+#define signature_NEXGEN_ebx 0x4778654e
+#define signature_NEXGEN_edx 0x72446e65
+#define signature_NEXGEN_ecx 0x6e657669
+/* RISE:    "RiseRiseRise" */
+#define signature_RISE_ebx 0x65736952
+#define signature_RISE_edx 0x65736952
+#define signature_RISE_ecx 0x65736952
+/* SIS:     "SiS SiS SiS " */
+#define signature_SIS_ebx 0x20536953
+#define signature_SIS_edx 0x20536953
+#define signature_SIS_ecx 0x20536953
+/* UMC:     "UMC UMC UMC " */
+#define signature_UMC_ebx 0x20434d55
+#define signature_UMC_edx 0x20434d55
+#define signature_UMC_ecx 0x20434d55
+/* VIA:     "VIA VIA VIA " */
+#define signature_VIA_ebx 0x20414956
+#define signature_VIA_edx 0x20414956
+#define signature_VIA_ecx 0x20414956
+/* VORTEX:  "Vortex86 SoC" */
+#define signature_VORTEX_ebx 0x74726f56
+#define signature_VORTEX_edx 0x36387865
+#define signature_VORTEX_ecx 0x436f5320
+
+/* Features in %ecx for leaf 1 */
+#define bit_SSE3        0x00000001
+#define bit_PCLMULQDQ   0x00000002
+#define bit_PCLMUL      bit_PCLMULQDQ   /* for gcc compat */
+#define bit_DTES64      0x00000004
+#define bit_MONITOR     0x00000008
+#define bit_DSCPL       0x00000010
+#define bit_VMX         0x00000020
+#define bit_SMX         0x00000040
+#define bit_EIST        0x00000080
+#define bit_TM2         0x00000100
+#define bit_SSSE3       0x00000200
+#define bit_CNXTID      0x00000400
+#define bit_FMA         0x00001000
+#define bit_CMPXCHG16B  0x00002000
+#define bit_xTPR        0x00004000
+#define bit_PDCM        0x00008000
+#define bit_PCID        0x00020000
+#define bit_DCA         0x00040000
+#define bit_SSE41       0x00080000
+#define bit_SSE4_1      bit_SSE41       /* for gcc compat */
+#define bit_SSE42       0x00100000
+#define bit_SSE4_2      bit_SSE42       /* for gcc compat */
+#define bit_x2APIC      0x00200000
+#define bit_MOVBE       0x00400000
+#define bit_POPCNT      0x00800000
+#define bit_TSCDeadline 0x01000000
+#define bit_AESNI       0x02000000
+#define bit_AES         bit_AESNI       /* for gcc compat */
+#define bit_XSAVE       0x04000000
+#define bit_OSXSAVE     0x08000000
+#define bit_AVX         0x10000000
+#define bit_F16C        0x20000000
+#define bit_RDRND       0x40000000
+
+/* Features in %edx for leaf 1 */
+#define bit_FPU         0x00000001
+#define bit_VME         0x00000002
+#define bit_DE          0x00000004
+#define bit_PSE         0x00000008
+#define bit_TSC         0x00000010
+#define bit_MSR         0x00000020
+#define bit_PAE         0x00000040
+#define bit_MCE         0x00000080
+#define bit_CX8         0x00000100
+#define bit_CMPXCHG8B   bit_CX8         /* for gcc compat */
+#define bit_APIC        0x00000200
+#define bit_SEP         0x00000800
+#define bit_MTRR        0x00001000
+#define bit_PGE         0x00002000
+#define bit_MCA         0x00004000
+#define bit_CMOV        0x00008000
+#define bit_PAT         0x00010000
+#define bit_PSE36       0x00020000
+#define bit_PSN         0x00040000
+#define bit_CLFSH       0x00080000
+#define bit_DS          0x00200000
+#define bit_ACPI        0x00400000
+#define bit_MMX         0x00800000
+#define bit_FXSR        0x01000000
+#define bit_FXSAVE      bit_FXSR        /* for gcc compat */
+#define bit_SSE         0x02000000
+#define bit_SSE2        0x04000000
+#define bit_SS          0x08000000
+#define bit_HTT         0x10000000
+#define bit_TM          0x20000000
+#define bit_PBE         0x80000000
+
+/* Features in %ebx for leaf 7 sub-leaf 0 */
+#define bit_FSGSBASE    0x00000001
+#define bit_SGX         0x00000004
+#define bit_BMI         0x00000008
+#define bit_HLE         0x00000010
+#define bit_AVX2        0x00000020
+#define bit_SMEP        0x00000080
+#define bit_BMI2        0x00000100
+#define bit_ENH_MOVSB   0x00000200
+#define bit_INVPCID     0x00000400
+#define bit_RTM         0x00000800
+#define bit_MPX         0x00004000
+#define bit_AVX512F     0x00010000
+#define bit_AVX512DQ    0x00020000
+#define bit_RDSEED      0x00040000
+#define bit_ADX         0x00080000
+#define bit_AVX512IFMA  0x00200000
+#define bit_CLFLUSHOPT  0x00800000
+#define bit_CLWB        0x01000000
+#define bit_AVX512PF    0x04000000
+#define bit_AVX512ER    0x08000000
+#define bit_AVX512CD    0x10000000
+#define bit_SHA         0x20000000
+#define bit_AVX512BW    0x40000000
+#define bit_AVX512VL    0x80000000
+
+/* Features in %ecx for leaf 7 sub-leaf 0 */
+#define bit_PREFTCHWT1       0x00000001
+#define bit_AVX512VBMI       0x00000002
+#define bit_PKU              0x00000004
+#define bit_OSPKE            0x00000010
+#define bit_WAITPKG          0x00000020
+#define bit_AVX512VBMI2      0x00000040
+#define bit_SHSTK            0x00000080
+#define bit_GFNI             0x00000100
+#define bit_VAES             0x00000200
+#define bit_VPCLMULQDQ       0x00000400
+#define bit_AVX512VNNI       0x00000800
+#define bit_AVX512BITALG     0x00001000
+#define bit_AVX512VPOPCNTDQ  0x00004000
+#define bit_RDPID            0x00400000
+#define bit_CLDEMOTE         0x02000000
+#define bit_MOVDIRI          0x08000000
+#define bit_MOVDIR64B        0x10000000
+#define bit_ENQCMD           0x20000000
+
+/* Features in %edx for leaf 7 sub-leaf 0 */
+#define bit_AVX5124VNNIW  0x00000004
+#define bit_AVX5124FMAPS  0x00000008
+#define bit_UINTR         0x00000020
+#define bit_SERIALIZE     0x00004000
+#define bit_TSXLDTRK      0x00010000
+#define bit_PCONFIG       0x00040000
+#define bit_IBT           0x00100000
+#define bit_AMXBF16       0x00400000
+#define bit_AMXTILE       0x01000000
+#define bit_AMXINT8       0x02000000
+
+/* Features in %eax for leaf 7 sub-leaf 1 */
+#define bit_AVXVNNI       0x00000008
+#define bit_AVX512BF16    0x00000020
+#define bit_HRESET        0x00400000
+
+/* Features in %eax for leaf 13 sub-leaf 1 */
+#define bit_XSAVEOPT    0x00000001
+#define bit_XSAVEC      0x00000002
+#define bit_XSAVES      0x00000008
+
+/* Features in %eax for leaf 0x14 sub-leaf 0 */
+#define bit_PTWRITE     0x00000010
+
+/* Features in %ecx for leaf 0x80000001 */
+#define bit_LAHF_LM     0x00000001
+#define bit_ABM         0x00000020
+#define bit_LZCNT       bit_ABM        /* for gcc compat */
+#define bit_SSE4a       0x00000040
+#define bit_PRFCHW      0x00000100
+#define bit_XOP         0x00000800
+#define bit_LWP         0x00008000
+#define bit_FMA4        0x00010000
+#define bit_TBM         0x00200000
+#define bit_MWAITX      0x20000000
+
+/* Features in %edx for leaf 0x80000001 */
+#define bit_MMXEXT      0x00400000
+#define bit_LM          0x20000000
+#define bit_3DNOWP      0x40000000
+#define bit_3DNOW       0x80000000
+
+/* Features in %ebx for leaf 0x80000008 */
+#define bit_CLZERO      0x00000001
+#define bit_WBNOINVD    0x00000200
+
+
+#if __i386__
+#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
+    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+                  : "0"(__leaf))
+
+#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
+    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
+                  : "0"(__leaf), "2"(__count))
+#else
+/* x86-64 uses %rbx as the base register, so preserve it. */
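+/* In some configurations the compiler reserves %rbx (for example as the PIC
+   register) and will not accept it in a clobber list, so the xchgq sequence
+   below saves and restores it around CPUID instead of declaring a clobber. */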
+#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
+    __asm("  xchgq  %%rbx,%q1\n" \
+          "  cpuid\n" \
+          "  xchgq  %%rbx,%q1" \
+        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+        : "0"(__leaf))
+
+#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
+    __asm("  xchgq  %%rbx,%q1\n" \
+          "  cpuid\n" \
+          "  xchgq  %%rbx,%q1" \
+        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
+        : "0"(__leaf), "2"(__count))
+#endif
+
+static __inline int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig)
+{
+    unsigned int __eax, __ebx, __ecx, __edx;
+#if __i386__
+    int __cpuid_supported;
+
+    __asm("  pushfl\n"
+          "  popl   %%eax\n"
+          "  movl   %%eax,%%ecx\n"
+          "  xorl   $0x00200000,%%eax\n"
+          "  pushl  %%eax\n"
+          "  popfl\n"
+          "  pushfl\n"
+          "  popl   %%eax\n"
+          "  movl   $0,%0\n"
+          "  cmpl   %%eax,%%ecx\n"
+          "  je     1f\n"
+          "  movl   $1,%0\n"
+          "1:"
+        : "=r" (__cpuid_supported) : : "eax", "ecx");
+    if (!__cpuid_supported)
+        return 0;
+#endif
+
+    __cpuid(__leaf, __eax, __ebx, __ecx, __edx);
+    if (__sig)
+        *__sig = __ebx;
+    return __eax;
+}
+
+static __inline int __get_cpuid (unsigned int __leaf, unsigned int *__eax,
+                                 unsigned int *__ebx, unsigned int *__ecx,
+                                 unsigned int *__edx)
+{
+    unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);
+
+    if (__max_leaf == 0 || __max_leaf < __leaf)
+        return 0;
+
+    __cpuid(__leaf, *__eax, *__ebx, *__ecx, *__edx);
+    return 1;
+}
+
+static __inline int __get_cpuid_count (unsigned int __leaf,
+                                       unsigned int __subleaf,
+                                       unsigned int *__eax, unsigned int *__ebx,
+                                       unsigned int *__ecx, unsigned int *__edx)
+{
+    unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);
+
+    if (__max_leaf == 0 || __max_leaf < __leaf)
+        return 0;
+
+    __cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);
+    return 1;
+}
+
+#endif /* __CPUID_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/algorithm b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/algorithm
new file mode 100644
index 0000000..f14a0b0
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/algorithm
@@ -0,0 +1,116 @@
+/*===---- algorithm - CUDA wrapper for <algorithm> -------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_ALGORITHM
+#define __CLANG_CUDA_WRAPPERS_ALGORITHM
+
+// This header defines __device__ overloads of std::min/max.
+//
+// Ideally we'd declare these functions only if we're <= C++11. In C++14,
+// these functions are constexpr, and so are implicitly __host__ __device__.
+//
+// However, the compiler being in C++14 mode does not imply that the standard
+// library supports C++14. There is no macro we can test to check that the
+// stdlib has constexpr std::min/max. Thus we have to unconditionally define
+// our device overloads.
+//
+// A host+device function cannot be overloaded, and a constexpr function
+// implicitly become host device if there's no explicitly host or device
+// overload preceding it. So the simple thing to do would be to declare our
+// device min/max overloads, and then #include_next <algorithm>. This way our
+// device overloads would come first, and so if we have a C++14 stdlib, its
+// min/max won't become host+device and conflict with our device overloads.
+//
+// But that also doesn't work. libstdc++ is evil and declares std::min/max in
+// an internal header that is included *before* <algorithm>. Thus by the time
+// we're inside of this file, std::min/max may already have been declared, and
+// thus we can't prevent them from becoming host+device if they're constexpr.
+//
+// Therefore we perpetrate the following hack: We mark our __device__ overloads
+// with __attribute__((enable_if(true, ""))). This causes the signature of the
+// function to change without changing anything else about it. (Except that
+// overload resolution will prefer it over the __host__ __device__ version
+// rather than considering them equally good).
+
+#include_next <algorithm>
+
+// We need to define these overloads in exactly the namespace our standard
+// library uses (including the right inline namespace), otherwise they won't be
+// picked up by other functions in the standard library (e.g. functions in
+// <complex>). Thus the ugliness below.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+#pragma push_macro("_CPP14_CONSTEXPR")
+#if __cplusplus >= 201402L
+#define _CPP14_CONSTEXPR constexpr
+#else
+#define _CPP14_CONSTEXPR
+#endif
+
+template <class __T, class __Cmp>
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
+max(const __T &__a, const __T &__b, __Cmp __cmp) {
+  return __cmp(__a, __b) ? __b : __a;
+}
+
+template <class __T>
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
+max(const __T &__a, const __T &__b) {
+  return __a < __b ? __b : __a;
+}
+
+template <class __T, class __Cmp>
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
+min(const __T &__a, const __T &__b, __Cmp __cmp) {
+  return __cmp(__b, __a) ? __b : __a;
+}
+
+template <class __T>
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
+min(const __T &__a, const __T &__b) {
+  return __a < __b ? __a : __b;
+}
+
+#pragma pop_macro("_CPP14_CONSTEXPR")
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
+#endif // __CLANG_CUDA_WRAPPERS_ALGORITHM
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/complex b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/complex
new file mode 100644
index 0000000..e6805b6
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/complex
@@ -0,0 +1,90 @@
+/*===---- complex - CUDA wrapper for <complex> -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_COMPLEX
+#define __CLANG_CUDA_WRAPPERS_COMPLEX
+
+// Wrapper around <complex> that forces its functions to be __host__
+// __device__.
+
+// First, include host-only headers we think are likely to be included by
+// <complex>, so that the pragma below only applies to <complex> itself.
+#if __cplusplus >= 201103L
+#include <type_traits>
+#endif
+#include <stdexcept>
+#include <cmath>
+#include <sstream>
+
+// Next, include our <algorithm> wrapper, to ensure that device overloads of
+// std::min/max are available.
+#include <algorithm>
+
+#pragma clang force_cuda_host_device begin
+
+// When compiling for device, ask libstdc++ to use its own implementations of
+// complex functions, rather than calling builtins (which resolve to library
+// functions that don't exist when compiling CUDA device code).
+//
+// This is a little dicey, because it causes libstdc++ to define a different
+// set of overloads on host and device.
+//
+//    // Present only when compiling for host.
+//    __host__ __device__ void complex<float> sin(const complex<float>& x) {
+//      return __builtin_csinf(x);
+//    }
+//
+//    // Present when compiling for host and for device.
+//    template <typename T>
+//    void __host__ __device__ complex<T> sin(const complex<T>& x) {
+//      return complex<T>(sin(x.real()) * cosh(x.imag()),
+//                        cos(x.real()) * sinh(x.imag()));
+//    }
+//
+// This is safe because when compiling for device, all function calls in
+// __host__ code to sin() will still resolve to *something*, even if they don't
+// resolve to the same function as they resolve to when compiling for host. We
+// don't care that they don't resolve to the right function because we won't
+// codegen this host code when compiling for device.
+
+#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX")
+#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
+#define _GLIBCXX_USE_C99_COMPLEX 0
+#define _GLIBCXX_USE_C99_COMPLEX_TR1 0
+
+// Work around a compatibility issue with libstdc++ 11.1.0
+// https://bugs.llvm.org/show_bug.cgi?id=50383
+#pragma push_macro("__failed_assertion")
+#if _GLIBCXX_RELEASE == 11
+#define __failed_assertion __cuda_failed_assertion
+#endif
+
+#include_next <complex>
+
+#pragma pop_macro("__failed_assertion")
+#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
+#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX")
+
+#pragma clang force_cuda_host_device end
+
+#endif // include guard
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/new b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/new
new file mode 100644
index 0000000..d5fb3b7
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/cuda_wrappers/new
@@ -0,0 +1,106 @@
+/*===---- new - CUDA wrapper for <new> -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_NEW
+#define __CLANG_CUDA_WRAPPERS_NEW
+
+#include_next <new>
+
+#if !defined(__device__)
+// The <new> header has been included too early from the standard C++ library
+// and CUDA-specific macros are not available yet.
+// Undo the include guard and try again later.
+#undef __CLANG_CUDA_WRAPPERS_NEW
+#else
+
+#pragma push_macro("CUDA_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define CUDA_NOEXCEPT noexcept
+#else
+#define CUDA_NOEXCEPT
+#endif
+
+// Device overrides for non-placement new and delete.
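+// A minimal device-side sketch (assumes the default CUDA device heap is large
+// enough for the allocation):
+//   __global__ void __k() {
+//     int *__p = new int(42);   // forwards to ::malloc on the device heap
+//     delete __p;               // forwards to ::free
+//   }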
+__device__ inline void *operator new(__SIZE_TYPE__ size) {
+  if (size == 0) {
+    size = 1;
+  }
+  return ::malloc(size);
+}
+__device__ inline void *operator new(__SIZE_TYPE__ size,
+                                     const std::nothrow_t &) CUDA_NOEXCEPT {
+  return ::operator new(size);
+}
+
+__device__ inline void *operator new[](__SIZE_TYPE__ size) {
+  return ::operator new(size);
+}
+__device__ inline void *operator new[](__SIZE_TYPE__ size,
+                                       const std::nothrow_t &) {
+  return ::operator new(size);
+}
+
+__device__ inline void operator delete(void* ptr) CUDA_NOEXCEPT {
+  if (ptr) {
+    ::free(ptr);
+  }
+}
+__device__ inline void operator delete(void *ptr,
+                                       const std::nothrow_t &) CUDA_NOEXCEPT {
+  ::operator delete(ptr);
+}
+
+__device__ inline void operator delete[](void* ptr) CUDA_NOEXCEPT {
+  ::operator delete(ptr);
+}
+__device__ inline void operator delete[](void *ptr,
+                                         const std::nothrow_t &) CUDA_NOEXCEPT {
+  ::operator delete(ptr);
+}
+
+// Sized delete, C++14 only.
+#if __cplusplus >= 201402L
+__device__ inline void operator delete(void *ptr,
+                                       __SIZE_TYPE__ size) CUDA_NOEXCEPT {
+  ::operator delete(ptr);
+}
+__device__ inline void operator delete[](void *ptr,
+                                         __SIZE_TYPE__ size) CUDA_NOEXCEPT {
+  ::operator delete(ptr);
+}
+#endif
+
+// Device overrides for placement new and delete.
+__device__ inline void *operator new(__SIZE_TYPE__, void *__ptr) CUDA_NOEXCEPT {
+  return __ptr;
+}
+__device__ inline void *operator new[](__SIZE_TYPE__, void *__ptr) CUDA_NOEXCEPT {
+  return __ptr;
+}
+__device__ inline void operator delete(void *, void *) CUDA_NOEXCEPT {}
+__device__ inline void operator delete[](void *, void *) CUDA_NOEXCEPT {}
+
+#pragma pop_macro("CUDA_NOEXCEPT")
+
+#endif // __device__
+#endif // include guard
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/emmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/emmintrin.h
new file mode 100644
index 0000000..bb75972
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/emmintrin.h
@@ -0,0 +1,4981 @@
+/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __EMMINTRIN_H
+#define __EMMINTRIN_H
+
+#include <xmmintrin.h>
+
+typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
+
+typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
+
+/* Type defines.  */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef char __v16qi __attribute__((__vector_size__(16)));
+
+/* Unsigned types */
+typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
+typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
+
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
*/ +typedef signed char __v16qs __attribute__((__vector_size__(16))); + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64))) + +/// Adds lower double-precision values in both operands and returns the +/// sum in the lower 64 bits of the result. The upper 64 bits of the result +/// are copied from the upper double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSD / ADDSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// sum of the lower 64 bits of both operands. The upper 64 bits are copied +/// from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_add_sd(__m128d __a, __m128d __b) +{ + __a[0] += __b[0]; + return __a; +} + +/// Adds two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD / ADDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the sums of both +/// operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_add_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2df)__a + (__v2df)__b); +} + +/// Subtracts the lower double-precision value of the second operand +/// from the lower double-precision value of the first operand and returns +/// the difference in the lower 64 bits of the result. The upper 64 bits of +/// the result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBSD / SUBSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the minuend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// difference of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_sub_sd(__m128d __a, __m128d __b) +{ + __a[0] -= __b[0]; + return __a; +} + +/// Subtracts two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD / SUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the minuend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the differences between +/// both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_sub_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2df)__a - (__v2df)__b); +} + +/// Multiplies lower double-precision values in both operands and returns +/// the product in the lower 64 bits of the result. The upper 64 bits of the +/// result are copied from the upper double-precision value of the first +/// operand. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULSD / MULSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// product of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mul_sd(__m128d __a, __m128d __b) +{ + __a[0] *= __b[0]; + return __a; +} + +/// Multiplies two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPD / MULPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the products of both +/// operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mul_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2df)__a * (__v2df)__b); +} + +/// Divides the lower double-precision value of the first operand by the +/// lower double-precision value of the second operand and returns the +/// quotient in the lower 64 bits of the result. The upper 64 bits of the +/// result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVSD / DIVSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the dividend. +/// \param __b +/// A 128-bit vector of [2 x double] containing divisor. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// quotient of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_div_sd(__m128d __a, __m128d __b) +{ + __a[0] /= __b[0]; + return __a; +} + +/// Performs an element-by-element division of two 128-bit vectors of +/// [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPD / DIVPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the dividend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the divisor. +/// \returns A 128-bit vector of [2 x double] containing the quotients of both +/// operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_div_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2df)__a / (__v2df)__b); +} + +/// Calculates the square root of the lower double-precision value of +/// the second operand and returns it in the lower 64 bits of the result. +/// The upper 64 bits of the result are copied from the upper +/// double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTSD / SQRTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// upper 64 bits of this operand are copied to the upper 64 bits of the +/// result. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// square root is calculated using the lower 64 bits of this operand. 
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// square root of the lower 64 bits of operand \a __b, and whose upper 64 +/// bits are copied from the upper 64 bits of operand \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_sqrt_sd(__m128d __a, __m128d __b) +{ + __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b); + return __extension__ (__m128d) { __c[0], __a[1] }; +} + +/// Calculates the square root of the each of two values stored in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPD / SQRTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [2 x double] containing the square roots of the +/// values in the operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_sqrt_pd(__m128d __a) +{ + return __builtin_ia32_sqrtpd((__v2df)__a); +} + +/// Compares lower 64-bit double-precision values of both operands, and +/// returns the lesser of the pair of values in the lower 64-bits of the +/// result. The upper 64 bits of the result are copied from the upper +/// double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINSD / MINSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// minimum value between both operands. The upper 64 bits are copied from +/// the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_min_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b); +} + +/// Performs element-by-element comparison of the two 128-bit vectors of +/// [2 x double] and returns the vector containing the lesser of each pair of +/// values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPD / MINPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the minimum values +/// between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_min_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b); +} + +/// Compares lower 64-bit double-precision values of both operands, and +/// returns the greater of the pair of values in the lower 64-bits of the +/// result. The upper 64 bits of the result are copied from the upper +/// double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXSD / MAXSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// maximum value between both operands. The upper 64 bits are copied from +/// the upper 64 bits of the first source operand. 
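+
+/* Illustrative usage (editor's sketch, not part of the upstream header; the
+ * __demo_* name is hypothetical and uses only intrinsics defined above this
+ * point). */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_clamped_sqrt(__m128d __x, __m128d __hi)
+{
+  /* Per-lane square root, then an element-wise upper clamp. */
+  return _mm_min_pd(_mm_sqrt_pd(__x), __hi);
+}
+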
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_max_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b); +} + +/// Performs element-by-element comparison of the two 128-bit vectors of +/// [2 x double] and returns the vector containing the greater of each pair +/// of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPD / MAXPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the maximum values +/// between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_max_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAND / PAND instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the +/// values between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_and_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2du)__a & (__v2du)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPANDN / PANDN instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector of [2 x double] containing the right source operand. +/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the +/// values in the second operand and the one's complement of the first +/// operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_andnot_pd(__m128d __a, __m128d __b) +{ + return (__m128d)(~(__v2du)__a & (__v2du)__b); +} + +/// Performs a bitwise OR of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPOR / POR instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the +/// values between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_or_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2du)__a | (__v2du)__b); +} + +/// Performs a bitwise XOR of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPXOR / PXOR instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the +/// values between both operands. 
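+
+/* Illustrative usage (editor's sketch, not part of the upstream header; the
+ * __demo_* name is hypothetical). _mm_andnot_pd computes ~a & b, so masking
+ * off the sign bit gives an absolute value. */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_abs_pd(__m128d __x)
+{
+  __m128d __signbit = __extension__ (__m128d){-0.0, -0.0};
+  return _mm_andnot_pd(__signbit, __x); /* clears bit 63 of each lane */
+}
+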
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_xor_pd(__m128d __a, __m128d __b) +{ + return (__m128d)((__v2du)__a ^ (__v2du)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0x0 +/// for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQPD / CMPEQPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpeq_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are less than those in the second operand. Each comparison +/// yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmplt_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are less than or equal to those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmple_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are greater than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpgt_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are greater than or equal to those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. 
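+
+/* Illustrative usage (editor's sketch, not part of the upstream header; the
+ * __demo_* name is hypothetical): the all-ones/all-zeros masks produced by
+ * the packed compares compose with the logical intrinsics into a per-lane
+ * select. */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_blend_lt(__m128d __a, __m128d __b)
+{
+  /* Per-lane (a < b) ? a : b. A NaN lane compares false, yielding a zero
+   * mask, so __b's lane is chosen there. */
+  __m128d __m = _mm_cmplt_pd(__a, __b);
+  return _mm_or_pd(_mm_and_pd(__m, __a), _mm_andnot_pd(__m, __b));
+}
+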
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpge_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are ordered with respect to those in the second operand. +/// +/// A pair of double-precision values are "ordered" with respect to each +/// other if neither value is a NaN. Each comparison yields 0x0 for false, +/// 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDPD / CMPORDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpord_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are unordered with respect to those in the second operand. +/// +/// A pair of double-precision values are "unordered" with respect to each +/// other if one or both values are NaN. Each comparison yields 0x0 for +/// false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDPD / CMPUNORDPD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpunord_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are unequal to those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQPD / CMPNEQPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpneq_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not less than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. 
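+
+/* Illustrative usage (editor's sketch, not part of the upstream header; the
+ * __demo_* name is hypothetical): cmpord(x, x) is all-ones exactly where x
+ * is not NaN, which gives a cheap NaN scrub. */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_zero_nans(__m128d __x)
+{
+  return _mm_and_pd(__x, _mm_cmpord_pd(__x, __x)); /* NaN lanes -> +0.0 */
+}
+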
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpnlt_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not less than or equal to those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpnle_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not greater than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpngt_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not greater than or equal to those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpnge_pd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQSD / CMPEQSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. 
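+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): the negated compares above are true for unordered (NaN)
+ * pairs, which is what distinguishes cmpnlt from cmpge. */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_ge_from_nlt(__m128d __a, __m128d __b)
+{
+  /* cmpnlt is true where a >= b OR the pair is unordered; masking with
+   * cmpord recovers the ordered greater-or-equal result. */
+  return _mm_and_pd(_mm_cmpnlt_pd(__a, __b), _mm_cmpord_pd(__a, __b));
+}
+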
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpeq_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmplt_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmple_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. 
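+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): the scalar compares put their mask in bits [63:0] only, so
+ * any blend built from them is meaningful in the low lane alone. */
+static __inline__ double __DEFAULT_FN_ATTRS
+__demo_select_min(__m128d __a, __m128d __b)
+{
+  __m128d __m = _mm_cmplt_sd(__a, __b); /* low lane: all-ones iff a0 < b0 */
+  __m128d __r = _mm_or_pd(_mm_and_pd(__m, __a), _mm_andnot_pd(__m, __b));
+  return __r[0]; /* a0 < b0 ? a0 : b0; b0 when the pair is unordered */
+}
+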
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpgt_sd(__m128d __a, __m128d __b) +{ + __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a); + return __extension__ (__m128d) { __c[0], __a[1] }; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpge_sd(__m128d __a, __m128d __b) +{ + __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a); + return __extension__ (__m128d) { __c[0], __a[1] }; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is "ordered" with respect to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are "ordered" with respect to each other if +/// neither value is a NaN. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDSD / CMPORDSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpord_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is "unordered" with respect to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are "unordered" with respect to each other if +/// one or both values are NaN. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDSD / CMPUNORDSD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. 
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpunord_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQSD / CMPNEQSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpneq_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not less than the corresponding +/// value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpnlt_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. 
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpnle_sd(__m128d __a, __m128d __b) +{ + return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not greater than the corresponding +/// value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpngt_sd(__m128d __a, __m128d __b) +{ + __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a); + return __extension__ (__m128d) { __c[0], __a[1] }; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cmpnge_sd(__m128d __a, __m128d __b) +{ + __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a); + return __extension__ (__m128d) { __c[0], __a[1] }; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. 
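+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): a scalar lower clamp built from a scalar compare mask. */
+static __inline__ double __DEFAULT_FN_ATTRS
+__demo_scalar_clamp0(__m128d __x)
+{
+  __m128d __zero = __extension__ (__m128d){0.0, 0.0};
+  __m128d __m = _mm_cmplt_sd(__x, __zero); /* all-ones iff x0 < 0 */
+  __m128d __r = _mm_andnot_pd(__m, __x);   /* x0 < 0 ? +0.0 : x0 */
+  return __r[0];
+}
+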
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_comieq_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comilt_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comile_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. 
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_comigt_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comige_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 1 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comineq_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. The +/// comparison yields 0 for false, 1 for true. +/// +/// If either of the two lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. 
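+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): unlike the mask-producing cmp*_sd family, the comi*
+ * intrinsics return an int and can drive ordinary control flow. */
+static __inline__ double __DEFAULT_FN_ATTRS
+__demo_scalar_max(__m128d __a, __m128d __b)
+{
+  /* Per the documentation above, comilt returns 0 when either input is
+   * NaN, so this falls back to __a's low lane in that case. */
+  return _mm_comilt_sd(__a, __b) ? __b[0] : __a[0];
+}
+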
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomieq_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two lower +/// double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomilt_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two lower +/// double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomile_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two lower +/// double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. 
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomigt_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two +/// lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomige_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. +/// +/// The comparison yields 0 for false, 1 for true. If either of the two lower +/// double-precision values is NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison result. If either of the two +/// lower double-precision values is NaN, 1 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomineq_sd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two single-precision floating-point +/// values, returned in the lower 64 bits of a 128-bit vector of [4 x float]. +/// The upper 64 bits of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2PS / CVTPD2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtpd_ps(__m128d __a) +{ + return __builtin_ia32_cvtpd2ps((__v2df)__a); +} + +/// Converts the lower two single-precision floating-point elements of a +/// 128-bit vector of [4 x float] into two double-precision floating-point +/// values, returned in a 128-bit vector of [2 x double]. The upper two +/// elements of the input vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2PD / CVTPS2PD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. 
The lower two single-precision +/// floating-point elements are converted to double-precision values. The +/// upper two elements are unused. +/// \returns A 128-bit vector of [2 x double] containing the converted values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtps_pd(__m128 __a) +{ + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df); +} + +/// Converts the lower two integer elements of a 128-bit vector of +/// [4 x i32] into two double-precision floating-point values, returned in a +/// 128-bit vector of [2 x double]. +/// +/// The upper two elements of the input vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PD / CVTDQ2PD instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. The lower two integer elements are +/// converted to double-precision values. +/// +/// The upper two elements are unused. +/// \returns A 128-bit vector of [2 x double] containing the converted values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtepi32_pd(__m128i __a) +{ + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper +/// 64 bits of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2DQ / CVTPD2DQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtpd_epi32(__m128d __a) +{ + return __builtin_ia32_cvtpd2dq((__v2df)__a); +} + +/// Converts the low-order element of a 128-bit vector of [2 x double] +/// into a 32-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 32-bit signed integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtsd_si32(__m128d __a) +{ + return __builtin_ia32_cvtsd2si((__v2df)__a); +} + +/// Converts the lower double-precision floating-point element of a +/// 128-bit vector of [2 x double], in the second parameter, into a +/// single-precision floating-point value, returned in the lower 32 bits of a +/// 128-bit vector of [4 x float]. The upper 96 bits of the result vector are +/// copied from the upper 96 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SS / CVTSD2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are +/// copied to the upper 96 bits of the result. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision +/// floating-point element is used in the conversion. +/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the +/// converted value from the second parameter. The upper 96 bits are copied +/// from the upper 96 bits of the first parameter. 
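+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): narrowing and widening conversions compose, at the cost of
+ * float precision. */
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+__demo_round_trip(__m128d __x)
+{
+  /* Narrow both lanes to float, then widen back; lanes not exactly
+   * representable as float lose precision. */
+  return _mm_cvtps_pd(_mm_cvtpd_ps(__x));
+}
+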
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtsd_ss(__m128 __a, __m128d __b) +{ + return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b); +} + +/// Converts a 32-bit signed integer value, in the second parameter, into +/// a double-precision floating-point value, returned in the lower 64 bits of +/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector +/// are copied from the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are +/// copied to the upper 64 bits of the result. +/// \param __b +/// A 32-bit signed integer containing the value to be converted. +/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the +/// converted value from the second parameter. The upper 64 bits are copied +/// from the upper 64 bits of the first parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtsi32_sd(__m128d __a, int __b) +{ + __a[0] = __b; + return __a; +} + +/// Converts the lower single-precision floating-point element of a +/// 128-bit vector of [4 x float], in the second parameter, into a +/// double-precision floating-point value, returned in the lower 64 bits of +/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector +/// are copied from the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SD / CVTSS2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are +/// copied to the upper 64 bits of the result. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower single-precision +/// floating-point element is used in the conversion. +/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the +/// converted value from the second parameter. The upper 64 bits are copied +/// from the upper 64 bits of the first parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtss_sd(__m128d __a, __m128 __b) +{ + __a[0] = __b[0]; + return __a; +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. +/// +/// If the result of either conversion is inexact, the result is truncated +/// (rounded towards zero) regardless of the current MXCSR setting. The upper +/// 64 bits of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPD2DQ / CVTTPD2DQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttpd_epi32(__m128d __a) +{ + return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a); +} + +/// Converts the low-order element of a [2 x double] vector into a 32-bit +/// signed integer value, truncating the result when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 32-bit signed integer containing the converted value. 
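+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): cvt honors the MXCSR rounding mode while cvtt always
+ * truncates toward zero. */
+static __inline__ int __DEFAULT_FN_ATTRS
+__demo_round_vs_trunc(void)
+{
+  __m128d __x = __extension__ (__m128d){2.75, 0.0};
+  int __rounded = _mm_cvtsd_si32(__x);            /* 3 under default MXCSR */
+  __m128i __trunc = _mm_cvttpd_epi32(__x);        /* low lane: 2 */
+  return __rounded - ((__v4si)__trunc)[0];        /* 1 */
+}
+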
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvttsd_si32(__m128d __a) +{ + return __builtin_ia32_cvttsd2si((__v2df)__a); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPD2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 64-bit vector of [2 x i32] containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpd_pi32(__m128d __a) +{ + return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If the result of either conversion is inexact, the result is truncated +/// (rounded towards zero) regardless of the current MXCSR setting. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPD2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 64-bit vector of [2 x i32] containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvttpd_pi32(__m128d __a) +{ + return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a); +} + +/// Converts the two signed 32-bit integer elements of a 64-bit vector of +/// [2 x i32] into two double-precision floating-point values, returned in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. +/// \returns A 128-bit vector of [2 x double] containing the converted values. +static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi32_pd(__m64 __a) +{ + return __builtin_ia32_cvtpi2pd((__v2si)__a); +} + +/// Returns the low-order element of a 128-bit vector of [2 x double] as +/// a double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are returned. +/// \returns A double-precision floating-point value copied from the lower 64 +/// bits of \a __a. +static __inline__ double __DEFAULT_FN_ATTRS +_mm_cvtsd_f64(__m128d __a) +{ + return __a[0]; +} + +/// Loads a 128-bit floating-point vector of [2 x double] from an aligned +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_load_pd(double const *__dp) +{ + return *(const __m128d*)__dp; +} + +/// Loads a double-precision floating-point value from a specified memory +/// location and duplicates it to both vector elements of a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVDDUP instruction. +/// +/// \param __dp +/// A pointer to a memory location containing a double-precision value. +/// \returns A 128-bit vector of [2 x double] containing the loaded and +/// duplicated values. 
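+
+/* Editor's sketch (not part of the upstream header; the __demo_* name is
+ * hypothetical): an aligned pair load followed by a horizontal sum of the
+ * two lanes. */
+static __inline__ double __DEFAULT_FN_ATTRS
+__demo_sum_pair(double const *__dp)
+{
+  /* __dp must be 16-byte aligned for _mm_load_pd. */
+  __m128d __v = _mm_load_pd(__dp);
+  return _mm_cvtsd_f64(__v) + __v[1];
+}
+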
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_load1_pd(double const *__dp) +{ + struct __mm_load1_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u; + return __extension__ (__m128d){ __u, __u }; +} + +#define _mm_load_pd1(dp) _mm_load1_pd(dp) + +/// Loads two double-precision values, in reverse order, from an aligned +/// memory location into a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction + +/// needed shuffling instructions. In AVX mode, the shuffling may be combined +/// with the \c VMOVAPD, resulting in only a \c VPERMILPD instruction. +/// +/// \param __dp +/// A 16-byte aligned pointer to an array of double-precision values to be +/// loaded in reverse order. +/// \returns A 128-bit vector of [2 x double] containing the reversed loaded +/// values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_loadr_pd(double const *__dp) +{ + __m128d __u = *(const __m128d*)__dp; + return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0); +} + +/// Loads a 128-bit floating-point vector of [2 x double] from an +/// unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_loadu_pd(double const *__dp) +{ + struct __loadu_pd { + __m128d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd*)__dp)->__v; +} + +/// Loads a 64-bit integer value to the low element of a 128-bit integer +/// vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A pointer to a 64-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [2 x i64] containing the loaded value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadu_si64(void const *__a) +{ + struct __loadu_si64 { + long long __v; + } __attribute__((__packed__, __may_alias__)); + long long __u = ((const struct __loadu_si64*)__a)->__v; + return __extension__ (__m128i)(__v2di){__u, 0LL}; +} + +/// Loads a 32-bit integer value to the low element of a 128-bit integer +/// vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A pointer to a 32-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [4 x i32] containing the loaded value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadu_si32(void const *__a) +{ + struct __loadu_si32 { + int __v; + } __attribute__((__packed__, __may_alias__)); + int __u = ((const struct __loadu_si32*)__a)->__v; + return __extension__ (__m128i)(__v4si){__u, 0, 0, 0}; +} + +/// Loads a 16-bit integer value to the low element of a 128-bit integer +/// vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __a +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. 
+/// \returns A 128-bit vector of [8 x i16] containing the loaded value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadu_si16(void const *__a) +{ + struct __loadu_si16 { + short __v; + } __attribute__((__packed__, __may_alias__)); + short __u = ((const struct __loadu_si16*)__a)->__v; + return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0}; +} + +/// Loads a 64-bit double-precision value to the low element of a +/// 128-bit integer vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSD / MOVSD instruction. +/// +/// \param __dp +/// A pointer to a memory location containing a double-precision value. +/// The address of the memory location does not have to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded value. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_load_sd(double const *__dp) +{ + struct __mm_load_sd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_load_sd_struct*)__dp)->__u; + return __extension__ (__m128d){ __u, 0 }; +} + +/// Loads a double-precision value into the high-order bits of a 128-bit +/// vector of [2 x double]. The low-order bits are copied from the low-order +/// bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the result. +/// \param __dp +/// A pointer to a 64-bit memory location containing a double-precision +/// floating-point value that is loaded. The loaded value is written to bits +/// [127:64] of the result. The address of the memory location does not have +/// to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the moved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_loadh_pd(__m128d __a, double const *__dp) +{ + struct __mm_loadh_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u; + return __extension__ (__m128d){ __a[0], __u }; +} + +/// Loads a double-precision value into the low-order bits of a 128-bit +/// vector of [2 x double]. The high-order bits are copied from the +/// high-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the result. +/// \param __dp +/// A pointer to a 64-bit memory location containing a double-precision +/// floating-point value that is loaded. The loaded value is written to bits +/// [63:0] of the result. The address of the memory location does not have to +/// be aligned. +/// \returns A 128-bit vector of [2 x double] containing the moved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_loadl_pd(__m128d __a, double const *__dp) +{ + struct __mm_loadl_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u; + return __extension__ (__m128d){ __u, __a[1] }; +} + +/// Constructs a 128-bit floating-point vector of [2 x double] with +/// unspecified content. This could be used as an argument to another +/// intrinsic function where the argument is required but the value is not +/// actually used. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. 
+/// +/// \returns A 128-bit floating-point vector of [2 x double] with unspecified +/// content. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_undefined_pd(void) +{ + return (__m128d)__builtin_ia32_undef128(); +} + +/// Constructs a 128-bit floating-point vector of [2 x double]. The lower +/// 64 bits of the vector are initialized with the specified double-precision +/// floating-point value. The upper 64 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. The +/// lower 64 bits contain the value of the parameter. The upper 64 bits are +/// set to zero. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_set_sd(double __w) +{ + return __extension__ (__m128d){ __w, 0 }; +} + +/// Constructs a 128-bit floating-point vector of [2 x double], with each +/// of the two double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_set1_pd(double __w) +{ + return __extension__ (__m128d){ __w, __w }; +} + +/// Constructs a 128-bit floating-point vector of [2 x double], with each +/// of the two double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_set_pd1(double __w) +{ + return _mm_set1_pd(__w); +} + +/// Constructs a 128-bit floating-point vector of [2 x double] +/// initialized with the specified double-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the upper 64 +/// bits of the result. +/// \param __x +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_set_pd(double __w, double __x) +{ + return __extension__ (__m128d){ __x, __w }; +} + +/// Constructs a 128-bit floating-point vector of [2 x double], +/// initialized in reverse order with the specified double-precision +/// floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \param __x +/// A double-precision floating-point value used to initialize the upper 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. 
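The high-to-low argument order of _mm_set_pd is a common stumbling block; a minimal sketch (illustrative only; the _mm_setr_pd definition documented above follows below) showing that _mm_set_pd and _mm_setr_pd with swapped arguments build the same vector:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d hi_first = _mm_set_pd(2.0, 1.0);  /* elements listed high-to-low */
  __m128d lo_first = _mm_setr_pd(1.0, 2.0); /* elements listed low-to-high */
  double out[2];
  _mm_storeu_pd(out, hi_first);
  printf("%f %f\n", out[0], out[1]); /* 1.000000 2.000000 */
  _mm_storeu_pd(out, lo_first);
  printf("%f %f\n", out[0], out[1]); /* 1.000000 2.000000 */
  return 0;
}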
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_setr_pd(double __w, double __x) +{ + return __extension__ (__m128d){ __w, __x }; +} + +/// Constructs a 128-bit floating-point vector of [2 x double] +/// initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit floating-point vector of [2 x double] with +/// all elements set to zero. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_setzero_pd(void) +{ + return __extension__ (__m128d){ 0, 0 }; +} + +/// Constructs a 128-bit floating-point vector of [2 x double]. The lower +/// 64 bits are set to the lower 64 bits of the second parameter. The upper +/// 64 bits are set to the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits are written to the +/// upper 64 bits of the result. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the +/// lower 64 bits of the result. +/// \returns A 128-bit vector of [2 x double] containing the moved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_move_sd(__m128d __a, __m128d __b) +{ + __a[0] = __b[0]; + return __a; +} + +/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSD / MOVSD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_sd(double *__dp, __m128d __a) +{ + struct __mm_store_sd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_store_sd_struct*)__dp)->__u = __a[0]; +} + +/// Moves packed double-precision values from a 128-bit vector of +/// [2 x double] to a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPS instruction. +/// +/// \param __dp +/// A pointer to an aligned memory location that can store two +/// double-precision values. +/// \param __a +/// A packed 128-bit vector of [2 x double] containing the values to be +/// moved. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_pd(double *__dp, __m128d __a) +{ + *(__m128d*)__dp = __a; +} + +/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to +/// the upper and lower 64 bits of a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the +/// VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS instruction. +/// +/// \param __dp +/// A pointer to a memory location that can store two double-precision +/// values. +/// \param __a +/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each +/// of the values in \a __dp. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store1_pd(double *__dp, __m128d __a) +{ + __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); + _mm_store_pd(__dp, __a); +} + +/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to +/// the upper and lower 64 bits of a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the +/// VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS instruction. +/// +/// \param __dp +/// A pointer to a memory location that can store two double-precision +/// values. 
+/// \param __a +/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each +/// of the values in \a __dp. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_pd1(double *__dp, __m128d __a) +{ + _mm_store1_pd(__dp, __a); +} + +/// Stores a 128-bit vector of [2 x double] into an unaligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_pd(double *__dp, __m128d __a) +{ + struct __storeu_pd { + __m128d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__dp)->__v = __a; +} + +/// Stores two double-precision values, in reverse order, from a 128-bit +/// vector of [2 x double] to a 16-byte aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to a shuffling instruction followed by a +/// VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 16-byte aligned memory location that can store two +/// double-precision values. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be reversed and +/// stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storer_pd(double *__dp, __m128d __a) +{ + __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0); + *(__m128d *)__dp = __a; +} + +/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeh_pd(double *__dp, __m128d __a) +{ + struct __mm_storeh_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1]; +} + +/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storel_pd(double *__dp, __m128d __a) +{ + struct __mm_storeh_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0]; +} + +/// Adds the corresponding elements of two 128-bit vectors of [16 x i8], +/// saving the lower 8 bits of each sum in the corresponding element of a +/// 128-bit result vector of [16 x i8]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDB / PADDB instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// \param __b +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit vector of [16 x i8] containing the sums of both +/// parameters. 
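Because only the lower 8 bits of each sum are kept, the addition is modular; a minimal sketch (illustrative only; the _mm_add_epi8 definition follows below):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8((char)200);
  __m128i b = _mm_set1_epi8(100);
  /* 200 + 100 = 300, which wraps modulo 256 to 44 in every byte lane. */
  unsigned char out[16];
  _mm_storeu_si128((__m128i *)out, _mm_add_epi8(a, b));
  printf("%u\n", out[0]); /* prints: 44 */
  return 0;
}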
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_add_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)((__v16qu)__a + (__v16qu)__b); +} + +/// Adds the corresponding elements of two 128-bit vectors of [8 x i16], +/// saving the lower 16 bits of each sum in the corresponding element of a +/// 128-bit result vector of [8 x i16]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDW / PADDW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// \returns A 128-bit vector of [8 x i16] containing the sums of both +/// parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_add_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)((__v8hu)__a + (__v8hu)__b); +} + +/// Adds the corresponding elements of two 128-bit vectors of [4 x i32], +/// saving the lower 32 bits of each sum in the corresponding element of a +/// 128-bit result vector of [4 x i32]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDD / PADDD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \param __b +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the sums of both +/// parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_add_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a + (__v4su)__b); +} + +/// Adds two signed or unsigned 64-bit integer values, returning the +/// lower 64 bits of the sum. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDQ instruction. +/// +/// \param __a +/// A 64-bit integer. +/// \param __b +/// A 64-bit integer. +/// \returns A 64-bit integer containing the sum of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_add_si64(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b); +} + +/// Adds the corresponding elements of two 128-bit vectors of [2 x i64], +/// saving the lower 64 bits of each sum in the corresponding element of a +/// 128-bit result vector of [2 x i64]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDQ / PADDQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. +/// \param __b +/// A 128-bit vector of [2 x i64]. +/// \returns A 128-bit vector of [2 x i64] containing the sums of both +/// parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_add_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a + (__v2du)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// signed [16 x i8] vectors, saving each sum in the corresponding element of +/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are +/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDSB / PADDSB instruction. +/// +/// \param __a +/// A 128-bit signed [16 x i8] vector. +/// \param __b +/// A 128-bit signed [16 x i8] vector. +/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of +/// both parameters. 
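The difference from the wrapping _mm_add_epi8 is easiest to see side by side; a minimal sketch (illustrative only; the _mm_adds_epi8 definition follows below):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8(100), b = _mm_set1_epi8(100);
  signed char wrap[16], sat[16];
  _mm_storeu_si128((__m128i *)wrap, _mm_add_epi8(a, b));  /* 200 wraps to -56 */
  _mm_storeu_si128((__m128i *)sat, _mm_adds_epi8(a, b));  /* clamped to 127 */
  printf("%d %d\n", wrap[0], sat[0]); /* prints: -56 127 */
  return 0;
}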
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_adds_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// signed [8 x i16] vectors, saving each sum in the corresponding element of +/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF +/// are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to +/// 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDSW / PADDSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of +/// both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_adds_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// unsigned [16 x i8] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF +/// are saturated to 0xFF. Negative sums are saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums +/// of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_adds_epu8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// unsigned [8 x i16] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [8 x i16]. Positive sums greater than +/// 0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums +/// of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_adds_epu16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b); +} + +/// Computes the rounded averages of corresponding elements of two +/// 128-bit unsigned [16 x i8] vectors, saving each result in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAVGB / PAVGB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded +/// averages of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_avg_epu8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b); +} + +/// Computes the rounded averages of corresponding elements of two +/// 128-bit unsigned [8 x i16] vectors, saving each result in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAVGW / PAVGW instruction. 
+/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded +/// averages of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_avg_epu16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies the corresponding elements of two 128-bit signed [8 x i16] +/// vectors, producing eight intermediate 32-bit signed integer products, and +/// adds the consecutive pairs of 32-bit products to form a 128-bit signed +/// [4 x i32] vector. +/// +/// For example, bits [15:0] of both parameters are multiplied producing a +/// 32-bit product, bits [31:16] of both parameters are multiplied producing +/// a 32-bit product, and the sum of those two products becomes bits [31:0] +/// of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMADDWD / PMADDWD instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [4 x i32] vector containing the sums of products +/// of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_madd_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b); +} + +/// Compares corresponding elements of two 128-bit signed [8 x i16] +/// vectors, saving the greater value from each comparison in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSW / PMAXSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the greater value of +/// each comparison. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Compares corresponding elements of two 128-bit unsigned [16 x i8] +/// vectors, saving the greater value from each comparison in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXUB / PMAXUB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of +/// each comparison. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epu8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b); +} + +/// Compares corresponding elements of two 128-bit signed [8 x i16] +/// vectors, saving the smaller value from each comparison in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSW / PMINSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of +/// each comparison. 
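Taken together, the signed 16-bit max and min documented here make a branch-free per-lane clamp; a minimal sketch with a hypothetical helper name (illustrative only; the _mm_min_epi16 definition follows below):

#include <emmintrin.h>
#include <stdio.h>

/* Clamp each signed 16-bit lane of x into [lo, hi]. */
static __m128i clamp_epi16(__m128i x, short lo, short hi) {
  return _mm_min_epi16(_mm_max_epi16(x, _mm_set1_epi16(lo)),
                       _mm_set1_epi16(hi));
}

int main(void) {
  __m128i v = _mm_setr_epi16(-500, 0, 80, 1000, 3, -3, 127, -128);
  short out[8];
  _mm_storeu_si128((__m128i *)out, clamp_epi16(v, -100, 100));
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* -100 0 80 100 */
  return 0;
}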
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Compares corresponding elements of two 128-bit unsigned [16 x i8] +/// vectors, saving the smaller value from each comparison in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINUB / PMINUB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of +/// each comparison. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epu8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b); +} + +/// Multiplies the corresponding elements of two signed [8 x i16] +/// vectors, saving the upper 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit signed [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULHW / PMULHW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of +/// each of the eight 32-bit products. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mulhi_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies the corresponding elements of two unsigned [8 x i16] +/// vectors, saving the upper 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit unsigned [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULHUW / PMULHUW instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits +/// of each of the eight 32-bit products. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mulhi_epu16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies the corresponding elements of two signed [8 x i16] +/// vectors, saving the lower 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit signed [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULLW / PMULLW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of +/// each of the eight 32-bit products. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mullo_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)((__v8hu)__a * (__v8hu)__b); +} + +/// Multiplies 32-bit unsigned integer values contained in the lower bits +/// of the two 64-bit integer vectors and returns the 64-bit unsigned +/// product. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULUDQ instruction. +/// +/// \param __a +/// A 64-bit integer containing one of the source operands. +/// \param __b +/// A 64-bit integer containing one of the source operands. +/// \returns A 64-bit integer vector containing the product of both operands. 
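A common idiom pairs _mm_mulhi_epi16 with _mm_mullo_epi16 and interleaves the two halves to recover full 32-bit products; a minimal sketch (illustrative only; the _mm_mul_su32 definition documented above follows below):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi16(1000), b = _mm_set1_epi16(2000);
  __m128i lo = _mm_mullo_epi16(a, b); /* low 16 bits of each product */
  __m128i hi = _mm_mulhi_epi16(a, b); /* high 16 bits of each product */
  /* Interleaving low/high halves yields the 32-bit products of lanes 0..3. */
  int out[4];
  _mm_storeu_si128((__m128i *)out, _mm_unpacklo_epi16(lo, hi));
  printf("%d\n", out[0]); /* prints: 2000000 */
  return 0;
}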
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_mul_su32(__m64 __a, __m64 __b) +{ + return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b); +} + +/// Multiplies 32-bit unsigned integer values contained in the lower +/// bits of the corresponding elements of two [2 x i64] vectors, and returns +/// the 64-bit products in the corresponding elements of a [2 x i64] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULUDQ / PMULUDQ instruction. +/// +/// \param __a +/// A [2 x i64] vector containing one of the source operands. +/// \param __b +/// A [2 x i64] vector containing one of the source operands. +/// \returns A [2 x i64] vector containing the product of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mul_epu32(__m128i __a, __m128i __b) +{ + return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b); +} + +/// Computes the absolute differences of corresponding 8-bit integer +/// values in two 128-bit vectors. Sums the first 8 absolute differences, and +/// separately sums the second 8 absolute differences. Packs these two +/// unsigned 16-bit integer sums into the upper and lower elements of a +/// [2 x i64] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSADBW / PSADBW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A [2 x i64] vector containing the sums of the sets of absolute +/// differences between both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sad_epu8(__m128i __a, __m128i __b) +{ + return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b); +} + +/// Subtracts the corresponding 8-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBB / PSUBB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sub_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)((__v16qu)__a - (__v16qu)__b); +} + +/// Subtracts the corresponding 16-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBW / PSUBW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sub_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)((__v8hu)__a - (__v8hu)__b); +} + +/// Subtracts the corresponding 32-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBD / PSUBD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sub_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a - (__v4su)__b); +} + +/// Subtracts signed or unsigned 64-bit integer values and writes the +/// difference to the corresponding bits in the destination. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBQ instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the minuend. +/// \param __b +/// A 64-bit integer vector containing the subtrahend. +/// \returns A 64-bit integer vector containing the difference of the values in +/// the operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sub_si64(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b); +} + +/// Subtracts the corresponding elements of two [2 x i64] vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBQ / PSUBQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sub_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a - (__v2du)__b); +} + +/// Subtracts corresponding 8-bit signed integer values in the input and +/// returns the differences in the corresponding bytes in the destination. +/// Differences greater than 0x7F are saturated to 0x7F, and differences less +/// than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBSB / PSUBSB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_subs_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b); +} + +/// Subtracts corresponding 16-bit signed integer values in the input and +/// returns the differences in the corresponding bytes in the destination. +/// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less +/// than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBSW / PSUBSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_subs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Subtracts corresponding 8-bit unsigned integer values in the input +/// and returns the differences in the corresponding bytes in the +/// destination. Differences less than 0x00 are saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBUSB / PSUBUSB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the unsigned integer +/// differences of the values in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_subs_epu8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b); +} + +/// Subtracts corresponding 16-bit unsigned integer values in the input +/// and returns the differences in the corresponding bytes in the +/// destination. 
+/// Differences less than 0x0000 are saturated to 0x0000.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPSUBUSW / PSUBUSW instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the minuends.
+/// \param __b
+/// A 128-bit integer vector containing the subtrahends.
+/// \returns A 128-bit integer vector containing the unsigned integer
+/// differences of the values in the operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_subs_epu16(__m128i __a, __m128i __b)
+{
+ return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+}
+
+/// Performs a bitwise AND of two 128-bit integer vectors.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPAND / PAND instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise AND of the values
+/// in both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_and_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a & (__v2du)__b);
+}
+
+/// Performs a bitwise AND of two 128-bit integer vectors, using the
+/// one's complement of the values contained in the first source operand.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPANDN / PANDN instruction.
+///
+/// \param __a
+/// A 128-bit vector containing the left source operand. The one's complement
+/// of this value is used in the bitwise AND.
+/// \param __b
+/// A 128-bit vector containing the right source operand.
+/// \returns A 128-bit integer vector containing the bitwise AND of the one's
+/// complement of the first operand and the values in the second operand.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_andnot_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)(~(__v2du)__a & (__v2du)__b);
+}
+
+/// Performs a bitwise OR of two 128-bit integer vectors.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPOR / POR instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise OR of the values
+/// in both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_or_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a | (__v2du)__b);
+}
+
+/// Performs a bitwise exclusive OR of two 128-bit integer vectors.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPXOR / PXOR instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 128-bit integer vector containing one of the source operands.
+/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
+/// values in both operands.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_xor_si128(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a ^ (__v2du)__b);
+}
+
+/// Left-shifts the 128-bit integer vector operand by the specified
+/// number of bytes. Low-order bits are cleared.
+///
+/// \headerfile
+///
+/// \code
+/// __m128i _mm_slli_si128(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPSLLDQ / PSLLDQ instruction.
+///
+/// \param a
+/// A 128-bit integer vector containing the source operand. 
+/// \param imm +/// An immediate value specifying the number of bytes to left-shift operand +/// \a a. +/// \returns A 128-bit integer vector containing the left-shifted value. +#define _mm_slli_si128(a, imm) \ + (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)) + +#define _mm_bslli_si128(a, imm) \ + (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)) + +/// Left-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLW / PSLLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_slli_epi16(__m128i __a, int __count) +{ + return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count); +} + +/// Left-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLW / PSLLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sll_epi16(__m128i __a, __m128i __count) +{ + return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count); +} + +/// Left-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLD / PSLLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_slli_epi32(__m128i __a, int __count) +{ + return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count); +} + +/// Left-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLD / PSLLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sll_epi32(__m128i __a, __m128i __count) +{ + return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count); +} + +/// Left-shifts each 64-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. 
+/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_slli_epi64(__m128i __a, int __count) +{ + return __builtin_ia32_psllqi128((__v2di)__a, __count); +} + +/// Left-shifts each 64-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sll_epi64(__m128i __a, __m128i __count) +{ + return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count); +} + +/// Right-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAW / PSRAW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srai_epi16(__m128i __a, int __count) +{ + return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count); +} + +/// Right-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAW / PSRAW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sra_epi16(__m128i __a, __m128i __count) +{ + return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count); +} + +/// Right-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAD / PSRAD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srai_epi32(__m128i __a, int __count) +{ + return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count); +} + +/// Right-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAD / PSRAD instruction. 
+/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sra_epi32(__m128i __a, __m128i __count) +{ + return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count); +} + +/// Right-shifts the 128-bit integer vector operand by the specified +/// number of bytes. High-order bits are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_srli_si128(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSRLDQ / PSRLDQ instruction. +/// +/// \param a +/// A 128-bit integer vector containing the source operand. +/// \param imm +/// An immediate value specifying the number of bytes to right-shift operand +/// \a a. +/// \returns A 128-bit integer vector containing the right-shifted value. +#define _mm_srli_si128(a, imm) \ + (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)) + +#define _mm_bsrli_si128(a, imm) \ + (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)) + +/// Right-shifts each of 16-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLW / PSRLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srli_epi16(__m128i __a, int __count) +{ + return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count); +} + +/// Right-shifts each of 16-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLW / PSRLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srl_epi16(__m128i __a, __m128i __count) +{ + return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count); +} + +/// Right-shifts each of 32-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLD / PSRLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srli_epi32(__m128i __a, int __count) +{ + return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count); +} + +/// Right-shifts each of 32-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLD / PSRLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srl_epi32(__m128i __a, __m128i __count) +{ + return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count); +} + +/// Right-shifts each of 64-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srli_epi64(__m128i __a, int __count) +{ + return __builtin_ia32_psrlqi128((__v2di)__a, __count); +} + +/// Right-shifts each of 64-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srl_epi64(__m128i __a, __m128i __count) +{ + return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count); +} + +/// Compares each of the corresponding 8-bit values of the 128-bit +/// integer vectors for equality. Each comparison yields 0x0 for false, 0xFF +/// for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQB / PCMPEQB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpeq_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)((__v16qi)__a == (__v16qi)__b); +} + +/// Compares each of the corresponding 16-bit values of the 128-bit +/// integer vectors for equality. Each comparison yields 0x0 for false, +/// 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQW / PCMPEQW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpeq_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)((__v8hi)__a == (__v8hi)__b); +} + +/// Compares each of the corresponding 32-bit values of the 128-bit +/// integer vectors for equality. Each comparison yields 0x0 for false, +/// 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQD / PCMPEQD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. 
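The all-ones/all-zeros lane masks produced by these comparisons are typically condensed with _mm_movemask_epi8, which is defined later in this header; a minimal byte-search sketch (illustrative only; the _mm_cmpeq_epi32 definition follows below):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  const char buf[16] = "find the x here"; /* 15 chars + NUL = 16 bytes */
  __m128i chunk = _mm_loadu_si128((const __m128i *)buf);
  __m128i eq = _mm_cmpeq_epi8(chunk, _mm_set1_epi8('x'));
  /* Bit i of the mask is set iff byte i matched; 'x' is at index 9. */
  printf("mask = 0x%x\n", _mm_movemask_epi8(eq)); /* mask = 0x200 */
  return 0;
}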
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpeq_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4si)__a == (__v4si)__b); +} + +/// Compares each of the corresponding signed 8-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are +/// greater than those in the second operand. Each comparison yields 0x0 for +/// false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpgt_epi8(__m128i __a, __m128i __b) +{ + /* This function always performs a signed comparison, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)((__v16qs)__a > (__v16qs)__b); +} + +/// Compares each of the corresponding signed 16-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are greater than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpgt_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)((__v8hi)__a > (__v8hi)__b); +} + +/// Compares each of the corresponding signed 32-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are greater than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpgt_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4si)__a > (__v4si)__b); +} + +/// Compares each of the corresponding signed 8-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are less +/// than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmplt_epi8(__m128i __a, __m128i __b) +{ + return _mm_cmpgt_epi8(__b, __a); +} + +/// Compares each of the corresponding signed 16-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are less than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. 
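As the definitions below show, the less-than forms simply call the greater-than forms with the operands swapped; a minimal sketch of that equivalence (illustrative only):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi16(1), b = _mm_set1_epi16(2);
  /* a < b is evaluated as b > a, so both masks are identical (all ones). */
  __m128i lt = _mm_cmplt_epi16(a, b);
  __m128i gt = _mm_cmpgt_epi16(b, a);
  printf("0x%x 0x%x\n", _mm_movemask_epi8(lt), _mm_movemask_epi8(gt)); /* 0xffff 0xffff */
  return 0;
}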
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmplt_epi16(__m128i __a, __m128i __b) +{ + return _mm_cmpgt_epi16(__b, __a); +} + +/// Compares each of the corresponding signed 32-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are less than those in the second operand. +/// +/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmplt_epi32(__m128i __a, __m128i __b) +{ + return _mm_cmpgt_epi32(__b, __a); +} + +#ifdef __x86_64__ +/// Converts a 64-bit signed integer value from the second operand into a +/// double-precision value and returns it in the lower element of a [2 x +/// double] vector; the upper element of the returned vector is copied from +/// the upper element of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this operand are +/// copied to the upper 64 bits of the destination. +/// \param __b +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// converted value of the second operand. The upper 64 bits are copied from +/// the upper 64 bits of the first operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtsi64_sd(__m128d __a, long long __b) +{ + __a[0] = __b; + return __a; +} + +/// Converts the first (lower) element of a vector of [2 x double] into a +/// 64-bit signed integer value, according to the current rounding mode. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 64-bit signed integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvtsd_si64(__m128d __a) +{ + return __builtin_ia32_cvtsd2si64((__v2df)__a); +} + +/// Converts the first (lower) element of a vector of [2 x double] into a +/// 64-bit signed integer value, truncating the result when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 64-bit signed integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvttsd_si64(__m128d __a) +{ + return __builtin_ia32_cvttsd2si64((__v2df)__a); +} +#endif + +/// Converts a vector of [4 x i32] into a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PS / CVTDQ2PS instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit vector of [4 x float] containing the converted values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtepi32_ps(__m128i __a) +{ + return (__m128)__builtin_convertvector((__v4si)__a, __v4sf); +} + +/// Converts a vector of [4 x float] into a vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2DQ / CVTPS2DQ instruction. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit integer vector of [4 x i32] containing the converted +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtps_epi32(__m128 __a) +{ + return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); +} + +/// Converts a vector of [4 x float] into a vector of [4 x i32], +/// truncating the result when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPS2DQ / CVTTPS2DQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x i32] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttps_epi32(__m128 __a) +{ + return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a); +} + +/// Returns a vector of [4 x i32] where the lowest element is the input +/// operand and the remaining elements are zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A 32-bit signed integer operand. +/// \returns A 128-bit vector of [4 x i32]. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsi32_si128(int __a) +{ + return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 }; +} + +#ifdef __x86_64__ +/// Returns a vector of [2 x i64] where the lower element is the input +/// operand and the upper element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [2 x i64] containing the converted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsi64_si128(long long __a) +{ + return __extension__ (__m128i)(__v2di){ __a, 0 }; +} +#endif + +/// Moves the least significant 32 bits of a vector of [4 x i32] to a +/// 32-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A vector of [4 x i32]. The least significant 32 bits are moved to the +/// destination. +/// \returns A 32-bit signed integer containing the moved value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtsi128_si32(__m128i __a) +{ + __v4si __b = (__v4si)__a; + return __b[0]; +} + +#ifdef __x86_64__ +/// Moves the least significant 64 bits of a vector of [2 x i64] to a +/// 64-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A vector of [2 x i64]. The least significant 64 bits are moved to the +/// destination. +/// \returns A 64-bit signed integer containing the moved value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvtsi128_si64(__m128i __a) +{ + return __a[0]; +} +#endif + +/// Moves packed integer values from an aligned 128-bit memory location +/// to elements in a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA / MOVDQA instruction. +/// +/// \param __p +/// An aligned pointer to a memory location containing integer values. +/// \returns A 128-bit integer vector containing the moved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_load_si128(__m128i const *__p) +{ + return *__p; +} + +/// Moves packed integer values from an unaligned 128-bit memory location +/// to elements in a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU / MOVDQU instruction. 
+/// +/// \param __p +/// A pointer to a memory location containing integer values. +/// \returns A 128-bit integer vector containing the moved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadu_si128(__m128i_u const *__p) +{ + struct __loadu_si128 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si128*)__p)->__v; +} + +/// Returns a vector of [2 x i64] where the lower element is taken from +/// the lower element of the operand, and the upper element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __p +/// A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of +/// the destination. +/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the +/// moved value. The higher order bits are cleared. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadl_epi64(__m128i_u const *__p) +{ + struct __mm_loadl_epi64_struct { + long long __u; + } __attribute__((__packed__, __may_alias__)); + return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0}; +} + +/// Generates a 128-bit vector of [4 x i32] with unspecified content. +/// This could be used as an argument to another intrinsic function where the +/// argument is required but the value is not actually used. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit vector of [4 x i32] with unspecified content. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_undefined_si128(void) +{ + return (__m128i)__builtin_ia32_undef128(); +} + +/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with +/// the specified 64-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q1 +/// A 64-bit integer value used to initialize the upper 64 bits of the +/// destination vector of [2 x i64]. +/// \param __q0 +/// A 64-bit integer value used to initialize the lower 64 bits of the +/// destination vector of [2 x i64]. +/// \returns An initialized 128-bit vector of [2 x i64] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi64x(long long __q1, long long __q0) +{ + return __extension__ (__m128i)(__v2di){ __q0, __q1 }; +} + +/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with +/// the specified 64-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q1 +/// A 64-bit integer value used to initialize the upper 64 bits of the +/// destination vector of [2 x i64]. +/// \param __q0 +/// A 64-bit integer value used to initialize the lower 64 bits of the +/// destination vector of [2 x i64]. +/// \returns An initialized 128-bit vector of [2 x i64] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi64(__m64 __q1, __m64 __q0) +{ + return _mm_set_epi64x((long long)__q1, (long long)__q0); +} + +/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with +/// the specified 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i3 +/// A 32-bit integer value used to initialize bits [127:96] of the +/// destination vector. 
+/// \param __i2 +/// A 32-bit integer value used to initialize bits [95:64] of the destination +/// vector. +/// \param __i1 +/// A 32-bit integer value used to initialize bits [63:32] of the destination +/// vector. +/// \param __i0 +/// A 32-bit integer value used to initialize bits [31:0] of the destination +/// vector. +/// \returns An initialized 128-bit vector of [4 x i32] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi32(int __i3, int __i2, int __i1, int __i0) +{ + return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3}; +} + +/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with +/// the specified 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w7 +/// A 16-bit integer value used to initialize bits [127:112] of the +/// destination vector. +/// \param __w6 +/// A 16-bit integer value used to initialize bits [111:96] of the +/// destination vector. +/// \param __w5 +/// A 16-bit integer value used to initialize bits [95:80] of the destination +/// vector. +/// \param __w4 +/// A 16-bit integer value used to initialize bits [79:64] of the destination +/// vector. +/// \param __w3 +/// A 16-bit integer value used to initialize bits [63:48] of the destination +/// vector. +/// \param __w2 +/// A 16-bit integer value used to initialize bits [47:32] of the destination +/// vector. +/// \param __w1 +/// A 16-bit integer value used to initialize bits [31:16] of the destination +/// vector. +/// \param __w0 +/// A 16-bit integer value used to initialize bits [15:0] of the destination +/// vector. +/// \returns An initialized 128-bit vector of [8 x i16] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0) +{ + return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 }; +} + +/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with +/// the specified 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b15 +/// Initializes bits [127:120] of the destination vector. +/// \param __b14 +/// Initializes bits [119:112] of the destination vector. +/// \param __b13 +/// Initializes bits [111:104] of the destination vector. +/// \param __b12 +/// Initializes bits [103:96] of the destination vector. +/// \param __b11 +/// Initializes bits [95:88] of the destination vector. +/// \param __b10 +/// Initializes bits [87:80] of the destination vector. +/// \param __b9 +/// Initializes bits [79:72] of the destination vector. +/// \param __b8 +/// Initializes bits [71:64] of the destination vector. +/// \param __b7 +/// Initializes bits [63:56] of the destination vector. +/// \param __b6 +/// Initializes bits [55:48] of the destination vector. +/// \param __b5 +/// Initializes bits [47:40] of the destination vector. +/// \param __b4 +/// Initializes bits [39:32] of the destination vector. +/// \param __b3 +/// Initializes bits [31:24] of the destination vector. +/// \param __b2 +/// Initializes bits [23:16] of the destination vector. +/// \param __b1 +/// Initializes bits [15:8] of the destination vector. +/// \param __b0 +/// Initializes bits [7:0] of the destination vector. 
+/// \returns An initialized 128-bit vector of [16 x i8] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) +{ + return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 }; +} + +/// Initializes both values in a 128-bit integer vector with the +/// specified 64-bit integer value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q +/// Integer value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit integer vector of [2 x i64] with both +/// elements containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set1_epi64x(long long __q) +{ + return _mm_set_epi64x(__q, __q); +} + +/// Initializes both values in a 128-bit vector of [2 x i64] with the +/// specified 64-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q +/// A 64-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [2 x i64] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set1_epi64(__m64 __q) +{ + return _mm_set_epi64(__q, __q); +} + +/// Initializes all values in a 128-bit vector of [4 x i32] with the +/// specified 32-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i +/// A 32-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [4 x i32] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set1_epi32(int __i) +{ + return _mm_set_epi32(__i, __i, __i, __i); +} + +/// Initializes all values in a 128-bit vector of [8 x i16] with the +/// specified 16-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w +/// A 16-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [8 x i16] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set1_epi16(short __w) +{ + return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Initializes all values in a 128-bit vector of [16 x i8] with the +/// specified 8-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b +/// An 8-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [16 x i8] with all elements +/// containing the value provided in the operand. 
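+
+/* Editorial sketch, not from the upstream header: note the argument order.
+ * _mm_set_* takes the highest-numbered element first, _mm_set1_* splats a
+ * single value, and _mm_cvtsi128_si32 (documented above) reads lane 0 back:
+ *
+ *   __m128i v = _mm_set_epi32(3, 2, 1, 0);  // bits [31:0] hold 0
+ *   int     x = _mm_cvtsi128_si32(v);       // 0
+ *   __m128i s = _mm_set1_epi16(7);          // 7 in all eight lanes
+ */
+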
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set1_epi8(char __b) +{ + return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __q0 +/// A 64-bit integral value used to initialize the lower 64 bits of the +/// result. +/// \param __q1 +/// A 64-bit integral value used to initialize the upper 64 bits of the +/// result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setr_epi64(__m64 __q0, __m64 __q1) +{ + return _mm_set_epi64(__q1, __q0); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3) +{ + return _mm_set_epi32(__i3, __i2, __i1, __i0); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w0 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \param __w1 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w2 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w3 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w4 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w5 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w6 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w7 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7) +{ + return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b0 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \param __b1 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b2 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b3 +/// An 8-bit integral value used to initialize bits [31:24] of the result. 
+/// \param __b4 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b5 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b6 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b7 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b8 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b9 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15) +{ + return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/// Creates a 128-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit integer vector with all elements set to +/// zero. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setzero_si128(void) +{ + return __extension__ (__m128i)(__v2di){ 0LL, 0LL }; +} + +/// Stores a 128-bit integer vector to a memory location aligned on a +/// 128-bit boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to an aligned memory location that will receive the integer +/// values. +/// \param __b +/// A 128-bit integer vector containing the values to be moved. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_si128(__m128i *__p, __m128i __b) +{ + *__p = __b; +} + +/// Stores a 128-bit integer vector to an unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. +/// \param __b +/// A 128-bit integer vector containing the values to be moved. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_si128(__m128i_u *__p, __m128i __b) +{ + struct __storeu_si128 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si128*)__p)->__v = __b; +} + +/// Stores a 64-bit integer value from the low element of a 128-bit integer +/// vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __b +/// A 128-bit integer vector containing the value to be stored. 
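+
+/* Editorial sketch, not from the upstream header: _mm_setr_* simply
+ * reverses the argument order of _mm_set_*, and the unaligned load/store
+ * pair gives a 16-byte copy; copy16 is a hypothetical helper name:
+ *
+ *   // _mm_setr_epi32(0, 1, 2, 3) is _mm_set_epi32(3, 2, 1, 0)
+ *   static void copy16(void *dst, const void *src) {
+ *     _mm_storeu_si128((__m128i_u *)dst,
+ *                      _mm_loadu_si128((const __m128i_u *)src));
+ *   }
+ */
+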
+static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_si64(void *__p, __m128i __b) +{ + struct __storeu_si64 { + long long __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0]; +} + +/// Stores a 32-bit integer value from the low element of a 128-bit integer +/// vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __b +/// A 128-bit integer vector containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_si32(void *__p, __m128i __b) +{ + struct __storeu_si32 { + int __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0]; +} + +/// Stores a 16-bit integer value from the low element of a 128-bit integer +/// vector. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __p +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __b +/// A 128-bit integer vector containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_si16(void *__p, __m128i __b) +{ + struct __storeu_si16 { + short __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0]; +} + +/// Moves bytes selected by the mask from the first operand to the +/// specified unaligned memory location. When a mask bit is 1, the +/// corresponding byte is written, otherwise it is not written. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). Exception and trap behavior for elements not selected +/// for storage to memory are implementation dependent. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVDQU / MASKMOVDQU +/// instruction. +/// +/// \param __d +/// A 128-bit integer vector containing the values to be moved. +/// \param __n +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each byte represents the mask bits. +/// \param __p +/// A pointer to an unaligned 128-bit memory location where the specified +/// values are moved. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p) +{ + __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p); +} + +/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to +/// a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location that will receive the lower 64 bits +/// of the integer vector parameter. +/// \param __a +/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the +/// value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storel_epi64(__m128i_u *__p, __m128i __a) +{ + struct __mm_storel_epi64_struct { + long long __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0]; +} + +/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit +/// aligned memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. 
+/// +/// \param __p +/// A pointer to the 128-bit aligned memory location used to store the value. +/// \param __a +/// A vector of [2 x double] containing the 64-bit values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_stream_pd(double *__p, __m128d __a) +{ + __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p); +} + +/// Stores a 128-bit integer vector to a 128-bit aligned memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to the 128-bit aligned memory location used to store the value. +/// \param __a +/// A 128-bit integer vector containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_stream_si128(__m128i *__p, __m128i __a) +{ + __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p); +} + +/// Stores a 32-bit integer value in the specified memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTI instruction. +/// +/// \param __p +/// A pointer to the 32-bit memory location used to store the value. +/// \param __a +/// A 32-bit integer containing the value to be stored. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2"))) +_mm_stream_si32(int *__p, int __a) +{ + __builtin_ia32_movnti(__p, __a); +} + +#ifdef __x86_64__ +/// Stores a 64-bit integer value in the specified memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTIQ instruction. +/// +/// \param __p +/// A pointer to the 64-bit memory location used to store the value. +/// \param __a +/// A 64-bit integer containing the value to be stored. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2"))) +_mm_stream_si64(long long *__p, long long __a) +{ + __builtin_ia32_movnti64(__p, __a); +} +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/// The cache line containing \a __p is flushed and invalidated from all +/// caches in the coherency domain. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLFLUSH instruction. +/// +/// \param __p +/// A pointer to the memory location used to identify the cache line to be +/// flushed. +void _mm_clflush(void const * __p); + +/// Forces strong memory ordering (serialization) between load +/// instructions preceding this instruction and load instructions following +/// this instruction, ensuring the system completes all previous loads before +/// executing subsequent loads. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LFENCE instruction. +/// +void _mm_lfence(void); + +/// Forces strong memory ordering (serialization) between load and store +/// instructions preceding this instruction and load and store instructions +/// following this instruction, ensuring that the system completes all +/// previous memory accesses before executing subsequent memory accesses. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MFENCE instruction. 
+/// +void _mm_mfence(void); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// Converts 16-bit signed integers from both 128-bit integer vector +/// operands into 8-bit signed integers, and packs the results into the +/// destination. Positive values greater than 0x7F are saturated to 0x7F. +/// Negative values less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKSSWB / PACKSSWB instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to a 8-bit signed integer with +/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less +/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to a 8-bit signed integer with +/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less +/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [16 x i8] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_packs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b); +} + +/// Converts 32-bit signed integers from both 128-bit integer vector +/// operands into 16-bit signed integers, and packs the results into the +/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF. +/// Negative values less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKSSDW / PACKSSDW instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as +/// a signed integer and is converted to a 16-bit signed integer with +/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values +/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// are written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as +/// a signed integer and is converted to a 16-bit signed integer with +/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values +/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// are written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [8 x i16] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_packs_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b); +} + +/// Converts 16-bit signed integers from both 128-bit integer vector +/// operands into 8-bit unsigned integers, and packs the results into the +/// destination. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0x00 are saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKUSWB / PACKUSWB instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// written to the lower 64 bits of the result. 
+/// \param __b +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [16 x i8] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_packus_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b); +} + +/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using +/// the immediate-value parameter as a selector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __imm +/// An immediate value. Bits [2:0] selects values from \a __a to be assigned +/// to bits[15:0] of the result. \n +/// 000: assign values from bits [15:0] of \a __a. \n +/// 001: assign values from bits [31:16] of \a __a. \n +/// 010: assign values from bits [47:32] of \a __a. \n +/// 011: assign values from bits [63:48] of \a __a. \n +/// 100: assign values from bits [79:64] of \a __a. \n +/// 101: assign values from bits [95:80] of \a __a. \n +/// 110: assign values from bits [111:96] of \a __a. \n +/// 111: assign values from bits [127:112] of \a __a. +/// \returns An integer, whose lower 16 bits are selected from the 128-bit +/// integer vector parameter and the remaining bits are assigned zeros. +#define _mm_extract_epi16(a, imm) \ + (int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \ + (int)(imm)) + +/// Constructs a 128-bit integer vector by first making a copy of the +/// 128-bit integer vector parameter, and then inserting the lower 16 bits +/// of an integer parameter into an offset specified by the immediate-value +/// parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPINSRW / PINSRW instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. This vector is copied to the +/// result and then one of the eight elements in the result is replaced by +/// the lower 16 bits of \a __b. +/// \param __b +/// An integer. The lower 16 bits of this parameter are written to the +/// result beginning at an offset specified by \a __imm. +/// \param __imm +/// An immediate value specifying the bit offset in the result at which the +/// lower 16 bits of \a __b are written. +/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi16(a, b, imm) \ + (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \ + (int)(imm)) + +/// Copies the values of the most significant bits from each 8-bit +/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask +/// value, zero-extends the value, and writes it to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVMSKB / PMOVMSKB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values with bits to be extracted. +/// \returns The most significant bits from each 8-bit element in \a __a, +/// written to bits [15:0]. The other bits are assigned zeros. 
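+
+/* Editorial sketch, not from the upstream header: combining the byte
+ * compare from earlier in this file with _mm_movemask_epi8 gives a fast
+ * 16-byte equality test:
+ *
+ *   __m128i eq = _mm_cmpeq_epi8(a, b);
+ *   int same = (_mm_movemask_epi8(eq) == 0xFFFF);  // 1 if all bytes match
+ */
+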
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_movemask_epi8(__m128i __a) +{ + return __builtin_ia32_pmovmskb128((__v16qi)__a); +} + +/// Constructs a 128-bit integer vector by shuffling four 32-bit +/// elements of a 128-bit integer vector parameter, using the immediate-value +/// parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shuffle_epi32(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFD / PSHUFD instruction. +/// +/// \param a +/// A 128-bit integer vector containing the values to be copied. +/// \param imm +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from a. The destinations within the 128-bit destination are assigned +/// values as follows: \n +/// Bits [1:0] are used to assign values to bits [31:0] of the result. \n +/// Bits [3:2] are used to assign values to bits [63:32] of the result. \n +/// Bits [5:4] are used to assign values to bits [95:64] of the result. \n +/// Bits [7:6] are used to assign values to bits [127:96] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [31:0] of \a a. \n +/// 01: assign values from bits [63:32] of \a a. \n +/// 10: assign values from bits [95:64] of \a a. \n +/// 11: assign values from bits [127:96] of \a a. +/// \returns A 128-bit integer vector containing the shuffled values. +#define _mm_shuffle_epi32(a, imm) \ + (__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)) + +/// Constructs a 128-bit integer vector by shuffling four lower 16-bit +/// elements of a 128-bit integer vector of [8 x i16], using the immediate +/// value parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFLW / PSHUFLW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits +/// [127:64] of the result. +/// \param imm +/// An 8-bit immediate value specifying which elements to copy from \a a. \n +/// Bits[1:0] are used to assign values to bits [15:0] of the result. \n +/// Bits[3:2] are used to assign values to bits [31:16] of the result. \n +/// Bits[5:4] are used to assign values to bits [47:32] of the result. \n +/// Bits[7:6] are used to assign values to bits [63:48] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [15:0] of \a a. \n +/// 01: assign values from bits [31:16] of \a a. \n +/// 10: assign values from bits [47:32] of \a a. \n +/// 11: assign values from bits [63:48] of \a a. \n +/// \returns A 128-bit integer vector containing the shuffled values. +#define _mm_shufflelo_epi16(a, imm) \ + (__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)) + +/// Constructs a 128-bit integer vector by shuffling four upper 16-bit +/// elements of a 128-bit integer vector of [8 x i16], using the immediate +/// value parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFHW / PSHUFHW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits +/// [63:0] of the result. +/// \param imm +/// An 8-bit immediate value specifying which elements to copy from \a a. \n +/// Bits[1:0] are used to assign values to bits [79:64] of the result. \n +/// Bits[3:2] are used to assign values to bits [95:80] of the result. 
\n +/// Bits[5:4] are used to assign values to bits [111:96] of the result. \n +/// Bits[7:6] are used to assign values to bits [127:112] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [79:64] of \a a. \n +/// 01: assign values from bits [95:80] of \a a. \n +/// 10: assign values from bits [111:96] of \a a. \n +/// 11: assign values from bits [127:112] of \a a. \n +/// \returns A 128-bit integer vector containing the shuffled values. +#define _mm_shufflehi_epi16(a, imm) \ + (__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)) + +/// Unpacks the high-order (index 8-15) values from two 128-bit vectors +/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHBW / PUNPCKHBW +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// Bits [71:64] are written to bits [7:0] of the result. \n +/// Bits [79:72] are written to bits [23:16] of the result. \n +/// Bits [87:80] are written to bits [39:32] of the result. \n +/// Bits [95:88] are written to bits [55:48] of the result. \n +/// Bits [103:96] are written to bits [71:64] of the result. \n +/// Bits [111:104] are written to bits [87:80] of the result. \n +/// Bits [119:112] are written to bits [103:96] of the result. \n +/// Bits [127:120] are written to bits [119:112] of the result. +/// \param __b +/// A 128-bit vector of [16 x i8]. \n +/// Bits [71:64] are written to bits [15:8] of the result. \n +/// Bits [79:72] are written to bits [31:24] of the result. \n +/// Bits [87:80] are written to bits [47:40] of the result. \n +/// Bits [95:88] are written to bits [63:56] of the result. \n +/// Bits [103:96] are written to bits [79:72] of the result. \n +/// Bits [111:104] are written to bits [95:88] of the result. \n +/// Bits [119:112] are written to bits [111:104] of the result. \n +/// Bits [127:120] are written to bits [127:120] of the result. +/// \returns A 128-bit vector of [16 x i8] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpackhi_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15); +} + +/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of +/// [8 x i16] and interleaves them into a 128-bit vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHWD / PUNPCKHWD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// Bits [79:64] are written to bits [15:0] of the result. \n +/// Bits [95:80] are written to bits [47:32] of the result. \n +/// Bits [111:96] are written to bits [79:64] of the result. \n +/// Bits [127:112] are written to bits [111:96] of the result. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// Bits [79:64] are written to bits [31:16] of the result. \n +/// Bits [95:80] are written to bits [63:48] of the result. \n +/// Bits [111:96] are written to bits [95:80] of the result. \n +/// Bits [127:112] are written to bits [127:112] of the result. +/// \returns A 128-bit vector of [8 x i16] containing the interleaved values. 
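+
+/* Editorial sketch, not from the upstream header: two common uses of
+ * _mm_shuffle_epi32; the _MM_SHUFFLE macro comes from <xmmintrin.h>:
+ *
+ *   __m128i b0  = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); // broadcast lane 0
+ *   __m128i rev = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3)); // reverse lanes
+ */
+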
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpackhi_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); +} + +/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of +/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHDQ / PUNPCKHDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. \n +/// Bits [95:64] are written to bits [31:0] of the destination. \n +/// Bits [127:96] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x i32]. \n +/// Bits [95:64] are written to bits [64:32] of the destination. \n +/// Bits [127:96] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x i32] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpackhi_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3); +} + +/// Unpacks the high-order 64-bit elements from two 128-bit vectors of +/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHQDQ / PUNPCKHQDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. \n +/// Bits [127:64] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x i64]. \n +/// Bits [127:64] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x i64] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpackhi_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1); +} + +/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of +/// [16 x i8] and interleaves them into a 128-bit vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLBW / PUNPCKLBW +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. \n +/// Bits [7:0] are written to bits [7:0] of the result. \n +/// Bits [15:8] are written to bits [23:16] of the result. \n +/// Bits [23:16] are written to bits [39:32] of the result. \n +/// Bits [31:24] are written to bits [55:48] of the result. \n +/// Bits [39:32] are written to bits [71:64] of the result. \n +/// Bits [47:40] are written to bits [87:80] of the result. \n +/// Bits [55:48] are written to bits [103:96] of the result. \n +/// Bits [63:56] are written to bits [119:112] of the result. +/// \param __b +/// A 128-bit vector of [16 x i8]. +/// Bits [7:0] are written to bits [15:8] of the result. \n +/// Bits [15:8] are written to bits [31:24] of the result. \n +/// Bits [23:16] are written to bits [47:40] of the result. \n +/// Bits [31:24] are written to bits [63:56] of the result. \n +/// Bits [39:32] are written to bits [79:72] of the result. \n +/// Bits [47:40] are written to bits [95:88] of the result. \n +/// Bits [55:48] are written to bits [111:104] of the result. \n +/// Bits [63:56] are written to bits [127:120] of the result. +/// \returns A 128-bit vector of [16 x i8] containing the interleaved values. 
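+
+/* Editorial sketch, not from the upstream header: a concrete view of the
+ * high-half interleave performed by _mm_unpackhi_epi16:
+ *
+ *   __m128i a  = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ *   __m128i b  = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);
+ *   __m128i hi = _mm_unpackhi_epi16(a, b);  // 4, 14, 5, 15, 6, 16, 7, 17
+ */
+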
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpacklo_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7); +} + +/// Unpacks the low-order (index 0-3) values from each of the two 128-bit +/// vectors of [8 x i16] and interleaves them into a 128-bit vector of +/// [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLWD / PUNPCKLWD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// Bits [15:0] are written to bits [15:0] of the result. \n +/// Bits [31:16] are written to bits [47:32] of the result. \n +/// Bits [47:32] are written to bits [79:64] of the result. \n +/// Bits [63:48] are written to bits [111:96] of the result. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// Bits [15:0] are written to bits [31:16] of the result. \n +/// Bits [31:16] are written to bits [63:48] of the result. \n +/// Bits [47:32] are written to bits [95:80] of the result. \n +/// Bits [63:48] are written to bits [127:112] of the result. +/// \returns A 128-bit vector of [8 x i16] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpacklo_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); +} + +/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of +/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLDQ / PUNPCKLDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. \n +/// Bits [31:0] are written to bits [31:0] of the destination. \n +/// Bits [63:32] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x i32]. \n +/// Bits [31:0] are written to bits [64:32] of the destination. \n +/// Bits [63:32] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x i32] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpacklo_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1); +} + +/// Unpacks the low-order 64-bit elements from two 128-bit vectors of +/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ / PUNPCKLQDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. \n +/// Bits [63:0] are written to bits [63:0] of the destination. \n +/// \param __b +/// A 128-bit vector of [2 x i64]. \n +/// Bits [63:0] are written to bits [127:64] of the destination. \n +/// \returns A 128-bit vector of [2 x i64] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_unpacklo_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0); +} + +/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit +/// integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVDQ2Q instruction. +/// +/// \param __a +/// A 128-bit integer vector operand. The lower 64 bits are moved to the +/// destination. +/// \returns A 64-bit integer containing the lower 64 bits of the parameter. 
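+
+/* Editorial sketch, not from the upstream header: interleaving with a zero
+ * vector is the usual SSE2 idiom for zero-extending unsigned bytes to
+ * 16-bit lanes:
+ *
+ *   __m128i z  = _mm_setzero_si128();
+ *   __m128i lo = _mm_unpacklo_epi8(v, z);  // low 8 bytes  -> 8 x u16
+ *   __m128i hi = _mm_unpackhi_epi8(v, z);  // high 8 bytes -> 8 x u16
+ */
+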
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_movepi64_pi64(__m128i __a) +{ + return (__m64)__a[0]; +} + +/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the +/// upper bits. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVD+VMOVQ instruction. +/// +/// \param __a +/// A 64-bit value. +/// \returns A 128-bit integer vector. The lower 64 bits contain the value from +/// the operand. The upper 64 bits are assigned zeros. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_movpi64_epi64(__m64 __a) +{ + return __extension__ (__m128i)(__v2di){ (long long)__a, 0 }; +} + +/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit +/// integer vector, zeroing the upper bits. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A 128-bit integer vector operand. The lower 64 bits are moved to the +/// destination. +/// \returns A 128-bit integer vector. The lower 64 bits contain the value from +/// the operand. The upper 64 bits are assigned zeros. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_move_epi64(__m128i __a) +{ + return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2); +} + +/// Unpacks the high-order 64-bit elements from two 128-bit vectors of +/// [2 x double] and interleaves them into a 128-bit vector of [2 x +/// double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the interleaved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_unpackhi_pd(__m128d __a, __m128d __b) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1); +} + +/// Unpacks the low-order 64-bit elements from two 128-bit vectors +/// of [2 x double] and interleaves them into a 128-bit vector of [2 x +/// double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the interleaved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_unpacklo_pd(__m128d __a, __m128d __b) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0); +} + +/// Extracts the sign bits of the double-precision values in the 128-bit +/// vector of [2 x double], zero-extends the value, and writes it to the +/// low-order bits of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD / MOVMSKPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the values with sign bits to +/// be extracted. +/// \returns The sign bits from each of the double-precision elements in \a __a, +/// written to bits [1:0]. The remaining bits are assigned values of zero. 
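+
+/* Editorial sketch, not from the upstream header: the two double-precision
+ * unpacks together form a 2x2 transpose of a pair of vectors:
+ *
+ *   __m128d lo = _mm_unpacklo_pd(x, y);  // { x[0], y[0] }
+ *   __m128d hi = _mm_unpackhi_pd(x, y);  // { x[1], y[1] }
+ */
+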
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_movemask_pd(__m128d __a) +{ + return __builtin_ia32_movmskpd((__v2df)__a); +} + + +/// Constructs a 128-bit floating-point vector of [2 x double] from two +/// 128-bit vector parameters of [2 x double], using the immediate-value +/// parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPD / SHUFPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param i +/// An 8-bit immediate value. The least significant two bits specify which +/// elements to copy from \a a and \a b: \n +/// Bit[0] = 0: lower element of \a a copied to lower element of result. \n +/// Bit[0] = 1: upper element of \a a copied to lower element of result. \n +/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n +/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n +/// \returns A 128-bit vector of [2 x double] containing the shuffled values. +#define _mm_shuffle_pd(a, b, i) \ + (__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + (int)(i)) + +/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit +/// floating-point vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [2 x double]. +/// \returns A 128-bit floating-point vector of [4 x float] containing the same +/// bitwise pattern as the parameter. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_castpd_ps(__m128d __a) +{ + return (__m128)__a; +} + +/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [2 x double]. +/// \returns A 128-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_castpd_si128(__m128d __a) +{ + return (__m128i)__a; +} + +/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit +/// floating-point vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 128-bit floating-point vector of [2 x double] containing the same +/// bitwise pattern as the parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_castps_pd(__m128 __a) +{ + return (__m128d)__a; +} + +/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 128-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_castps_si128(__m128 __a) +{ + return (__m128i)__a; +} + +/// Casts a 128-bit integer vector into a 128-bit floating-point vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit floating-point vector of [4 x float] containing the same +/// bitwise pattern as the parameter. 
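+
+/* Editorial sketch, not from the upstream header: _mm_movemask_pd reduces
+ * a double-precision comparison to two scalar bits, and the casts below
+ * are free reinterpretations that emit no instruction:
+ *
+ *   __m128d neg = _mm_cmplt_pd(x, _mm_setzero_pd());
+ *   int any_negative = _mm_movemask_pd(neg) != 0;  // either lane < 0.0
+ *   __m128i bits = _mm_castpd_si128(x);            // same register, new type
+ */
+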
+/// Casts a 128-bit integer vector into a 128-bit floating-point vector
+///    of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \returns A 128-bit floating-point vector of [4 x float] containing the same
+///    bitwise pattern as the parameter.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_castsi128_ps(__m128i __a)
+{
+  return (__m128)__a;
+}
+
+/// Casts a 128-bit integer vector into a 128-bit floating-point vector
+///    of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+///    A 128-bit integer vector.
+/// \returns A 128-bit floating-point vector of [2 x double] containing the same
+///    bitwise pattern as the parameter.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_castsi128_pd(__m128i __a)
+{
+  return (__m128d)__a;
+}
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/// Indicates that a spin loop is being executed for the purposes of
+///    optimizing power consumption during the loop.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the PAUSE instruction.
+///
+void _mm_pause(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_MMX
+
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
+
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
+
+#define _MM_DENORMALS_ZERO_MASK (0x0040U)
+
+#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
+#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+
+#endif /* __EMMINTRIN_H */
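[Editor's note] The _MM_*_DENORMALS_ZERO_* helpers above read and write bit 6 (DAZ) of the MXCSR register through _mm_getcsr/_mm_setcsr. A small sketch of the usual save/set/restore pattern, not part of the upstream header:

#include <emmintrin.h>

int main() {
  unsigned int saved = _MM_GET_DENORMALS_ZERO_MODE();
  // Treat denormal inputs as zero inside a performance-critical kernel.
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  // ... SSE-heavy numeric kernel runs here ...
  _MM_SET_DENORMALS_ZERO_MODE(saved);  // restore the caller's mode
  return 0;
}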
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/enqcmdintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/enqcmdintrin.h
new file mode 100644
index 0000000..30af67f
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/enqcmdintrin.h
@@ -0,0 +1,63 @@
+/*===------------------ enqcmdintrin.h - enqcmd intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <enqcmdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __ENQCMDINTRIN_H
+#define __ENQCMDINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("enqcmd")))
+
+/// Reads the 64-byte command pointed to by \a __src, formats 64-byte enqueue
+///    store data, and performs a 64-byte enqueue store to memory pointed to
+///    by \a __dst. This intrinsic may only be used in user mode.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the ENQCMD instruction.
+///
+/// \param __dst
+///    Pointer to the destination of the enqueue store.
+/// \param __src
+///    Pointer to 64-byte command data.
+/// \returns If the command data is successfully written to \a __dst then 0 is
+///    returned. Otherwise 1 is returned.
+static __inline__ int _DEFAULT_FN_ATTRS
+_enqcmd (void *__dst, const void *__src)
+{
+  return __builtin_ia32_enqcmd(__dst, __src);
+}
+
+/// Reads the 64-byte command pointed to by \a __src, formats 64-byte enqueue
+///    store data, and performs a 64-byte enqueue store to memory pointed to
+///    by \a __dst. This intrinsic may only be used in privileged mode.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the ENQCMDS instruction.
+///
+/// \param __dst
+///    Pointer to the destination of the enqueue store.
+/// \param __src
+///    Pointer to 64-byte command data.
+/// \returns If the command data is successfully written to \a __dst then 0 is
+///    returned. Otherwise 1 is returned.
+static __inline__ int _DEFAULT_FN_ATTRS
+_enqcmds (void *__dst, const void *__src)
+{
+  return __builtin_ia32_enqcmds(__dst, __src);
+}
+
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __ENQCMDINTRIN_H */
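[Editor's note] ENQCMD performs a non-posted 64-byte atomic store to a device work-queue portal; the device's accept/reject response is surfaced by the builtin as the 0/1 return value. A hedged sketch of a user-mode submission loop, not part of the upstream header: submit_descriptor, portal, and desc are hypothetical names, the portal would come from a device driver mapping, and the code needs an ENQCMD-capable device plus -menqcmd to be meaningful.

#include <immintrin.h>

// Retry a few times if the device work queue is momentarily full.
// Both pointers are assumptions: 'portal' is a device register mapped into
// this process, 'desc' a 64-byte, device-defined work descriptor.
static int submit_descriptor(void *portal, const void *desc) {
  for (int attempt = 0; attempt < 8; ++attempt) {
    if (_enqcmd(portal, desc) == 0)  // 0 => command accepted
      return 0;
  }
  return -1;  // queue stayed full; caller should back off
}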
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/f16cintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/f16cintrin.h
new file mode 100644
index 0000000..109b604
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/f16cintrin.h
@@ -0,0 +1,162 @@
+/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __F16CINTRIN_H
+#define __F16CINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 \
+  __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+  __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(256)))
+
+/* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h,
+ * but that's because icc can emulate these without f16c using a library call.
+ * Since we don't do that let's leave these in f16cintrin.h.
+ */
+
+/// Converts a 16-bit half-precision float value into a 32-bit float
+///    value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTPH2PS instruction.
+///
+/// \param __a
+///    A 16-bit half-precision float value.
+/// \returns The converted 32-bit float value.
+static __inline float __DEFAULT_FN_ATTRS128
+_cvtsh_ss(unsigned short __a)
+{
+  __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
+  __v4sf __r = __builtin_ia32_vcvtph2ps(__v);
+  return __r[0];
+}
+
+/// Converts a 32-bit single-precision float value to a 16-bit
+///    half-precision float value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _cvtss_sh(float a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the VCVTPS2PH instruction.
+///
+/// \param a
+///    A 32-bit single-precision float value to be converted to a 16-bit
+///    half-precision float value.
+/// \param imm
+///    An immediate value controlling rounding using bits [2:0]: \n
+///    000: Nearest \n
+///    001: Down \n
+///    010: Up \n
+///    011: Truncate \n
+///    1XX: Use MXCSR.RC for rounding
+/// \returns The converted 16-bit half-precision float value.
+#define _cvtss_sh(a, imm) \
+  (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+                                                     (imm)))[0])
+
+/// Converts a 128-bit vector containing 32-bit float values into a
+///    128-bit vector containing 16-bit half-precision float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_cvtps_ph(__m128 a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the VCVTPS2PH instruction.
+///
+/// \param a
+///    A 128-bit vector containing 32-bit float values.
+/// \param imm
+///    An immediate value controlling rounding using bits [2:0]: \n
+///    000: Nearest \n
+///    001: Down \n
+///    010: Up \n
+///    011: Truncate \n
+///    1XX: Use MXCSR.RC for rounding
+/// \returns A 128-bit vector containing converted 16-bit half-precision float
+///    values. The lower 64 bits are used to store the converted 16-bit
+///    half-precision floating-point values.
+#define _mm_cvtps_ph(a, imm) \
+  (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm))
+
+/// Converts a 128-bit vector containing 16-bit half-precision float
+///    values into a 128-bit vector containing 32-bit float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTPH2PS instruction.
+///
+/// \param __a
+///    A 128-bit vector containing 16-bit half-precision float values. The
+///    lower 64 bits are used in the conversion.
+/// \returns A 128-bit vector of [4 x float] containing converted float values.
+static __inline __m128 __DEFAULT_FN_ATTRS128
+_mm_cvtph_ps(__m128i __a)
+{
+  return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
+}
+
+/// Converts a 256-bit vector of [8 x float] into a 128-bit vector
+///    containing 16-bit half-precision float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the VCVTPS2PH instruction.
+///
+/// \param a
+///    A 256-bit vector containing 32-bit single-precision float values to be
+///    converted to 16-bit half-precision float values.
+/// \param imm
+///    An immediate value controlling rounding using bits [2:0]: \n
+///    000: Nearest \n
+///    001: Down \n
+///    010: Up \n
+///    011: Truncate \n
+///    1XX: Use MXCSR.RC for rounding
+/// \returns A 128-bit vector containing the converted 16-bit half-precision
+///    float values.
+#define _mm256_cvtps_ph(a, imm) \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm))
+
+/// Converts a 128-bit vector containing 16-bit half-precision float
+///    values into a 256-bit vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VCVTPH2PS instruction.
+///
+/// \param __a
+///    A 128-bit vector containing 16-bit half-precision float values to be
+///    converted to 32-bit single-precision float values.
+/// \returns A vector of [8 x float] containing the converted 32-bit
+///    single-precision float values.
+static __inline __m256 __DEFAULT_FN_ATTRS256
+_mm256_cvtph_ps(__m128i __a)
+{
+  return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif /* __F16CINTRIN_H */
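[Editor's note] The scalar F16C forms above make a quick round-trip demo possible: narrow a float to 16 bits with explicit rounding, then widen it back. A minimal sketch, not part of the upstream header (build with F16C enabled, e.g. -mf16c):

#include <immintrin.h>
#include <cstdio>

int main() {
  float x = 3.14159f;
  unsigned short h = _cvtss_sh(x, 0);  // imm bits 000: round to nearest
  float back = _cvtsh_ss(h);           // widen back to 32 bits
  std::printf("%f -> 0x%04x -> %f\n", x, h, back);  // note the lost precision
  return 0;
}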
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/float.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/float.h
new file mode 100644
index 0000000..ed610b2
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/float.h
@@ -0,0 +1,152 @@
+/*===---- float.h - Characteristics of floating point types ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_H
+#define __CLANG_FLOAT_H
+
+/* If we're on MinGW, fall back to the system's float.h, which might have
+ * additional definitions provided for Windows.
+ * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ *
+ * Also fall back on Darwin to allow additional definitions and
+ * implementation-defined values.
+ */
+#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+    __STDC_HOSTED__ && __has_include_next(<float.h>)
+
+/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
+ * of #include_next<float.h> to keep Metrowerks compilers happy. Avoid this
+ * extra indirection.
+ */
+#ifdef __APPLE__
+#define _FLOAT_H_
+#endif
+
+#  include_next <float.h>
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#   undef DECIMAL_DIG
+# endif
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#   undef FLT_TRUE_MIN
+#   undef DBL_TRUE_MIN
+#   undef LDBL_TRUE_MIN
+#   undef FLT_DECIMAL_DIG
+#   undef DBL_DECIMAL_DIG
+#   undef LDBL_DECIMAL_DIG
+#   undef FLT_HAS_SUBNORM
+#   undef DBL_HAS_SUBNORM
+#   undef LDBL_HAS_SUBNORM
+# endif
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#  define DECIMAL_DIG __DECIMAL_DIG__
+#endif
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#  define FLT_TRUE_MIN __FLT_DENORM_MIN__
+#  define DBL_TRUE_MIN __DBL_DENORM_MIN__
+#  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#  define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+#  define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+#  define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
+#  define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+#  define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+#  define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+#endif
+
+#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
+#  define FLT16_MANT_DIG __FLT16_MANT_DIG__
+#  define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
+#  define FLT16_DIG __FLT16_DIG__
+#  define FLT16_MIN_EXP __FLT16_MIN_EXP__
+#  define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__
+#  define FLT16_MAX_EXP __FLT16_MAX_EXP__
+#  define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__
+#  define FLT16_MAX __FLT16_MAX__
+#  define FLT16_EPSILON __FLT16_EPSILON__
+#  define FLT16_MIN __FLT16_MIN__
+#  define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
+#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
+
+#endif /* __CLANG_FLOAT_H */
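[Editor's note] Two of the C11 additions in float.h above are easy to confuse: FLT_MIN is the smallest *normalized* float, while FLT_TRUE_MIN is the smallest *subnormal*, so it is far smaller wherever subnormals are supported. A short sketch that prints both, not part of the upstream header:

#include <cfloat>
#include <cstdio>

int main() {
  std::printf("FLT_MIN      = %g\n", FLT_MIN);       // ~1.17549e-38
#ifdef FLT_TRUE_MIN
  std::printf("FLT_TRUE_MIN = %g\n", FLT_TRUE_MIN);  // ~1.4013e-45
#endif
  std::printf("FLT_DIG = %d, DBL_DIG = %d\n", FLT_DIG, DBL_DIG);
  return 0;
}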
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fma4intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fma4intrin.h
new file mode 100644
index 0000000..694801b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fma4intrin.h
@@ -0,0 +1,218 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#include <pmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(256)))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return
(__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif /* __FMA4INTRIN_H */
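[Editor's note] Both the FMA4 forms above and the FMA3 forms in fmaintrin.h below compute a*b +/- c with a single rounding step, which is the whole point of fused multiply-add. A minimal FMA3 sketch, not part of the upstream headers (build with -mfma):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 a = _mm_set1_ps(2.0f);
  __m128 b = _mm_set1_ps(3.0f);
  __m128 c = _mm_set1_ps(1.0f);
  __m128 r = _mm_fmadd_ps(a, b, c);  // every lane: 2*3 + 1 = 7
  float out[4];
  _mm_storeu_ps(out, r);
  std::printf("%f\n", out[0]);
  return 0;
}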
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fmaintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fmaintrin.h
new file mode 100644
index 0000000..d889b7c
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fmaintrin.h
@@ -0,0 +1,216 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
+static __inline__ __m128d
__DEFAULT_FN_ATTRS128 +_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __FMAINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fuzzer/FuzzedDataProvider.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fuzzer/FuzzedDataProvider.h new file mode 100644 index 0000000..71cb427 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fuzzer/FuzzedDataProvider.h @@ -0,0 +1,397 @@ +//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===// +// +// Part of the LLVM Project, under the Apache License 
v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// A single header library providing a utility class to break up an array of
+// bytes. Whenever run on the same input, provides the same output, as long as
+// its methods are called in the same order, with the same arguments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+
+#include <algorithm>
+#include <array>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <limits>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// In addition to the comments below, the API is also briefly documented at
+// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
+class FuzzedDataProvider {
+ public:
+  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
+  // provide more granular access. |data| must outlive the FuzzedDataProvider.
+  FuzzedDataProvider(const uint8_t *data, size_t size)
+      : data_ptr_(data), remaining_bytes_(size) {}
+  ~FuzzedDataProvider() = default;
+
+  // See the implementation below (after the class definition) for more verbose
+  // comments for each of the methods.
+
+  // Methods returning std::vector of bytes. These are the most popular choice
+  // when splitting fuzzing input into pieces, as every piece is put into a
+  // separate buffer (i.e. ASan would catch any under-/overflow) and the memory
+  // will be released automatically.
+  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes);
+  template <typename T>
+  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0);
+  template <typename T> std::vector<T> ConsumeRemainingBytes();
+
+  // Methods returning strings. Use only when you need a std::string or a null
+  // terminated C-string. Otherwise, prefer the methods returning std::vector.
+  std::string ConsumeBytesAsString(size_t num_bytes);
+  std::string ConsumeRandomLengthString(size_t max_length);
+  std::string ConsumeRandomLengthString();
+  std::string ConsumeRemainingBytesAsString();
+
+  // Methods returning integer values.
+  template <typename T> T ConsumeIntegral();
+  template <typename T> T ConsumeIntegralInRange(T min, T max);
+
+  // Methods returning floating point values.
+  template <typename T> T ConsumeFloatingPoint();
+  template <typename T> T ConsumeFloatingPointInRange(T min, T max);
+
+  // 0 <= return value <= 1.
+  template <typename T> T ConsumeProbability();
+
+  bool ConsumeBool();
+
+  // Returns a value chosen from the given enum.
+  template <typename T> T ConsumeEnum();
+
+  // Returns a value from the given array.
+  template <typename T, size_t size> T PickValueInArray(const T (&array)[size]);
+  template <typename T, size_t size>
+  T PickValueInArray(const std::array<T, size> &array);
+  template <typename T> T PickValueInArray(std::initializer_list<const T> list);
+
+  // Writes data to the given destination and returns number of bytes written.
+  size_t ConsumeData(void *destination, size_t num_bytes);
+
+  // Reports the remaining bytes available for fuzzed input.
+  size_t remaining_bytes() { return remaining_bytes_; }
+
+ private:
+  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
+  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
+
+  void CopyAndAdvance(void *destination, size_t num_bytes);
+
+  void Advance(size_t num_bytes);
+
+  template <typename T>
+  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes);
+
+  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value);
+
+  const uint8_t *data_ptr_;
+  size_t remaining_bytes_;
+};
+
+// Returns a std::vector containing |num_bytes| of input data. If fewer than
+// |num_bytes| of data remain, returns a shorter std::vector containing all
+// of the data that's left. Can be used with any byte sized type, such as
+// char, unsigned char, uint8_t, etc.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  return ConsumeBytes<T>(num_bytes, num_bytes);
+}
+
+// Similar to |ConsumeBytes|, but also appends the terminator value at the end
+// of the resulting vector. Useful, when a mutable null-terminated C-string is
+// needed, for example. But that is a rare case. Better avoid it, if possible,
+// and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes,
+                                                              T terminator) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
+  result.back() = terminator;
+  return result;
+}
+
+// Returns a std::vector containing all remaining bytes of the input data.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeRemainingBytes() {
+  return ConsumeBytes<T>(remaining_bytes_);
+}
+
+// Returns a std::string containing |num_bytes| of input data. Using this and
+// |.c_str()| on the resulting string is the best way to get an immutable
+// null-terminated C string. If fewer than |num_bytes| of data remain, returns
+// a shorter std::string containing all of the data that's left.
+inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) {
+  static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
+                "ConsumeBytesAsString cannot convert the data to a string.");
+
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::string result(
+      reinterpret_cast<const std::string::value_type *>(data_ptr_), num_bytes);
+  Advance(num_bytes);
+  return result;
+}
+
+// Returns a std::string of length from 0 to |max_length|. When it runs out of
+// input data, returns what remains of the input. Designed to be more stable
+// with respect to a fuzzer inserting characters than just picking a random
+// length and then consuming that many bytes with |ConsumeBytes|.
+inline std::string
+FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
+  // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
+  // followed by anything else to the end of the string. As a result of this
+  // logic, a fuzzer can insert characters into the string, and the string
+  // will be lengthened to include those new characters, resulting in a more
+  // stable fuzzer than picking the length of a string independently from
+  // picking its contents.
+  std::string result;
+
+  // Reserve the anticipated capacity to prevent several reallocations.
+  result.reserve(std::min(max_length, remaining_bytes_));
+  for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
+    char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+    Advance(1);
+    if (next == '\\' && remaining_bytes_ != 0) {
+      next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+      Advance(1);
+      if (next != '\\')
+        break;
+    }
+    result += next;
+  }
+
+  result.shrink_to_fit();
+  return result;
+}
+
+// Returns a std::string of length from 0 to |remaining_bytes_|.
+inline std::string FuzzedDataProvider::ConsumeRandomLengthString() {
+  return ConsumeRandomLengthString(remaining_bytes_);
+}
+
+// Returns a std::string containing all remaining bytes of the input data.
+// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
+// object.
+inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() {
+  return ConsumeBytesAsString(remaining_bytes_);
+}
+
+// Returns a number in the range [Type's min, Type's max]. The value might
+// not be uniformly distributed in the given range. If there's no input data
+// left, always returns |min|.
+template <typename T> T FuzzedDataProvider::ConsumeIntegral() {
+  return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
+                                std::numeric_limits<T>::max());
+}
+
+// Returns a number in the range [min, max] by consuming bytes from the
+// input data. The value might not be uniformly distributed in the given
+// range. If there's no input data left, always returns |min|. |min| must
+// be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) {
+  static_assert(std::is_integral<T>::value, "An integral type is required.");
+  static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
+
+  if (min > max)
+    abort();
+
+  // Use the biggest type possible to hold the range and the result.
+  uint64_t range = static_cast<uint64_t>(max) - min;
+  uint64_t result = 0;
+  size_t offset = 0;
+
+  while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
+         remaining_bytes_ != 0) {
+    // Pull bytes off the end of the seed data. Experimentally, this seems to
+    // allow the fuzzer to more easily explore the input space. This makes
+    // sense, since it works by modifying inputs that caused new code to run,
+    // and this data is often used to encode length of data read by
+    // |ConsumeBytes|. Separating out read lengths makes it easier to modify
+    // the contents of the data that is actually read.
+    --remaining_bytes_;
+    result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
+    offset += CHAR_BIT;
+  }
+
+  // Avoid division by 0, in case |range + 1| results in overflow.
+  if (range != std::numeric_limits<uint64_t>::max())
+    result = result % (range + 1);
+
+  return static_cast<T>(min + result);
+}
+
+// Returns a floating point value in the range [Type's lowest, Type's max] by
+// consuming bytes from the input data. If there's no input data left, always
+// returns approximately 0.
+template <typename T> T FuzzedDataProvider::ConsumeFloatingPoint() {
+  return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
+                                        std::numeric_limits<T>::max());
+}
+
+// Returns a floating point value in the given range by consuming bytes from
+// the input data. If there's no input data left, returns |min|. Note that
+// |min| must be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) {
+  if (min > max)
+    abort();
+
+  T range = .0;
+  T result = min;
+  constexpr T zero(.0);
+  if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
+    // The diff |max - min| would overflow the given floating point type. Use
+    // the half of the diff as the range and consume a bool to decide whether
+    // the result is in the first or the second part of the diff.
+    range = (max / 2.0) - (min / 2.0);
+    if (ConsumeBool()) {
+      result += range;
+    }
+  } else {
+    range = max - min;
+  }
+
+  return result + range * ConsumeProbability<T>();
+}
+
+// Returns a floating point number in the range [0.0, 1.0]. If there's no
+// input data left, always returns 0.
+template <typename T> T FuzzedDataProvider::ConsumeProbability() {
+  static_assert(std::is_floating_point<T>::value,
+                "A floating point type is required.");
+
+  // Use different integral types for different floating point types in order
+  // to provide better density of the resulting values.
+  using IntegralType =
+      typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
+                                uint64_t>::type;
+
+  T result = static_cast<T>(ConsumeIntegral<IntegralType>());
+  result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
+  return result;
+}
+
+// Reads one byte and returns a bool, or false when no data remains.
+inline bool FuzzedDataProvider::ConsumeBool() {
+  return 1 & ConsumeIntegral<uint8_t>();
+}
+
+// Returns an enum value. The enum must start at 0 and be contiguous. It must
+// also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
+// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
+template <typename T> T FuzzedDataProvider::ConsumeEnum() {
+  static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
+  return static_cast<T>(
+      ConsumeIntegralInRange<uint32_t>(0, static_cast<uint32_t>(T::kMaxValue)));
+}
+
+// Returns a copy of the value selected from the given fixed-size |array|.
+template <typename T, size_t size>
+T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) {
+  static_assert(size > 0, "The array must be non empty.");
+  return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+}
+
+template <typename T, size_t size>
+T FuzzedDataProvider::PickValueInArray(const std::array<T, size> &array) {
+  static_assert(size > 0, "The array must be non empty.");
+  return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+}
+
+template <typename T>
+T FuzzedDataProvider::PickValueInArray(std::initializer_list<const T> list) {
+  // TODO(Dor1s): switch to static_assert once C++14 is allowed.
+  if (!list.size())
+    abort();
+
+  return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
+}
+
+// Writes |num_bytes| of input data to the given destination pointer. If there
+// is not enough data left, writes all remaining bytes. Return value is the
+// number of bytes written.
+// In general, it's better to avoid using this function, but it may be useful
+// in cases when it's necessary to fill a certain buffer or object with
+// fuzzing data.
+inline size_t FuzzedDataProvider::ConsumeData(void *destination,
+                                              size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  CopyAndAdvance(destination, num_bytes);
+  return num_bytes;
+}
+
+// Private methods.
+inline void FuzzedDataProvider::CopyAndAdvance(void *destination,
+                                               size_t num_bytes) {
+  std::memcpy(destination, data_ptr_, num_bytes);
+  Advance(num_bytes);
+}
+
+inline void FuzzedDataProvider::Advance(size_t num_bytes) {
+  if (num_bytes > remaining_bytes_)
+    abort();
+
+  data_ptr_ += num_bytes;
+  remaining_bytes_ -= num_bytes;
+}
+
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) {
+  static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
+
+  // The point of using the size-based constructor below is to increase the
+  // odds of having a vector object with capacity being equal to the length.
+  // That part is always implementation specific, but at least both libc++ and
+  // libstdc++ allocate the requested number of bytes in that constructor,
+  // which seems to be a natural choice for other implementations as well.
+  // To increase the odds even more, we also call |shrink_to_fit| below.
+  std::vector<T> result(size);
+  if (size == 0) {
+    if (num_bytes != 0)
+      abort();
+    return result;
+  }
+
+  CopyAndAdvance(result.data(), num_bytes);
+
+  // Even though |shrink_to_fit| is also implementation specific, we expect it
+  // to provide an additional assurance in case vector's constructor allocated
+  // a buffer which is larger than the actual amount of data we put inside it.
+  result.shrink_to_fit();
+  return result;
+}
+
+template <typename TS, typename TU>
+TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) {
+  static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
+  static_assert(!std::numeric_limits<TU>::is_signed,
+                "Source type must be unsigned.");
+
+  // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
+  if (std::numeric_limits<TS>::is_modulo)
+    return static_cast<TS>(value);
+
+  // Avoid using implementation-defined unsigned to signed conversions.
+  // To learn more, see https://stackoverflow.com/questions/13150449.
+  if (value <= std::numeric_limits<TS>::max()) {
+    return static_cast<TS>(value);
+  } else {
+    constexpr auto TS_min = std::numeric_limits<TS>::min();
+    return TS_min + static_cast<TS>(value - TS_min);
+  }
+}
+
+#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
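[Editor's note] FuzzedDataProvider is typically instantiated once at the top of a libFuzzer target and then asked for typed pieces of the input, in a fixed order. A minimal target sketch, not part of the upstream header (the consuming code is hypothetical):

#include <fuzzer/FuzzedDataProvider.h>

#include <cstdint>
#include <string>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  FuzzedDataProvider provider(data, size);
  // Carve the raw bytes into typed values; the order of calls matters.
  int count = provider.ConsumeIntegralInRange<int>(0, 100);
  bool flag = provider.ConsumeBool();
  std::string name = provider.ConsumeRandomLengthString(16);
  std::vector<uint8_t> payload = provider.ConsumeRemainingBytes<uint8_t>();
  // Feed count/flag/name/payload into the code under test here.
  (void)count; (void)flag; (void)name; (void)payload;
  return 0;
}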
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fxsrintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fxsrintrin.h
new file mode 100644
index 0000000..afee6aa
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/fxsrintrin.h
@@ -0,0 +1,91 @@
+/*===---- fxsrintrin.h - FXSR intrinsic ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fxsrintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FXSRINTRIN_H
+#define __FXSRINTRIN_H
+
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
+
+/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
+///    memory region pointed to by the input parameter \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the FXSAVE instruction.
+///
+/// \param __p
+///    A pointer to a 512-byte memory region. The beginning of this memory
+///    region should be aligned on a 16-byte boundary.
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave(void *__p)
+{
+  __builtin_ia32_fxsave(__p);
+}
+
+/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
+///    memory region pointed to by the input parameter \a __p. The contents of
+///    this memory region should have been written to by a previous \c _fxsave
+///    or \c _fxsave64 intrinsic.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the FXRSTOR instruction.
+///
+/// \param __p
+///    A pointer to a 512-byte memory region. The beginning of this memory
+///    region should be aligned on a 16-byte boundary.
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor(void *__p)
+{
+  __builtin_ia32_fxrstor(__p);
+}
+
+#ifdef __x86_64__
+/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
+///    memory region pointed to by the input parameter \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the FXSAVE64 instruction.
+///
+/// \param __p
+///    A pointer to a 512-byte memory region. The beginning of this memory
+///    region should be aligned on a 16-byte boundary.
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxsave64(void *__p)
+{
+  __builtin_ia32_fxsave64(__p);
+}
+
+/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
+///    memory region pointed to by the input parameter \a __p. The contents of
+///    this memory region should have been written to by a previous \c _fxsave
+///    or \c _fxsave64 intrinsic.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the FXRSTOR64 instruction.
+///
+/// \param __p
+///    A pointer to a 512-byte memory region. The beginning of this memory
+///    region should be aligned on a 16-byte boundary.
+static __inline__ void __DEFAULT_FN_ATTRS
+_fxrstor64(void *__p)
+{
+  __builtin_ia32_fxrstor64(__p);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/gfniintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/gfniintrin.h
new file mode 100644
index 0000000..11a321b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/gfniintrin.h
@@ -0,0 +1,193 @@
+/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __GFNIINTRIN_H
+#define __GFNIINTRIN_H
+
+/* Default attributes for simple form (no masking). */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni"), __min_vector_width__(128)))
+
+/* Default attributes for YMM unmasked form. */
+#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256)))
+
+/* Default attributes for ZMM forms. */
+#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512)))
+
+/* Default attributes for VLX forms.
*/ +#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256))) + +#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \ + (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), \ + (char)(I)) + +#define _mm_gf2p8affine_epi64_epi8(A, B, I) \ + (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), \ + (char)(I)) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_gf2p8mul_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A, + (__v16qi) __B); +} + +#ifdef __AVXINTRIN_H +#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \ + (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), \ + (char)(I)) + +#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \ + (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), \ + (char)(I)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS_Y +_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A, + (__v32qi) __B); +} +#endif /* __AVXINTRIN_H */ + +#ifdef __AVX512BWINTRIN_H +#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \ + (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), \ + (char)(I)) + +#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \ + (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \ + (__v64qi)(__m512i)(S)) + +#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \ + (__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \ + U, A, B, I) + +#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \ + (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), \ + (char)(I)) + +#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \ + (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \ + (__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I), \ + (__v64qi)(__m512i)(S)) + +#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \ + (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \ + U, A, B, I) + +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z +_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A, + (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z +_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_selectb_512(__U, + (__v64qi) _mm512_gf2p8mul_epi8(__A, __B), + (__v64qi) __S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z +_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(), + __U, __A, __B); +} +#endif /* __AVX512BWINTRIN_H */ + +#ifdef __AVX512VLBWINTRIN_H +#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \ + (__v16qi)(__m128i)(S)) + +#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \ + (__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \ + U, A, B, I) + +#define 
_mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \ + (__v32qi)(__m256i)(S)) + +#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \ + (__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \ + U, A, B, I) + +#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \ + (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \ + (__v16qi)(__m128i)(S)) + +#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \ + (__m128i)_mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), \ + U, A, B, I) + +#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \ + (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \ + (__v32qi)(__m256i)(S)) + +#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \ + (__m256i)_mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \ + U, A, B, I) + +static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128 +_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_selectb_128(__U, + (__v16qi) _mm_gf2p8mul_epi8(__A, __B), + (__v16qi) __S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128 +_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(), + __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256 +_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_selectb_256(__U, + (__v32qi) _mm256_gf2p8mul_epi8(__A, __B), + (__v32qi) __S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256 +_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(), + __U, __A, __B); +} +#endif /* __AVX512VLBWINTRIN_H */ + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_Y +#undef __DEFAULT_FN_ATTRS_Z +#undef __DEFAULT_FN_ATTRS_VL128 +#undef __DEFAULT_FN_ATTRS_VL256 + +#endif /* __GFNIINTRIN_H */ + diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_circ_brev_intrinsics.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_circ_brev_intrinsics.h new file mode 100644 index 0000000..c53786d --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_circ_brev_intrinsics.h @@ -0,0 +1,298 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_
+#define _HEXAGON_CIRC_BREV_INTRINSICS_H_ 1
+
+#include <hexagon_protos.h>
+#include <stdint.h>
+
+/* Circular Load */
+/* ==========================================================================
+   Assembly Syntax:       Return=instruction()
+   C Intrinsic Prototype: void Q6_circ_load_update_D(Word64 dst, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+   Instruction Type:      InstructionType
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+#define Q6_circ_load_update_D(dest,ptr,incr,bufsize,K)  \
+    { ptr = (int64_t *) HEXAGON_circ_ldd (ptr, &(dest), ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }
+
+/* ==========================================================================
+   Assembly Syntax:       Return=instruction()
+   C Intrinsic Prototype: void Q6_circ_load_update_W(Word32 dst, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+   Instruction Type:      InstructionType
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+#define Q6_circ_load_update_W(dest,ptr,incr,bufsize,K)  \
+    { ptr = (int *) HEXAGON_circ_ldw (ptr, &(dest), (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }
+
+/* ==========================================================================
+   Assembly Syntax:       Return=instruction()
+   C Intrinsic Prototype: void Q6_circ_load_update_H(Word16 dst, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+   Instruction Type:      InstructionType
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+#define Q6_circ_load_update_H(dest,ptr,incr,bufsize,K)  \
+    { ptr = (int16_t *) HEXAGON_circ_ldh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }
+
+/* ==========================================================================
+   Assembly Syntax:       Return=instruction()
+   C Intrinsic Prototype: void Q6_circ_load_update_UH(UWord16 dst, UWord16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+   Instruction Type:      InstructionType
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+#define Q6_circ_load_update_UH(dest,ptr,incr,bufsize,K) \
+    { ptr = (uint16_t *) HEXAGON_circ_lduh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }
+
+/* ==========================================================================
+   Assembly Syntax:       Return=instruction()
+   C Intrinsic Prototype: void Q6_circ_load_update_B(Word8 dst, Word8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+   Instruction Type:      InstructionType
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+#define Q6_circ_load_update_B(dest,ptr,incr,bufsize,K)  \
+    { ptr = (int8_t *) HEXAGON_circ_ldb (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }
+
+/* ==========================================================================
+   Assembly Syntax:       Return=instruction()
+   C Intrinsic Prototype: void Q6_circ_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+   Instruction Type:      InstructionType
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+#define Q6_circ_load_update_UB(dest,ptr,incr,bufsize,K) \
+    { ptr = (uint8_t *) HEXAGON_circ_ldub (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }
+
+/* Circular Store */
+/*
========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_circ_store_update_D(Word64 *src, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_circ_store_update_D(src,ptr,incr,bufsize,K) \ + { ptr = (int64_t *) HEXAGON_circ_std (ptr, src, ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_circ_store_update_W(Word32 *src, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_circ_store_update_W(src,ptr,incr,bufsize,K) \ + { ptr = (int *) HEXAGON_circ_stw (ptr, src, (((K)<<24)|((bufsize)<<2)), ((incr)*4)); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_circ_store_update_HL(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_circ_store_update_HL(src,ptr,incr,bufsize,K) \ + { ptr = (int16_t *) HEXAGON_circ_sth (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_circ_store_update_HH(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_circ_store_update_HH(src,ptr,incr,bufsize,K) \ + { ptr = (int16_t *) HEXAGON_circ_sthhi (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_circ_store_update_B(Word8 *src, Word8 *ptr, UWord32 I4, UWord32 bufsize, UWord64 K) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_circ_store_update_B(src,ptr,incr,bufsize,K) \ + { ptr = (int8_t *) HEXAGON_circ_stb (ptr, src, ((((K)-2)<<24)|(bufsize)), incr); } + + +/* Bit Reverse Load */ +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_load_update_D(Word64 dst, Word64 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_load_update_D(dest,ptr,log2bufsize) \ + { ptr = (int64_t *) HEXAGON_brev_ldd (ptr, &(dest), (1<<(16-((log2bufsize) + 3)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_load_update_W(Word32 dst, Word32 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + 
========================================================================== */ +#define Q6_bitrev_load_update_W(dest,ptr,log2bufsize) \ + { ptr = (int *) HEXAGON_brev_ldw (ptr, &(dest), (1<<(16-((log2bufsize) + 2)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_load_update_H(Word16 dst, Word16 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_load_update_H(dest,ptr,log2bufsize) \ + { ptr = (int16_t *) HEXAGON_brev_ldh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_load_update_UH(UWord16 dst, UWord16 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_load_update_UH(dest,ptr,log2bufsize) \ + { ptr = (uint16_t *) HEXAGON_brev_lduh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_load_update_B(Word8 dst, Word8 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_load_update_B(dest,ptr,log2bufsize) \ + { ptr = (int8_t *) HEXAGON_brev_ldb (ptr, &(dest), (1<<(16-((log2bufsize))))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_load_update_UB(dest,ptr,log2bufsize) \ + { ptr = (uint8_t *) HEXAGON_brev_ldub (ptr, &(dest), (1<<(16-((log2bufsize))))); } + +/* Bit Reverse Store */ + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_store_update_D(Word64 *src, Word64 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_store_update_D(src,ptr,log2bufsize) \ + { ptr = (int64_t *) HEXAGON_brev_std (ptr, src, (1<<(16-((log2bufsize) + 3)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_store_update_W(Word32 *src, Word32 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_store_update_W(src,ptr,log2bufsize) \ + { ptr = (int *) HEXAGON_brev_stw (ptr, src, (1<<(16-((log2bufsize) + 2)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_store_update_HL(Word16 *src, Word16 *ptr, Word32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + 
========================================================================== */ +#define Q6_bitrev_store_update_HL(src,ptr,log2bufsize) \ + { ptr = (int16_t *) HEXAGON_brev_sth (ptr, src, (1<<(16-((log2bufsize) + 1)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_store_update_HH(Word16 *src, Word16 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_store_update_HH(src,ptr,log2bufsize) \ + { ptr = (int16_t *) HEXAGON_brev_sthhi (ptr, src, (1<<(16-((log2bufsize) + 1)))); } + +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: void Q6_bitrev_store_update_B(Word8 *src, Word8 *ptr, UWord32 Iu4) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#define Q6_bitrev_store_update_B(src,ptr,log2bufsize) \ + { ptr = (int8_t *) HEXAGON_brev_stb (ptr, src, (1<<(16-((log2bufsize))))); } + + +#define HEXAGON_circ_ldd __builtin_circ_ldd +#define HEXAGON_circ_ldw __builtin_circ_ldw +#define HEXAGON_circ_ldh __builtin_circ_ldh +#define HEXAGON_circ_lduh __builtin_circ_lduh +#define HEXAGON_circ_ldb __builtin_circ_ldb +#define HEXAGON_circ_ldub __builtin_circ_ldub + + +#define HEXAGON_circ_std __builtin_circ_std +#define HEXAGON_circ_stw __builtin_circ_stw +#define HEXAGON_circ_sth __builtin_circ_sth +#define HEXAGON_circ_sthhi __builtin_circ_sthhi +#define HEXAGON_circ_stb __builtin_circ_stb + + +#define HEXAGON_brev_ldd __builtin_brev_ldd +#define HEXAGON_brev_ldw __builtin_brev_ldw +#define HEXAGON_brev_ldh __builtin_brev_ldh +#define HEXAGON_brev_lduh __builtin_brev_lduh +#define HEXAGON_brev_ldb __builtin_brev_ldb +#define HEXAGON_brev_ldub __builtin_brev_ldub + +#define HEXAGON_brev_std __builtin_brev_std +#define HEXAGON_brev_stw __builtin_brev_stw +#define HEXAGON_brev_sth __builtin_brev_sth +#define HEXAGON_brev_sthhi __builtin_brev_sthhi +#define HEXAGON_brev_stb __builtin_brev_stb + +#ifdef __HVX__ +/* ========================================================================== + Assembly Syntax: if (Qt) vmem(Rt+#0) = Vs + C Intrinsic Prototype: void Q6_vmaskedstoreq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs) + Instruction Type: COPROC_VMEM + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmaskedstoreq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstoreq) + +/* ========================================================================== + Assembly Syntax: if (!Qt) vmem(Rt+#0) = Vs + C Intrinsic Prototype: void Q6_vmaskedstorenq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs) + Instruction Type: COPROC_VMEM + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmaskedstorenq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorenq) + +/* ========================================================================== + Assembly Syntax: if (Qt) vmem(Rt+#0):nt = Vs + C Intrinsic Prototype: void Q6_vmaskedstorentq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs) + Instruction Type: COPROC_VMEM + Execution Slots: SLOT0 + ========================================================================== */ + 
+#define Q6_vmaskedstorentq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentq) + +/* ========================================================================== + Assembly Syntax: if (!Qt) vmem(Rt+#0):nt = Vs + C Intrinsic Prototype: void Q6_vmaskedstorentnq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs) + Instruction Type: COPROC_VMEM + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmaskedstorentnq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentnq) + +#endif + + +#endif /* #ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_ */ + +#ifdef __NOT_DEFINED__ +/*** comment block template ***/ +/* ========================================================================== + Assembly Syntax: Return=instruction() + C Intrinsic Prototype: ReturnType Intrinsic(ParamType Rs, ParamType Rt) + Instruction Type: InstructionType + Execution Slots: SLOT0123 + ========================================================================== */ +#endif /*** __NOT_DEFINED__ ***/ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_protos.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_protos.h new file mode 100644 index 0000000..cdffd93 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_protos.h @@ -0,0 +1,8450 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Automatically generated file, do not edit! 
+//===----------------------------------------------------------------------===// + + + +#ifndef __HEXAGON_PROTOS_H_ +#define __HEXAGON_PROTOS_H_ 1 + +/* ========================================================================== + Assembly Syntax: Rd32=abs(Rs32) + C Intrinsic Prototype: Word32 Q6_R_abs_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_abs_R __builtin_HEXAGON_A2_abs + +/* ========================================================================== + Assembly Syntax: Rdd32=abs(Rss32) + C Intrinsic Prototype: Word64 Q6_P_abs_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_abs_P __builtin_HEXAGON_A2_absp + +/* ========================================================================== + Assembly Syntax: Rd32=abs(Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_abs_R_sat(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_abs_R_sat __builtin_HEXAGON_A2_abssat + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_add_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_add_RR __builtin_HEXAGON_A2_add + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRh_s16 __builtin_HEXAGON_A2_addh_h16_hh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRl_s16 __builtin_HEXAGON_A2_addh_h16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh_s16 __builtin_HEXAGON_A2_addh_h16_lh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl_s16 __builtin_HEXAGON_A2_addh_h16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRh_sat_s16 
__builtin_HEXAGON_A2_addh_h16_sat_hh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_lh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_add_RlRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh __builtin_HEXAGON_A2_addh_l16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_add_RlRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl __builtin_HEXAGON_A2_addh_l16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat + C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh_sat __builtin_HEXAGON_A2_addh_l16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat + C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl_sat __builtin_HEXAGON_A2_addh_l16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,#s16) + C Intrinsic Prototype: Word32 Q6_R_add_RI(Word32 Rs, Word32 Is16) + Instruction Type: ALU32_ADDI + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_add_RI __builtin_HEXAGON_A2_addi + +/* ========================================================================== + Assembly Syntax: Rdd32=add(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_add_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_add_PP 
__builtin_HEXAGON_A2_addp + +/* ========================================================================== + Assembly Syntax: Rdd32=add(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_add_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_add_PP_sat __builtin_HEXAGON_A2_addpsat + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_add_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_add_RR_sat __builtin_HEXAGON_A2_addsat + +/* ========================================================================== + Assembly Syntax: Rdd32=add(Rs32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_add_RP(Word32 Rs, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_add_RP __builtin_HEXAGON_A2_addsp + +/* ========================================================================== + Assembly Syntax: Rd32=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_and_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_and_RR __builtin_HEXAGON_A2_and + +/* ========================================================================== + Assembly Syntax: Rd32=and(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_and_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_and_RI __builtin_HEXAGON_A2_andir + +/* ========================================================================== + Assembly Syntax: Rdd32=and(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_and_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_and_PP __builtin_HEXAGON_A2_andp + +/* ========================================================================== + Assembly Syntax: Rd32=aslh(Rs32) + C Intrinsic Prototype: Word32 Q6_R_aslh_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_aslh_R __builtin_HEXAGON_A2_aslh + +/* ========================================================================== + Assembly Syntax: Rd32=asrh(Rs32) + C Intrinsic Prototype: Word32 Q6_R_asrh_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_asrh_R __builtin_HEXAGON_A2_asrh + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.h,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_combine_RhRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RhRh __builtin_HEXAGON_A2_combine_hh + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.h,Rs32.l) + C Intrinsic 
Prototype: Word32 Q6_R_combine_RhRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RhRl __builtin_HEXAGON_A2_combine_hl + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.l,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_combine_RlRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RlRh __builtin_HEXAGON_A2_combine_lh + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.l,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_combine_RlRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RlRl __builtin_HEXAGON_A2_combine_ll + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(#s8,#S8) + C Intrinsic Prototype: Word64 Q6_P_combine_II(Word32 Is8, Word32 IS8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_II __builtin_HEXAGON_A2_combineii + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_combine_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_RR __builtin_HEXAGON_A2_combinew + +/* ========================================================================== + Assembly Syntax: Rd32=max(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_max_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_max_RR __builtin_HEXAGON_A2_max + +/* ========================================================================== + Assembly Syntax: Rdd32=max(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_max_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_max_PP __builtin_HEXAGON_A2_maxp + +/* ========================================================================== + Assembly Syntax: Rd32=maxu(Rs32,Rt32) + C Intrinsic Prototype: UWord32 Q6_R_maxu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_maxu_RR __builtin_HEXAGON_A2_maxu + +/* ========================================================================== + Assembly Syntax: Rdd32=maxu(Rss32,Rtt32) + C Intrinsic Prototype: UWord64 Q6_P_maxu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_maxu_PP __builtin_HEXAGON_A2_maxup + +/* ========================================================================== + Assembly Syntax: Rd32=min(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_min_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: 
SLOT23 + ========================================================================== */ + +#define Q6_R_min_RR __builtin_HEXAGON_A2_min + +/* ========================================================================== + Assembly Syntax: Rdd32=min(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_min_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_min_PP __builtin_HEXAGON_A2_minp + +/* ========================================================================== + Assembly Syntax: Rd32=minu(Rt32,Rs32) + C Intrinsic Prototype: UWord32 Q6_R_minu_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_minu_RR __builtin_HEXAGON_A2_minu + +/* ========================================================================== + Assembly Syntax: Rdd32=minu(Rtt32,Rss32) + C Intrinsic Prototype: UWord64 Q6_P_minu_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_minu_PP __builtin_HEXAGON_A2_minup + +/* ========================================================================== + Assembly Syntax: Rd32=neg(Rs32) + C Intrinsic Prototype: Word32 Q6_R_neg_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_neg_R __builtin_HEXAGON_A2_neg + +/* ========================================================================== + Assembly Syntax: Rdd32=neg(Rss32) + C Intrinsic Prototype: Word64 Q6_P_neg_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_neg_P __builtin_HEXAGON_A2_negp + +/* ========================================================================== + Assembly Syntax: Rd32=neg(Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_neg_R_sat(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_neg_R_sat __builtin_HEXAGON_A2_negsat + +/* ========================================================================== + Assembly Syntax: Rd32=not(Rs32) + C Intrinsic Prototype: Word32 Q6_R_not_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_not_R __builtin_HEXAGON_A2_not + +/* ========================================================================== + Assembly Syntax: Rdd32=not(Rss32) + C Intrinsic Prototype: Word64 Q6_P_not_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_not_P __builtin_HEXAGON_A2_notp + +/* ========================================================================== + Assembly Syntax: Rd32=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_or_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_or_RR __builtin_HEXAGON_A2_or + +/* ========================================================================== + Assembly Syntax: Rd32=or(Rs32,#s10) + C Intrinsic Prototype: Word32 
Q6_R_or_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_or_RI __builtin_HEXAGON_A2_orir + +/* ========================================================================== + Assembly Syntax: Rdd32=or(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_or_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_or_PP __builtin_HEXAGON_A2_orp + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rss32):sat + C Intrinsic Prototype: Word32 Q6_R_round_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_P_sat __builtin_HEXAGON_A2_roundsat + +/* ========================================================================== + Assembly Syntax: Rd32=sat(Rss32) + C Intrinsic Prototype: Word32 Q6_R_sat_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sat_P __builtin_HEXAGON_A2_sat + +/* ========================================================================== + Assembly Syntax: Rd32=satb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_satb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_satb_R __builtin_HEXAGON_A2_satb + +/* ========================================================================== + Assembly Syntax: Rd32=sath(Rs32) + C Intrinsic Prototype: Word32 Q6_R_sath_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sath_R __builtin_HEXAGON_A2_sath + +/* ========================================================================== + Assembly Syntax: Rd32=satub(Rs32) + C Intrinsic Prototype: Word32 Q6_R_satub_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_satub_R __builtin_HEXAGON_A2_satub + +/* ========================================================================== + Assembly Syntax: Rd32=satuh(Rs32) + C Intrinsic Prototype: Word32 Q6_R_satuh_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_satuh_R __builtin_HEXAGON_A2_satuh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_sub_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sub_RR __builtin_HEXAGON_A2_sub + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRh_s16 __builtin_HEXAGON_A2_subh_h16_hh + +/* 
========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRl_s16 __builtin_HEXAGON_A2_subh_h16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh_s16 __builtin_HEXAGON_A2_subh_h16_lh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl_s16 __builtin_HEXAGON_A2_subh_h16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_lh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh __builtin_HEXAGON_A2_subh_l16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl 
__builtin_HEXAGON_A2_subh_l16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh_sat __builtin_HEXAGON_A2_subh_l16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl_sat __builtin_HEXAGON_A2_subh_l16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rdd32=sub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_sub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_sub_PP __builtin_HEXAGON_A2_subp + +/* ========================================================================== + Assembly Syntax: Rd32=sub(#s10,Rs32) + C Intrinsic Prototype: Word32 Q6_R_sub_IR(Word32 Is10, Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sub_IR __builtin_HEXAGON_A2_subri + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32,Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_sub_RR_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sub_RR_sat __builtin_HEXAGON_A2_subsat + +/* ========================================================================== + Assembly Syntax: Rd32=vaddh(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_vaddh_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vaddh_RR __builtin_HEXAGON_A2_svaddh + +/* ========================================================================== + Assembly Syntax: Rd32=vaddh(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_vaddh_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vaddh_RR_sat __builtin_HEXAGON_A2_svaddhs + +/* ========================================================================== + Assembly Syntax: Rd32=vadduh(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_vadduh_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vadduh_RR_sat __builtin_HEXAGON_A2_svadduhs + +/* ========================================================================== + Assembly Syntax: Rd32=vavgh(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_vavgh_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vavgh_RR __builtin_HEXAGON_A2_svavgh + +/* 
========================================================================== + Assembly Syntax: Rd32=vavgh(Rs32,Rt32):rnd + C Intrinsic Prototype: Word32 Q6_R_vavgh_RR_rnd(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vavgh_RR_rnd __builtin_HEXAGON_A2_svavghs + +/* ========================================================================== + Assembly Syntax: Rd32=vnavgh(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vnavgh_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vnavgh_RR __builtin_HEXAGON_A2_svnavgh + +/* ========================================================================== + Assembly Syntax: Rd32=vsubh(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsubh_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vsubh_RR __builtin_HEXAGON_A2_svsubh + +/* ========================================================================== + Assembly Syntax: Rd32=vsubh(Rt32,Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_vsubh_RR_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vsubh_RR_sat __builtin_HEXAGON_A2_svsubhs + +/* ========================================================================== + Assembly Syntax: Rd32=vsubuh(Rt32,Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_vsubuh_RR_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vsubuh_RR_sat __builtin_HEXAGON_A2_svsubuhs + +/* ========================================================================== + Assembly Syntax: Rd32=swiz(Rs32) + C Intrinsic Prototype: Word32 Q6_R_swiz_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_swiz_R __builtin_HEXAGON_A2_swiz + +/* ========================================================================== + Assembly Syntax: Rd32=sxtb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_sxtb_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sxtb_R __builtin_HEXAGON_A2_sxtb + +/* ========================================================================== + Assembly Syntax: Rd32=sxth(Rs32) + C Intrinsic Prototype: Word32 Q6_R_sxth_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sxth_R __builtin_HEXAGON_A2_sxth + +/* ========================================================================== + Assembly Syntax: Rdd32=sxtw(Rs32) + C Intrinsic Prototype: Word64 Q6_P_sxtw_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_sxtw_R __builtin_HEXAGON_A2_sxtw + +/* ========================================================================== + Assembly Syntax: Rd32=Rs32 + C Intrinsic Prototype: Word32 Q6_R_equals_R(Word32 Rs) + Instruction Type: 
ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_equals_R __builtin_HEXAGON_A2_tfr + +/* ========================================================================== + Assembly Syntax: Rx32.h=#u16 + C Intrinsic Prototype: Word32 Q6_Rh_equals_I(Word32 Rx, Word32 Iu16) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Rh_equals_I __builtin_HEXAGON_A2_tfrih + +/* ========================================================================== + Assembly Syntax: Rx32.l=#u16 + C Intrinsic Prototype: Word32 Q6_Rl_equals_I(Word32 Rx, Word32 Iu16) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Rl_equals_I __builtin_HEXAGON_A2_tfril + +/* ========================================================================== + Assembly Syntax: Rdd32=Rss32 + C Intrinsic Prototype: Word64 Q6_P_equals_P(Word64 Rss) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_equals_P __builtin_HEXAGON_A2_tfrp + +/* ========================================================================== + Assembly Syntax: Rdd32=#s8 + C Intrinsic Prototype: Word64 Q6_P_equals_I(Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_equals_I __builtin_HEXAGON_A2_tfrpi + +/* ========================================================================== + Assembly Syntax: Rd32=#s16 + C Intrinsic Prototype: Word32 Q6_R_equals_I(Word32 Is16) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_equals_I __builtin_HEXAGON_A2_tfrsi + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsh(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsh_P __builtin_HEXAGON_A2_vabsh + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsh(Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vabsh_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsh_P_sat __builtin_HEXAGON_A2_vabshsat + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsw(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsw_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsw_P __builtin_HEXAGON_A2_vabsw + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsw(Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vabsw_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsw_P_sat __builtin_HEXAGON_A2_vabswsat + +/* ========================================================================== + Assembly Syntax: 
Rdd32=vaddb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vaddb_PP __builtin_HEXAGON_A2_vaddb_map + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddh_PP __builtin_HEXAGON_A2_vaddh + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vaddh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddh_PP_sat __builtin_HEXAGON_A2_vaddhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddub_PP __builtin_HEXAGON_A2_vaddub + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vaddub_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddub_PP_sat __builtin_HEXAGON_A2_vaddubs + +/* ========================================================================== + Assembly Syntax: Rdd32=vadduh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vadduh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vadduh_PP_sat __builtin_HEXAGON_A2_vadduhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddw_PP __builtin_HEXAGON_A2_vaddw + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vaddw_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddw_PP_sat __builtin_HEXAGON_A2_vaddws + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavgh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgh_PP __builtin_HEXAGON_A2_vavgh + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):crnd + C Intrinsic Prototype: Word64 
Q6_P_vavgh_PP_crnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgh_PP_crnd __builtin_HEXAGON_A2_vavghcr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgh_PP_rnd __builtin_HEXAGON_A2_vavghr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavgub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgub_PP __builtin_HEXAGON_A2_vavgub + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavgub_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgub_PP_rnd __builtin_HEXAGON_A2_vavgubr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavguh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguh_PP __builtin_HEXAGON_A2_vavguh + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavguh_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguh_PP_rnd __builtin_HEXAGON_A2_vavguhr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavguw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguw_PP __builtin_HEXAGON_A2_vavguw + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavguw_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguw_PP_rnd __builtin_HEXAGON_A2_vavguwr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavgw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgw_PP __builtin_HEXAGON_A2_vavgw + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):crnd + C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_crnd(Word64 Rss, Word64 Rtt) + 
Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgw_PP_crnd __builtin_HEXAGON_A2_vavgwcr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgw_PP_rnd __builtin_HEXAGON_A2_vavgwr + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_eq_PP __builtin_HEXAGON_A2_vcmpbeq + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gtu_PP __builtin_HEXAGON_A2_vcmpbgtu + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_eq_PP __builtin_HEXAGON_A2_vcmpheq + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gt_PP __builtin_HEXAGON_A2_vcmphgt + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gtu_PP __builtin_HEXAGON_A2_vcmphgtu + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_eq_PP __builtin_HEXAGON_A2_vcmpweq + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gt_PP __builtin_HEXAGON_A2_vcmpwgt + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_p_vcmpw_gtu_PP __builtin_HEXAGON_A2_vcmpwgtu + +/* ========================================================================== + Assembly Syntax: Rdd32=vconj(Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vconj_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vconj_P_sat __builtin_HEXAGON_A2_vconj + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxb(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxb_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxb_PP __builtin_HEXAGON_A2_vmaxb + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxh_PP __builtin_HEXAGON_A2_vmaxh + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxub_PP __builtin_HEXAGON_A2_vmaxub + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxuh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxuh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxuh_PP __builtin_HEXAGON_A2_vmaxuh + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxuw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxuw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxuw_PP __builtin_HEXAGON_A2_vmaxuw + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxw_PP __builtin_HEXAGON_A2_vmaxw + +/* ========================================================================== + Assembly Syntax: Rdd32=vminb(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminb_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminb_PP __builtin_HEXAGON_A2_vminb + +/* ========================================================================== + Assembly Syntax: Rdd32=vminh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminh_PP __builtin_HEXAGON_A2_vminh + 
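+/* Editor's note: a minimal usage sketch, not part of the upstream header.
+ * It shows how the packed max/min intrinsics above compose; the helper
+ * name `clamp_vh` and its parameters are hypothetical, and Word64 here is
+ * a plain 64-bit integer holding four signed halfword lanes. */
+#ifdef __hexagon__
+/* Clamp each signed halfword lane of v to [lo, hi] (lo/hi replicated per lane). */
+static inline long long clamp_vh(long long v, long long lo, long long hi) {
+  return Q6_P_vminh_PP(Q6_P_vmaxh_PP(v, lo), hi);
+}
+#endif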
+/* ========================================================================== + Assembly Syntax: Rdd32=vminub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminub_PP __builtin_HEXAGON_A2_vminub + +/* ========================================================================== + Assembly Syntax: Rdd32=vminuh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminuh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminuh_PP __builtin_HEXAGON_A2_vminuh + +/* ========================================================================== + Assembly Syntax: Rdd32=vminuw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminuw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminuw_PP __builtin_HEXAGON_A2_vminuw + +/* ========================================================================== + Assembly Syntax: Rdd32=vminw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminw_PP __builtin_HEXAGON_A2_vminw + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgh_PP __builtin_HEXAGON_A2_vnavgh + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):crnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_crnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgh_PP_crnd_sat __builtin_HEXAGON_A2_vnavghcr + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_rnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgh_PP_rnd_sat __builtin_HEXAGON_A2_vnavghr + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgw_PP __builtin_HEXAGON_A2_vnavgw + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):crnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_crnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgw_PP_crnd_sat __builtin_HEXAGON_A2_vnavgwcr + +/* 
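+   Editor's usage sketch (not part of the upstream header) follows. */
+
+/* A hedged sketch of the negated-average forms above: per the assembly
+ * syntax vnavgw(Rtt32,Rss32), Q6_P_vnavgw_PP_crnd_sat computes the halved
+ * lane-wise difference (a - b)/2 for each 32-bit lane, with convergent
+ * rounding and saturation. `halved_diff_w` is a hypothetical helper name. */
+#ifdef __hexagon__
+static inline long long halved_diff_w(long long a, long long b) {
+  return Q6_P_vnavgw_PP_crnd_sat(a, b);
+}
+#endif
+
+/*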
========================================================================== + Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_rnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgw_PP_rnd_sat __builtin_HEXAGON_A2_vnavgwr + +/* ========================================================================== + Assembly Syntax: Rdd32=vraddub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vraddub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vraddub_PP __builtin_HEXAGON_A2_vraddub + +/* ========================================================================== + Assembly Syntax: Rxx32+=vraddub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vraddubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vraddubacc_PP __builtin_HEXAGON_A2_vraddub_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vrsadub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrsadub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrsadub_PP __builtin_HEXAGON_A2_vrsadub + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrsadub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrsadubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrsadubacc_PP __builtin_HEXAGON_A2_vrsadub_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vsubb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vsubb_PP __builtin_HEXAGON_A2_vsubb_map + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsubh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubh_PP __builtin_HEXAGON_A2_vsubh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubh_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubh_PP_sat __builtin_HEXAGON_A2_vsubhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsubub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubub_PP __builtin_HEXAGON_A2_vsubub + +/* 
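+   Editor's usage sketch (not part of the upstream header) follows. */
+
+/* A hedged sketch of the accumulating form above: Rxx32+=vrsadub(Rss32,Rtt32)
+ * maps to Q6_P_vrsadubacc_PP with the running accumulator as the first
+ * argument, reducing byte-wise absolute differences into packed 32-bit
+ * partial sums. `sad_u8_blocks` and its parameters are hypothetical. */
+#ifdef __hexagon__
+static inline long long sad_u8_blocks(const long long *a, const long long *b, int n) {
+  long long acc = 0;  /* holds packed 32-bit partial sums */
+  for (int i = 0; i < n; ++i)
+    acc = Q6_P_vrsadubacc_PP(acc, a[i], b[i]);
+  return acc;
+}
+#endif
+
+/*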
========================================================================== + Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubub_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubub_PP_sat __builtin_HEXAGON_A2_vsububs + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubuh(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubuh_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubuh_PP_sat __builtin_HEXAGON_A2_vsubuhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsubw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubw_PP __builtin_HEXAGON_A2_vsubw + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubw_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubw_PP_sat __builtin_HEXAGON_A2_vsubws + +/* ========================================================================== + Assembly Syntax: Rd32=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xor_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_xor_RR __builtin_HEXAGON_A2_xor + +/* ========================================================================== + Assembly Syntax: Rdd32=xor(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_xor_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_xor_PP __builtin_HEXAGON_A2_xorp + +/* ========================================================================== + Assembly Syntax: Rd32=zxtb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_zxtb_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_zxtb_R __builtin_HEXAGON_A2_zxtb + +/* ========================================================================== + Assembly Syntax: Rd32=zxth(Rs32) + C Intrinsic Prototype: Word32 Q6_R_zxth_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_zxth_R __builtin_HEXAGON_A2_zxth + +/* ========================================================================== + Assembly Syntax: Rd32=and(Rt32,~Rs32) + C Intrinsic Prototype: Word32 Q6_R_and_RnR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_and_RnR __builtin_HEXAGON_A4_andn + +/* ========================================================================== + Assembly Syntax: Rdd32=and(Rtt32,~Rss32) + C Intrinsic Prototype: Word64 
Q6_P_and_PnP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_and_PnP __builtin_HEXAGON_A4_andnp + +/* ========================================================================== + Assembly Syntax: Rdd32=bitsplit(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_bitsplit_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_bitsplit_RR __builtin_HEXAGON_A4_bitsplit + +/* ========================================================================== + Assembly Syntax: Rdd32=bitsplit(Rs32,#u5) + C Intrinsic Prototype: Word64 Q6_P_bitsplit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_bitsplit_RI __builtin_HEXAGON_A4_bitspliti + +/* ========================================================================== + Assembly Syntax: Pd4=boundscheck(Rs32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_boundscheck_RP(Word32 Rs, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_boundscheck_RP __builtin_HEXAGON_A4_boundscheck + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_eq_RR __builtin_HEXAGON_A4_cmpbeq + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.eq(Rs32,#u8) + C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RI(Word32 Rs, Word32 Iu8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_eq_RI __builtin_HEXAGON_A4_cmpbeqi + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gt_RR __builtin_HEXAGON_A4_cmpbgt + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gt(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gt_RI __builtin_HEXAGON_A4_cmpbgti + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gtu_RR __builtin_HEXAGON_A4_cmpbgtu + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gtu(Rs32,#u7) + C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RI(Word32 Rs, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_p_cmpb_gtu_RI __builtin_HEXAGON_A4_cmpbgtui + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmph_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_eq_RR __builtin_HEXAGON_A4_cmpheq + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.eq(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmph_eq_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_eq_RI __builtin_HEXAGON_A4_cmpheqi + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmph_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gt_RR __builtin_HEXAGON_A4_cmphgt + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gt(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmph_gt_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gt_RI __builtin_HEXAGON_A4_cmphgti + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gtu_RR __builtin_HEXAGON_A4_cmphgtu + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gtu(Rs32,#u7) + C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RI(Word32 Rs, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gtu_RI __builtin_HEXAGON_A4_cmphgtui + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(#s8,Rs32) + C Intrinsic Prototype: Word64 Q6_P_combine_IR(Word32 Is8, Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_IR __builtin_HEXAGON_A4_combineir + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(Rs32,#s8) + C Intrinsic Prototype: Word64 Q6_P_combine_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_RI __builtin_HEXAGON_A4_combineri + +/* ========================================================================== + Assembly Syntax: Rd32=cround(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_cround_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cround_RI 
__builtin_HEXAGON_A4_cround_ri + +/* ========================================================================== + Assembly Syntax: Rd32=cround(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_cround_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cround_RR __builtin_HEXAGON_A4_cround_rr + +/* ========================================================================== + Assembly Syntax: Rd32=modwrap(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_modwrap_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_modwrap_RR __builtin_HEXAGON_A4_modwrapu + +/* ========================================================================== + Assembly Syntax: Rd32=or(Rt32,~Rs32) + C Intrinsic Prototype: Word32 Q6_R_or_RnR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_or_RnR __builtin_HEXAGON_A4_orn + +/* ========================================================================== + Assembly Syntax: Rdd32=or(Rtt32,~Rss32) + C Intrinsic Prototype: Word64 Q6_P_or_PnP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_or_PnP __builtin_HEXAGON_A4_ornp + +/* ========================================================================== + Assembly Syntax: Rd32=cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_cmp_eq_RR __builtin_HEXAGON_A4_rcmpeq + +/* ========================================================================== + Assembly Syntax: Rd32=cmp.eq(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_cmp_eq_RI __builtin_HEXAGON_A4_rcmpeqi + +/* ========================================================================== + Assembly Syntax: Rd32=!cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_not_cmp_eq_RR __builtin_HEXAGON_A4_rcmpneq + +/* ========================================================================== + Assembly Syntax: Rd32=!cmp.eq(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_not_cmp_eq_RI __builtin_HEXAGON_A4_rcmpneqi + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_round_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RI __builtin_HEXAGON_A4_round_ri + +/* ========================================================================== 
+ Assembly Syntax: Rd32=round(Rs32,#u5):sat + C Intrinsic Prototype: Word32 Q6_R_round_RI_sat(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RI_sat __builtin_HEXAGON_A4_round_ri_sat + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_round_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RR __builtin_HEXAGON_A4_round_rr + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_round_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RR_sat __builtin_HEXAGON_A4_round_rr_sat + +/* ========================================================================== + Assembly Syntax: Pd4=tlbmatch(Rss32,Rt32) + C Intrinsic Prototype: Byte Q6_p_tlbmatch_PR(Word64 Rss, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_tlbmatch_PR __builtin_HEXAGON_A4_tlbmatch + +/* ========================================================================== + Assembly Syntax: Pd4=any8(vcmpb.eq(Rss32,Rtt32)) + C Intrinsic Prototype: Byte Q6_p_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_any8_vcmpb_eq_PP __builtin_HEXAGON_A4_vcmpbeq_any + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.eq(Rss32,#u8) + C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PI(Word64 Rss, Word32 Iu8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_eq_PI __builtin_HEXAGON_A4_vcmpbeqi + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gt_PP __builtin_HEXAGON_A4_vcmpbgt + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gt(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gt_PI __builtin_HEXAGON_A4_vcmpbgti + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gtu(Rss32,#u7) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PI(Word64 Rss, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gtu_PI __builtin_HEXAGON_A4_vcmpbgtui + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.eq(Rss32,#s8) + C 
Intrinsic Prototype: Byte Q6_p_vcmph_eq_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_eq_PI __builtin_HEXAGON_A4_vcmpheqi + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gt(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gt_PI __builtin_HEXAGON_A4_vcmphgti + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gtu(Rss32,#u7) + C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PI(Word64 Rss, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gtu_PI __builtin_HEXAGON_A4_vcmphgtui + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.eq(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_eq_PI __builtin_HEXAGON_A4_vcmpweqi + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gt(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gt_PI __builtin_HEXAGON_A4_vcmpwgti + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gtu(Rss32,#u7) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PI(Word64 Rss, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gtu_PI __builtin_HEXAGON_A4_vcmpwgtui + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxh_PR __builtin_HEXAGON_A4_vrmaxh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxuh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxuh_PR __builtin_HEXAGON_A4_vrmaxuh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxuw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxuw_PR __builtin_HEXAGON_A4_vrmaxuw + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxw(Rss32,Ru32) + C Intrinsic Prototype: Word64 
Q6_P_vrmaxw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxw_PR __builtin_HEXAGON_A4_vrmaxw + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminh_PR __builtin_HEXAGON_A4_vrminh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminuh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminuh_PR __builtin_HEXAGON_A4_vrminuh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminuw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminuw_PR __builtin_HEXAGON_A4_vrminuw + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminw_PR __builtin_HEXAGON_A4_vrminw + +/* ========================================================================== + Assembly Syntax: Rd32=vaddhub(Rss32,Rtt32):sat + C Intrinsic Prototype: Word32 Q6_R_vaddhub_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vaddhub_PP_sat __builtin_HEXAGON_A5_vaddhubs + +/* ========================================================================== + Assembly Syntax: Pd4=all8(Ps4) + C Intrinsic Prototype: Byte Q6_p_all8_p(Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_all8_p __builtin_HEXAGON_C2_all8 + +/* ========================================================================== + Assembly Syntax: Pd4=and(Pt4,Ps4) + C Intrinsic Prototype: Byte Q6_p_and_pp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_pp __builtin_HEXAGON_C2_and + +/* ========================================================================== + Assembly Syntax: Pd4=and(Pt4,!Ps4) + C Intrinsic Prototype: Byte Q6_p_and_pnp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_pnp __builtin_HEXAGON_C2_andn + +/* ========================================================================== + Assembly Syntax: Pd4=any8(Ps4) + C Intrinsic Prototype: Byte Q6_p_any8_p(Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_p_any8_p __builtin_HEXAGON_C2_any8 + +/* ========================================================================== + Assembly Syntax: Pd4=bitsclr(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_bitsclr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_bitsclr_RR __builtin_HEXAGON_C2_bitsclr + +/* ========================================================================== + Assembly Syntax: Pd4=bitsclr(Rs32,#u6) + C Intrinsic Prototype: Byte Q6_p_bitsclr_RI(Word32 Rs, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_bitsclr_RI __builtin_HEXAGON_C2_bitsclri + +/* ========================================================================== + Assembly Syntax: Pd4=bitsset(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_bitsset_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_bitsset_RR __builtin_HEXAGON_C2_bitsset + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_eq_RR __builtin_HEXAGON_C2_cmpeq + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.eq(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_cmp_eq_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_eq_RI __builtin_HEXAGON_C2_cmpeqi + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_cmp_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmp_eq_PP __builtin_HEXAGON_C2_cmpeqp + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.ge(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmp_ge_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_ge_RI __builtin_HEXAGON_C2_cmpgei + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.geu(Rs32,#u8) + C Intrinsic Prototype: Byte Q6_p_cmp_geu_RI(Word32 Rs, Word32 Iu8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_geu_RI __builtin_HEXAGON_C2_cmpgeui + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gt_RR __builtin_HEXAGON_C2_cmpgt + 
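+/* Editor's note: a minimal sketch, not part of the upstream header. Per the
+ * prototypes above, the Q6_p_* compares return their Pd4 result as a Byte
+ * that is nonzero exactly when the comparison holds, so it can feed a C
+ * conditional directly. `in_open_interval` is a hypothetical helper. */
+#ifdef __hexagon__
+static inline int in_open_interval(int lo, int x, int hi) {
+  /* lo < x && x < hi via two Pd4=cmp.gt(Rs32,Rt32) predicates */
+  return Q6_p_cmp_gt_RR(x, lo) && Q6_p_cmp_gt_RR(hi, x);
+}
+#endif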
+/* ========================================================================== + Assembly Syntax: Pd4=cmp.gt(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_cmp_gt_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gt_RI __builtin_HEXAGON_C2_cmpgti + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmp_gt_PP __builtin_HEXAGON_C2_cmpgtp + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gtu_RR __builtin_HEXAGON_C2_cmpgtu + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gtu(Rs32,#u9) + C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RI(Word32 Rs, Word32 Iu9) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gtu_RI __builtin_HEXAGON_C2_cmpgtui + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmp_gtu_PP __builtin_HEXAGON_C2_cmpgtup + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.lt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_lt_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_lt_RR __builtin_HEXAGON_C2_cmplt + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.ltu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_ltu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_ltu_RR __builtin_HEXAGON_C2_cmpltu + +/* ========================================================================== + Assembly Syntax: Rdd32=mask(Pt4) + C Intrinsic Prototype: Word64 Q6_P_mask_p(Byte Pt) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mask_p __builtin_HEXAGON_C2_mask + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mux_pRR(Byte Pu, Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pRR __builtin_HEXAGON_C2_mux + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,#s8,#S8) + C Intrinsic 
Prototype: Word32 Q6_R_mux_pII(Byte Pu, Word32 Is8, Word32 IS8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pII __builtin_HEXAGON_C2_muxii + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_mux_pRI(Byte Pu, Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pRI __builtin_HEXAGON_C2_muxir + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,#s8,Rs32) + C Intrinsic Prototype: Word32 Q6_R_mux_pIR(Byte Pu, Word32 Is8, Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pIR __builtin_HEXAGON_C2_muxri + +/* ========================================================================== + Assembly Syntax: Pd4=not(Ps4) + C Intrinsic Prototype: Byte Q6_p_not_p(Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_p __builtin_HEXAGON_C2_not + +/* ========================================================================== + Assembly Syntax: Pd4=or(Pt4,Ps4) + C Intrinsic Prototype: Byte Q6_p_or_pp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_pp __builtin_HEXAGON_C2_or + +/* ========================================================================== + Assembly Syntax: Pd4=or(Pt4,!Ps4) + C Intrinsic Prototype: Byte Q6_p_or_pnp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_pnp __builtin_HEXAGON_C2_orn + +/* ========================================================================== + Assembly Syntax: Pd4=Ps4 + C Intrinsic Prototype: Byte Q6_p_equals_p(Byte Ps) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_equals_p __builtin_HEXAGON_C2_pxfer_map + +/* ========================================================================== + Assembly Syntax: Rd32=Ps4 + C Intrinsic Prototype: Word32 Q6_R_equals_p(Byte Ps) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_equals_p __builtin_HEXAGON_C2_tfrpr + +/* ========================================================================== + Assembly Syntax: Pd4=Rs32 + C Intrinsic Prototype: Byte Q6_p_equals_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_equals_R __builtin_HEXAGON_C2_tfrrp + +/* ========================================================================== + Assembly Syntax: Rd32=vitpack(Ps4,Pt4) + C Intrinsic Prototype: Word32 Q6_R_vitpack_pp(Byte Ps, Byte Pt) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vitpack_pp __builtin_HEXAGON_C2_vitpack + +/* 
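+   Editor's usage sketch (not part of the upstream header) follows. */
+
+/* A hedged sketch tying a predicate-producing compare to the mux forms
+ * above: Rd32=mux(Pu4,Rs32,Rt32) selects Rs when the predicate is true,
+ * else Rt. `select_max` is a hypothetical helper. */
+#ifdef __hexagon__
+static inline int select_max(int a, int b) {
+  return Q6_R_mux_pRR(Q6_p_cmp_gt_RR(a, b), a, b);
+}
+#endif
+
+/*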
========================================================================== + Assembly Syntax: Rdd32=vmux(Pu4,Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vmux_pPP(Byte Pu, Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmux_pPP __builtin_HEXAGON_C2_vmux + +/* ========================================================================== + Assembly Syntax: Pd4=xor(Ps4,Pt4) + C Intrinsic Prototype: Byte Q6_p_xor_pp(Byte Ps, Byte Pt) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_xor_pp __builtin_HEXAGON_C2_xor + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,and(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_and_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_and_ppp __builtin_HEXAGON_C4_and_and + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,and(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_and_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_and_ppnp __builtin_HEXAGON_C4_and_andn + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,or(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_or_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_or_ppp __builtin_HEXAGON_C4_and_or + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,or(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_or_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_or_ppnp __builtin_HEXAGON_C4_and_orn + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gt_RR __builtin_HEXAGON_C4_cmplte + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.gt(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gt_RI __builtin_HEXAGON_C4_cmpltei + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gtu_RR __builtin_HEXAGON_C4_cmplteu + +/* ========================================================================== + Assembly 
Syntax: Pd4=!cmp.gtu(Rs32,#u9) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RI(Word32 Rs, Word32 Iu9) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gtu_RI __builtin_HEXAGON_C4_cmplteui + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_eq_RR __builtin_HEXAGON_C4_cmpneq + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.eq(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_eq_RI __builtin_HEXAGON_C4_cmpneqi + +/* ========================================================================== + Assembly Syntax: Pd4=fastcorner9(Ps4,Pt4) + C Intrinsic Prototype: Byte Q6_p_fastcorner9_pp(Byte Ps, Byte Pt) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9 + +/* ========================================================================== + Assembly Syntax: Pd4=!fastcorner9(Ps4,Pt4) + C Intrinsic Prototype: Byte Q6_p_not_fastcorner9_pp(Byte Ps, Byte Pt) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9_not + +/* ========================================================================== + Assembly Syntax: Pd4=!bitsclr(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_bitsclr_RR __builtin_HEXAGON_C4_nbitsclr + +/* ========================================================================== + Assembly Syntax: Pd4=!bitsclr(Rs32,#u6) + C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RI(Word32 Rs, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_bitsclr_RI __builtin_HEXAGON_C4_nbitsclri + +/* ========================================================================== + Assembly Syntax: Pd4=!bitsset(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_bitsset_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_bitsset_RR __builtin_HEXAGON_C4_nbitsset + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,and(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_and_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_and_ppp __builtin_HEXAGON_C4_or_and + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,and(Pt4,!Pu4)) + C 
Intrinsic Prototype: Byte Q6_p_or_and_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_and_ppnp __builtin_HEXAGON_C4_or_andn + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,or(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_or_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_or_ppp __builtin_HEXAGON_C4_or_or + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,or(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_or_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_or_ppnp __builtin_HEXAGON_C4_or_orn + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_d2df(Rss32) + C Intrinsic Prototype: Float64 Q6_P_convert_d2df_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_d2df_P __builtin_HEXAGON_F2_conv_d2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_d2sf(Rss32) + C Intrinsic Prototype: Float32 Q6_R_convert_d2sf_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_d2sf_P __builtin_HEXAGON_F2_conv_d2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2d(Rss32) + C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2d_P __builtin_HEXAGON_F2_conv_df2d + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2d(Rss32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2d_P_chop __builtin_HEXAGON_F2_conv_df2d_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2sf(Rss32) + C Intrinsic Prototype: Float32 Q6_R_convert_df2sf_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2sf_P __builtin_HEXAGON_F2_conv_df2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2ud(Rss32) + C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2ud_P __builtin_HEXAGON_F2_conv_df2ud + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2ud(Rss32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P_chop(Float64 
Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2ud_P_chop __builtin_HEXAGON_F2_conv_df2ud_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2uw(Rss32) + C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2uw_P __builtin_HEXAGON_F2_conv_df2uw + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2uw(Rss32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2uw_P_chop __builtin_HEXAGON_F2_conv_df2uw_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2w(Rss32) + C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2w_P __builtin_HEXAGON_F2_conv_df2w + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2w(Rss32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2w_P_chop __builtin_HEXAGON_F2_conv_df2w_chop + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2d(Rs32) + C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2d_R __builtin_HEXAGON_F2_conv_sf2d + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2d(Rs32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2d_R_chop __builtin_HEXAGON_F2_conv_sf2d_chop + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2df(Rs32) + C Intrinsic Prototype: Float64 Q6_P_convert_sf2df_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2df_R __builtin_HEXAGON_F2_conv_sf2df + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2ud(Rs32) + C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2ud_R __builtin_HEXAGON_F2_conv_sf2ud + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2ud(Rs32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R_chop(Float32 
Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2ud_R_chop __builtin_HEXAGON_F2_conv_sf2ud_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2uw(Rs32) + C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2uw_R __builtin_HEXAGON_F2_conv_sf2uw + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2uw(Rs32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2uw_R_chop __builtin_HEXAGON_F2_conv_sf2uw_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2w(Rs32) + C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2w_R __builtin_HEXAGON_F2_conv_sf2w + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2w(Rs32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2w_R_chop __builtin_HEXAGON_F2_conv_sf2w_chop + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_ud2df(Rss32) + C Intrinsic Prototype: Float64 Q6_P_convert_ud2df_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_ud2df_P __builtin_HEXAGON_F2_conv_ud2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_ud2sf(Rss32) + C Intrinsic Prototype: Float32 Q6_R_convert_ud2sf_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_ud2sf_P __builtin_HEXAGON_F2_conv_ud2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_uw2df(Rs32) + C Intrinsic Prototype: Float64 Q6_P_convert_uw2df_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_uw2df_R __builtin_HEXAGON_F2_conv_uw2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_uw2sf(Rs32) + C Intrinsic Prototype: Float32 Q6_R_convert_uw2sf_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_uw2sf_R __builtin_HEXAGON_F2_conv_uw2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_w2df(Rs32) + C Intrinsic Prototype: Float64 Q6_P_convert_w2df_R(Word32 Rs) + Instruction Type: S_2op + 
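+      Note (editorial, not from the upstream header): the convert_*
+      intrinsics in this run round according to the current floating-point
+      rounding mode, while the :chop variants truncate toward zero, which
+      matches C cast semantics, e.g. Word32 w = Q6_R_convert_sf2w_R_chop(f);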
Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_w2df_R __builtin_HEXAGON_F2_conv_w2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_w2sf(Rs32) + C Intrinsic Prototype: Float32 Q6_R_convert_w2sf_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_w2sf_R __builtin_HEXAGON_F2_conv_w2sf + +/* ========================================================================== + Assembly Syntax: Pd4=dfclass(Rss32,#u5) + C Intrinsic Prototype: Byte Q6_p_dfclass_PI(Float64 Rss, Word32 Iu5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfclass_PI __builtin_HEXAGON_F2_dfclass + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_eq_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_eq_PP __builtin_HEXAGON_F2_dfcmpeq + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.ge(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_ge_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_ge_PP __builtin_HEXAGON_F2_dfcmpge + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_gt_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_gt_PP __builtin_HEXAGON_F2_dfcmpgt + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.uo(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_uo_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_uo_PP __builtin_HEXAGON_F2_dfcmpuo + +/* ========================================================================== + Assembly Syntax: Rdd32=dfmake(#u10):neg + C Intrinsic Prototype: Float64 Q6_P_dfmake_I_neg(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmake_I_neg __builtin_HEXAGON_F2_dfimm_n + +/* ========================================================================== + Assembly Syntax: Rdd32=dfmake(#u10):pos + C Intrinsic Prototype: Float64 Q6_P_dfmake_I_pos(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmake_I_pos __builtin_HEXAGON_F2_dfimm_p + +/* ========================================================================== + Assembly Syntax: Rd32=sfadd(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfadd_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== 
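+ 
+    Editorial illustration (not part of the upstream header):
+      Float32 sum = Q6_R_sfadd_RR(a, b);      // IEEE single-precision a + b
+    The dfcmp.* predicates above return a predicate byte; dfcmp.uo in
+    particular is true when either operand is NaN (unordered).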
*/ + +#define Q6_R_sfadd_RR __builtin_HEXAGON_F2_sfadd + +/* ========================================================================== + Assembly Syntax: Pd4=sfclass(Rs32,#u5) + C Intrinsic Prototype: Byte Q6_p_sfclass_RI(Float32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfclass_RI __builtin_HEXAGON_F2_sfclass + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_eq_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_eq_RR __builtin_HEXAGON_F2_sfcmpeq + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.ge(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_ge_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_ge_RR __builtin_HEXAGON_F2_sfcmpge + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_gt_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_gt_RR __builtin_HEXAGON_F2_sfcmpgt + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.uo(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_uo_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_uo_RR __builtin_HEXAGON_F2_sfcmpuo + +/* ========================================================================== + Assembly Syntax: Rd32=sffixupd(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sffixupd_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sffixupd_RR __builtin_HEXAGON_F2_sffixupd + +/* ========================================================================== + Assembly Syntax: Rd32=sffixupn(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sffixupn_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sffixupn_RR __builtin_HEXAGON_F2_sffixupn + +/* ========================================================================== + Assembly Syntax: Rd32=sffixupr(Rs32) + C Intrinsic Prototype: Float32 Q6_R_sffixupr_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sffixupr_R __builtin_HEXAGON_F2_sffixupr + +/* ========================================================================== + Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpyacc_RR __builtin_HEXAGON_F2_sffma + +/* 
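+   Editorial note: Q6_R_sfmpyacc_RR above maps to a fused multiply-add, so
+   acc = Q6_R_sfmpyacc_RR(acc, x, y); computes acc + x*y with a single
+   rounding step, and the sffixup* intrinsics prepare operands for the
+   software divide/square-root sequences. Illustration only.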
========================================================================== + Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32):lib + C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpyacc_RR_lib __builtin_HEXAGON_F2_sffma_lib + +/* ========================================================================== + Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32,Pu4):scale + C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RRp_scale(Float32 Rx, Float32 Rs, Float32 Rt, Byte Pu) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpyacc_RRp_scale __builtin_HEXAGON_F2_sffma_sc + +/* ========================================================================== + Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpynac_RR __builtin_HEXAGON_F2_sffms + +/* ========================================================================== + Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32):lib + C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpynac_RR_lib __builtin_HEXAGON_F2_sffms_lib + +/* ========================================================================== + Assembly Syntax: Rd32=sfmake(#u10):neg + C Intrinsic Prototype: Float32 Q6_R_sfmake_I_neg(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmake_I_neg __builtin_HEXAGON_F2_sfimm_n + +/* ========================================================================== + Assembly Syntax: Rd32=sfmake(#u10):pos + C Intrinsic Prototype: Float32 Q6_R_sfmake_I_pos(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmake_I_pos __builtin_HEXAGON_F2_sfimm_p + +/* ========================================================================== + Assembly Syntax: Rd32=sfmax(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmax_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmax_RR __builtin_HEXAGON_F2_sfmax + +/* ========================================================================== + Assembly Syntax: Rd32=sfmin(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmin_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmin_RR __builtin_HEXAGON_F2_sfmin + +/* ========================================================================== + Assembly Syntax: Rd32=sfmpy(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmpy_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpy_RR __builtin_HEXAGON_F2_sfmpy + +/* 
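+   Editorial note: sfmake/dfmake build common floating-point constants
+   from a 10-bit immediate (sign selected by :pos/:neg), avoiding a
+   constant load from memory. Illustration only.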
========================================================================== + Assembly Syntax: Rd32=sfsub(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfsub_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfsub_RR __builtin_HEXAGON_F2_sfsub + +/* ========================================================================== + Assembly Syntax: Rd32=memb(Rx32++#s4:0:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memb_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memb_IM_circ __builtin_HEXAGON_L2_loadrb_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memb(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memb_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memb_M_circ __builtin_HEXAGON_L2_loadrb_pcr + +/* ========================================================================== + Assembly Syntax: Rdd32=memd(Rx32++#s4:3:circ(Mu2)) + C Intrinsic Prototype: Word64 Q6_P_memd_IM_circ(void** Rx, Word32 Is4_3, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_P_memd_IM_circ __builtin_HEXAGON_L2_loadrd_pci + +/* ========================================================================== + Assembly Syntax: Rdd32=memd(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word64 Q6_P_memd_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_P_memd_M_circ __builtin_HEXAGON_L2_loadrd_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memh(Rx32++#s4:1:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memh_IM_circ __builtin_HEXAGON_L2_loadrh_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memh(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memh_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memh_M_circ __builtin_HEXAGON_L2_loadrh_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memw(Rx32++#s4:2:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memw_IM_circ(void** Rx, Word32 Is4_2, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memw_IM_circ __builtin_HEXAGON_L2_loadri_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memw(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memw_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + 
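+      Usage note (editorial sketch): the _circ loads post-increment *Rx and
+      wrap it within a circular buffer configured through the modifier
+      register Mu, e.g. Word32 v = Q6_R_memw_M_circ(&p, Mu, buf);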
Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memw_M_circ __builtin_HEXAGON_L2_loadri_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memub(Rx32++#s4:0:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memub_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memub_IM_circ __builtin_HEXAGON_L2_loadrub_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memub(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memub_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memub_M_circ __builtin_HEXAGON_L2_loadrub_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memuh(Rx32++#s4:1:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memuh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memuh_IM_circ __builtin_HEXAGON_L2_loadruh_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memuh(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memuh_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memuh_M_circ __builtin_HEXAGON_L2_loadruh_pcr + +/* ========================================================================== + Assembly Syntax: Rx32+=add(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_addacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addacc_RR __builtin_HEXAGON_M2_acci + +/* ========================================================================== + Assembly Syntax: Rx32+=add(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_addacc_RI(Word32 Rx, Word32 Rs, Word32 Is8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addacc_RI __builtin_HEXAGON_M2_accii + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyi(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyiacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyiacc_RR __builtin_HEXAGON_M2_cmaci_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyr(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyracc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyracc_RR __builtin_HEXAGON_M2_cmacr_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):sat + C Intrinsic Prototype: 
Word64 Q6_P_cmpyacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_sat __builtin_HEXAGON_M2_cmacs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_s1_sat __builtin_HEXAGON_M2_cmacs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_conj_sat __builtin_HEXAGON_M2_cmacsc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_conj_s1_sat __builtin_HEXAGON_M2_cmacsc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyi(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyi_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyi_RR __builtin_HEXAGON_M2_cmpyi_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyr(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyr_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyr_RR __builtin_HEXAGON_M2_cmpyr_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_conj_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s0 + +/* ========================================================================== + Assembly Syntax: 
Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_conj_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_sat __builtin_HEXAGON_M2_cmpys_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_s1_sat __builtin_HEXAGON_M2_cmpys_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_conj_sat __builtin_HEXAGON_M2_cmpysc_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_conj_s1_sat __builtin_HEXAGON_M2_cmpysc_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_sat __builtin_HEXAGON_M2_cnacs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_s1_sat __builtin_HEXAGON_M2_cnacs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_conj_sat __builtin_HEXAGON_M2_cnacsc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_conj_s1_sat __builtin_HEXAGON_M2_cnacsc_s1 + +/* 
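+   Editorial note on the cmpy family above: each Word32 operand packs a
+   complex value, real part in the low halfword and imaginary part in the
+   high halfword; the Rt32* forms conjugate the second operand before
+   multiplying. Illustration only.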
========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RR __builtin_HEXAGON_M2_dpmpyss_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RR __builtin_HEXAGON_M2_dpmpyss_nac_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RR_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR_rnd __builtin_HEXAGON_M2_dpmpyss_rnd_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpy_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RR __builtin_HEXAGON_M2_dpmpyss_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RR __builtin_HEXAGON_M2_dpmpyuu_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RR __builtin_HEXAGON_M2_dpmpyuu_nac_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32,Rt32) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RR __builtin_HEXAGON_M2_dpmpyuu_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRh_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyh_rs1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRh_s1_sat __builtin_HEXAGON_M2_hmmpyh_s1 + +/* 
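+   Editorial example: the dpmpy-based intrinsics above return full 64-bit
+   products, e.g. Word64 p = Q6_P_mpy_RR(a, b); for the signed case and
+   UWord64 q = Q6_P_mpyu_RR(a, b); for the unsigned one. Sketch only.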
========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRl_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyl_rs1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRl_s1_sat __builtin_HEXAGON_M2_hmmpyl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyi(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyiacc_RR __builtin_HEXAGON_M2_maci + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyi(Rs32,#u8) + C Intrinsic Prototype: Word32 Q6_R_mpyinac_RI(Word32 Rx, Word32 Rs, Word32 Iu8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyinac_RI __builtin_HEXAGON_M2_macsin + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyi(Rs32,#u8) + C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RI(Word32 Rx, Word32 Rs, Word32 Iu8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyiacc_RI __builtin_HEXAGON_M2_macsip + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_rnd_sat __builtin_HEXAGON_M2_mmachs_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmachs_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_sat __builtin_HEXAGON_M2_mmachs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + 
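+      Usage note (editorial): vmpyweh/vmpywoh multiply each word of Rss by
+      the even (low) or odd (high) halfword of the matching word of Rtt,
+      and the accumulating forms add the per-lane results into Rxx, e.g.
+      acc = Q6_P_vmpywohacc_PP_s1_sat(acc, xs, ys);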
========================================================================== */ + +#define Q6_P_vmpywohacc_PP_s1_sat __builtin_HEXAGON_M2_mmachs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacls_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacls_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_sat __builtin_HEXAGON_M2_mmacls_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_s1_sat __builtin_HEXAGON_M2_mmacls_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_sat __builtin_HEXAGON_M2_mmacuhs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_s1_sat 
__builtin_HEXAGON_M2_mmacuhs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_sat __builtin_HEXAGON_M2_mmaculs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_s1_sat __builtin_HEXAGON_M2_mmaculs_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_sat __builtin_HEXAGON_M2_mmpyh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_s1_sat __builtin_HEXAGON_M2_mmpyh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 
Q6_P_vmpyweh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_sat __builtin_HEXAGON_M2_mmpyl_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_s1_sat __builtin_HEXAGON_M2_mmpyl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_sat __builtin_HEXAGON_M2_mmpyuh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_s1_sat __builtin_HEXAGON_M2_mmpyuh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs0 + +/* 
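+   Editorial note on the suffixes used throughout this family: :<<1
+   doubles the raw product, :rnd adds a rounding constant before the
+   upper bits are kept, and :sat clamps to the signed range instead of
+   wrapping. Illustration only.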
========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_sat __builtin_HEXAGON_M2_mmpyul_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_s1_sat __builtin_HEXAGON_M2_mmpyul_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh __builtin_HEXAGON_M2_mpy_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpy_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl __builtin_HEXAGON_M2_mpy_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpy_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRh __builtin_HEXAGON_M2_mpy_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
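+      Usage example (editorial sketch):
+      acc = Q6_R_mpyacc_RlRh_s1(acc, x, y); // acc += (x.low16*y.high16)<<1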
========================================================================== */ + +#define Q6_R_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpy_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl __builtin_HEXAGON_M2_mpy_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpy_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1 + +/* ========================================================================== + 
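+   Editorial note: the :sat accumulate forms in this run clamp the updated
+   accumulator to the signed 32-bit range rather than wrapping.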
Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh __builtin_HEXAGON_M2_mpy_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpy_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl __builtin_HEXAGON_M2_mpy_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpy_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh __builtin_HEXAGON_M2_mpy_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpy_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl __builtin_HEXAGON_M2_mpy_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1 + C 
Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpy_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh __builtin_HEXAGON_M2_mpy_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpy_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl __builtin_HEXAGON_M2_mpy_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpy_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh __builtin_HEXAGON_M2_mpy_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpy_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl __builtin_HEXAGON_M2_mpy_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpy_nac_ll_s1 + +/* 
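+   Editorial note: the Rx32-= (mpynac) forms subtract the product instead
+   of adding it, e.g. acc = Q6_R_mpynac_RhRl(acc, x, y); computes
+   acc - x.high16*y.low16. Illustration only.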
========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd(Word32 Rs, Word32 Rt) + 
Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_sat __builtin_HEXAGON_M2_mpy_sat_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 
Q6_R_mpy_RhRh_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_sat __builtin_HEXAGON_M2_mpy_sat_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_sat __builtin_HEXAGON_M2_mpy_sat_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_sat __builtin_HEXAGON_M2_mpy_sat_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1 + +/* ========================================================================== + Assembly Syntax: 
Rd32=mpy(Rs32.h,Rt32.l):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpy_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR __builtin_HEXAGON_M2_mpy_up + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR_s1 __builtin_HEXAGON_M2_mpy_up_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR_s1_sat __builtin_HEXAGON_M2_mpy_up_s1_sat + +/* 
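+   Editor's sketch, not part of the upstream header: Q6_R_mpy_RR and friends
+   return the upper 32 bits of the 64-bit signed product, so the <<1:sat
+   form behaves like a saturating Q31 fractional multiply. A hypothetical
+   helper (q31_mul is an illustrative name, not a header name):
+
+     static inline Word32 q31_mul(Word32 a, Word32 b) {
+       // roughly sat32(((Word64)a * b) >> 31)
+       return Q6_R_mpy_RR_s1_sat(a, b);
+     }
+
+   ========================================================================== */
+
+/*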
========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRh __builtin_HEXAGON_M2_mpyd_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpyd_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRl __builtin_HEXAGON_M2_mpyd_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpyd_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRh __builtin_HEXAGON_M2_mpyd_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpyd_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRl __builtin_HEXAGON_M2_mpyd_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpyd_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_mpy_RhRh __builtin_HEXAGON_M2_mpyd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpyd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl __builtin_HEXAGON_M2_mpyd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpyd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh __builtin_HEXAGON_M2_mpyd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpyd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl __builtin_HEXAGON_M2_mpyd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpyd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RhRh __builtin_HEXAGON_M2_mpyd_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpyd_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RhRl __builtin_HEXAGON_M2_mpyd_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpyd_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRh __builtin_HEXAGON_M2_mpyd_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpyd_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRl __builtin_HEXAGON_M2_mpyd_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpyd_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):rnd + C Intrinsic Prototype: Word64 
Q6_P_mpy_RhRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyi(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyi_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyi_RR __builtin_HEXAGON_M2_mpyi + +/* ========================================================================== + Assembly Syntax: Rd32=mpyi(Rs32,#m9) + C Intrinsic Prototype: Word32 Q6_R_mpyi_RI(Word32 Rs, Word32 Im9) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mpyi_RI __builtin_HEXAGON_M2_mpysmi + +/* ========================================================================== + Assembly Syntax: Rd32=mpysu(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpysu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpysu_RR __builtin_HEXAGON_M2_mpysu_up + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + 
Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyu_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyu_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyu_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyu_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyu_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyu_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyu_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyu_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRh __builtin_HEXAGON_M2_mpyu_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h):<<1 
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyu_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRl __builtin_HEXAGON_M2_mpyu_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyu_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRh __builtin_HEXAGON_M2_mpyu_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyu_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRl __builtin_HEXAGON_M2_mpyu_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyu_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRh __builtin_HEXAGON_M2_mpyu_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyu_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 
Q6_R_mpyunac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRl __builtin_HEXAGON_M2_mpyu_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyu_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRh __builtin_HEXAGON_M2_mpyu_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyu_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRl __builtin_HEXAGON_M2_mpyu_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyu_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32,Rt32) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RR __builtin_HEXAGON_M2_mpyu_up + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyud_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyud_acc_hh_s1 + +/* ========================================================================== 
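+   Editor's sketch, not part of the upstream header: Q6_R_mpyu_RR above
+   yields the upper 32 bits of the unsigned 64-bit product, so a full
+   64-bit multiply can be assembled with one ordinary 32-bit multiply for
+   the wrapping low word (umul_64 is an illustrative name; the UWord64
+   typedef follows the conventions of these prototypes):
+
+     static inline UWord64 umul_64(UWord32 a, UWord32 b) {
+       UWord32 hi = Q6_R_mpyu_RR((Word32)a, (Word32)b);  // bits 63:32
+       UWord32 lo = a * b;                               // bits 31:0, wraps
+       return ((UWord64)hi << 32) | lo;
+     }
+   ========================================================================== */
+
+/* ==========================================================================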
+ Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyud_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyud_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyud_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyud_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyud_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyud_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRh __builtin_HEXAGON_M2_mpyud_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyud_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRl __builtin_HEXAGON_M2_mpyud_hl_s0 + +/* 
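+   Editor's sketch, not part of the upstream header: the 64-bit unsigned
+   accumulate forms avoid overflow in long reductions. A dot product over
+   unsigned low halfwords (dot_u16 and its arguments are illustrative
+   names):
+
+     static inline Word64 dot_u16(const Word32 *a, const Word32 *b, int n) {
+       Word64 acc = 0;
+       for (int i = 0; i < n; ++i)
+         acc = Q6_P_mpyuacc_RlRl(acc, a[i], b[i]);  // acc += a[i].l * b[i].l
+       return acc;
+     }
+
+   ========================================================================== */
+
+/*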
========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyud_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRh __builtin_HEXAGON_M2_mpyud_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyud_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRl __builtin_HEXAGON_M2_mpyud_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyud_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRh __builtin_HEXAGON_M2_mpyud_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyud_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRl __builtin_HEXAGON_M2_mpyud_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRl_s1 
__builtin_HEXAGON_M2_mpyud_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRh __builtin_HEXAGON_M2_mpyud_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyud_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRl __builtin_HEXAGON_M2_mpyud_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyud_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyui(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyui_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mpyui_RR __builtin_HEXAGON_M2_mpyui + +/* ========================================================================== + Assembly Syntax: Rx32-=add(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_addnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addnac_RR __builtin_HEXAGON_M2_nacci + +/* ========================================================================== + Assembly Syntax: Rx32-=add(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_addnac_RI(Word32 Rx, Word32 Rs, Word32 Is8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addnac_RI __builtin_HEXAGON_M2_naccii + +/* ========================================================================== + Assembly Syntax: Rx32+=sub(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_subacc_RR(Word32 Rx, Word32 Rt, Word32 Rs) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_subacc_RR __builtin_HEXAGON_M2_subacc + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsdiffh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsdiffh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_P_vabsdiffh_PP __builtin_HEXAGON_M2_vabsdiffh + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsdiffw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsdiffw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsdiffw_PP __builtin_HEXAGON_M2_vabsdiffw + +/* ========================================================================== + Assembly Syntax: Rxx32+=vcmpyi(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyiacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyiacc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_i + +/* ========================================================================== + Assembly Syntax: Rxx32+=vcmpyr(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyracc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyracc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_r + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyi_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_i + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyr_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_r + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyi_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_i + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyr_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_r + +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpyacc_PP_sat __builtin_HEXAGON_M2_vdmacs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + 
Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpyacc_PP_s1_sat __builtin_HEXAGON_M2_vdmacs_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vdmpy_PP_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vdmpy_PP_s1_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpy_PP_sat __builtin_HEXAGON_M2_vdmpys_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpy_PP_s1_sat __builtin_HEXAGON_M2_vdmpys_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhacc_RR __builtin_HEXAGON_M2_vmac2 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyehacc_PP __builtin_HEXAGON_M2_vmac2es + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyehacc_PP_sat __builtin_HEXAGON_M2_vmac2es_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyehacc_PP_s1_sat __builtin_HEXAGON_M2_vmac2es_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):sat + C Intrinsic 
Prototype: Word64 Q6_P_vmpyhacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhacc_RR_sat __builtin_HEXAGON_M2_vmac2s_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2s_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsuacc_RR_sat __builtin_HEXAGON_M2_vmac2su_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsuacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2su_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyeh_PP_sat __builtin_HEXAGON_M2_vmpy2es_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyeh_PP_s1_sat __builtin_HEXAGON_M2_vmpy2es_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyh_RR_sat __builtin_HEXAGON_M2_vmpy2s_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vmpyh_RR_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s0pack + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyh_RR_s1_sat __builtin_HEXAGON_M2_vmpy2s_s1 + +/* 
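+   Editor's sketch, not part of the upstream header: vmpyh forms both
+   halfword products of a packed pair in one operation. With a and b as
+   illustrative packed 16-bit-pair operands, something like:
+
+     Word32 a = ..., b = ...;                 // each holds two halfwords
+     Word64 p = Q6_P_vmpyh_RR_sat(a, b);      // {a.h*b.h, a.l*b.l} as Word32s
+     Word32 r = Q6_R_vmpyh_RR_rnd_sat(a, b);  // same products, rounded and
+                                              // repacked as two halfwords
+
+   ========================================================================== */
+
+/*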
========================================================================== + Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vmpyh_RR_s1_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s1pack + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsu_RR_sat __builtin_HEXAGON_M2_vmpy2su_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsu_RR_s1_sat __builtin_HEXAGON_M2_vmpy2su_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vraddh(Rss32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_vraddh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vraddh_PP __builtin_HEXAGON_M2_vraddh + +/* ========================================================================== + Assembly Syntax: Rd32=vradduh(Rss32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_vradduh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vradduh_PP __builtin_HEXAGON_M2_vradduh + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyiacc_PP __builtin_HEXAGON_M2_vrcmaci_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyiacc_PP_conj __builtin_HEXAGON_M2_vrcmaci_s0c + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyracc_PP __builtin_HEXAGON_M2_vrcmacr_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_P_vrcmpyracc_PP_conj __builtin_HEXAGON_M2_vrcmacr_s0c + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyi_PP __builtin_HEXAGON_M2_vrcmpyi_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyi_PP_conj __builtin_HEXAGON_M2_vrcmpyi_s0c + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyr_PP __builtin_HEXAGON_M2_vrcmpyr_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyr_PP_conj __builtin_HEXAGON_M2_vrcmpyr_s0c + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vrcmpysacc_PR_s1_sat(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vrcmpysacc_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_acc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpys(Rss32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vrcmpys_PR_s1_sat(Word64 Rss, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vrcmpys_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vrcmpys_PR_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vrcmpys_PR_s1_rnd_sat __builtin_HEXAGON_M2_vrcmpys_s1rp + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpyh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpyhacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpyh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpyh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + 
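+   Example (editor's illustration, not in the upstream header): a reducing
+   dot product of the four packed 16-bit lanes, using the Word64 operands
+   the prototype above documents:
+     Word64 dot = Q6_P_vrmpyh_PP(xs, ys);
+ 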
========================================================================== */ + +#define Q6_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0 + +/* ========================================================================== + Assembly Syntax: Rx32^=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xorxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_xorxacc_RR __builtin_HEXAGON_M2_xor_xacc + +/* ========================================================================== + Assembly Syntax: Rx32&=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_andand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andand_RR __builtin_HEXAGON_M4_and_and + +/* ========================================================================== + Assembly Syntax: Rx32&=and(Rs32,~Rt32) + C Intrinsic Prototype: Word32 Q6_R_andand_RnR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andand_RnR __builtin_HEXAGON_M4_and_andn + +/* ========================================================================== + Assembly Syntax: Rx32&=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_orand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_orand_RR __builtin_HEXAGON_M4_and_or + +/* ========================================================================== + Assembly Syntax: Rx32&=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xorand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_xorand_RR __builtin_HEXAGON_M4_and_xor + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyiwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_wh + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyiwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_whc + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyrwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_wh + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt) + 
Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyrwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_whc + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RR_s1_sat __builtin_HEXAGON_M4_mac_up_s1_sat + +/* ========================================================================== + Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,#U6)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRI(Word32 Iu6, Word32 Rs, Word32 IU6) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_IRI __builtin_HEXAGON_M4_mpyri_addi + +/* ========================================================================== + Assembly Syntax: Rd32=add(Ru32,mpyi(Rs32,#u6)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRI(Word32 Ru, Word32 Rs, Word32 Iu6) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_RRI __builtin_HEXAGON_M4_mpyri_addr + +/* ========================================================================== + Assembly Syntax: Rd32=add(Ru32,mpyi(#u6:2,Rs32)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RIR(Word32 Ru, Word32 Iu6_2, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_RIR __builtin_HEXAGON_M4_mpyri_addr_u2 + +/* ========================================================================== + Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,Rt32)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRR(Word32 Iu6, Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_IRR __builtin_HEXAGON_M4_mpyrr_addi + +/* ========================================================================== + Assembly Syntax: Ry32=add(Ru32,mpyi(Ry32,Rs32)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRR(Word32 Ru, Word32 Ry, Word32 Rs) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_RRR __builtin_HEXAGON_M4_mpyrr_addr + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RR_s1_sat __builtin_HEXAGON_M4_nac_up_s1_sat + +/* ========================================================================== + Assembly Syntax: Rx32|=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_andor_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andor_RR __builtin_HEXAGON_M4_or_and + +/* ========================================================================== + Assembly Syntax: 
Rx32|=and(Rs32,~Rt32) + C Intrinsic Prototype: Word32 Q6_R_andor_RnR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andor_RnR __builtin_HEXAGON_M4_or_andn + +/* ========================================================================== + Assembly Syntax: Rx32|=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_oror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_oror_RR __builtin_HEXAGON_M4_or_or + +/* ========================================================================== + Assembly Syntax: Rx32|=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xoror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_xoror_RR __builtin_HEXAGON_M4_or_xor + +/* ========================================================================== + Assembly Syntax: Rdd32=pmpyw(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_pmpyw_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_pmpyw_RR __builtin_HEXAGON_M4_pmpyw + +/* ========================================================================== + Assembly Syntax: Rxx32^=pmpyw(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_pmpywxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_pmpywxacc_RR __builtin_HEXAGON_M4_pmpyw_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vpmpyh(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vpmpyh_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vpmpyh_RR __builtin_HEXAGON_M4_vpmpyh + +/* ========================================================================== + Assembly Syntax: Rxx32^=vpmpyh(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vpmpyhxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vpmpyhxacc_RR __builtin_HEXAGON_M4_vpmpyh_acc + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywehacc_PP __builtin_HEXAGON_M4_vrmpyeh_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywehacc_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_acc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32) + C 
Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpyweh_PP __builtin_HEXAGON_M4_vrmpyeh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP_s1(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpyweh_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywohacc_PP __builtin_HEXAGON_M4_vrmpyoh_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywohacc_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_acc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywoh_PP __builtin_HEXAGON_M4_vrmpyoh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP_s1(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywoh_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32^=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_andxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andxacc_RR __builtin_HEXAGON_M4_xor_and + +/* ========================================================================== + Assembly Syntax: Rx32^=and(Rs32,~Rt32) + C Intrinsic Prototype: Word32 Q6_R_andxacc_RnR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andxacc_RnR __builtin_HEXAGON_M4_xor_andn + +/* ========================================================================== + Assembly Syntax: Rx32^=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_orxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_orxacc_RR __builtin_HEXAGON_M4_xor_or + +/* ========================================================================== + Assembly Syntax: 
Rxx32^=xor(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_xorxacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_xorxacc_PP __builtin_HEXAGON_M4_xor_xacc + +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpybsu(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpybsuacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpybsuacc_PP_sat __builtin_HEXAGON_M5_vdmacbsu + +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpybsu(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpybsu_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpybsu_PP_sat __builtin_HEXAGON_M5_vdmpybsu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpybsu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybsuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybsuacc_RR __builtin_HEXAGON_M5_vmacbsu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpybu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybuacc_RR __builtin_HEXAGON_M5_vmacbuu + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpybsu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybsu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybsu_RR __builtin_HEXAGON_M5_vmpybsu + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpybu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybu_RR __builtin_HEXAGON_M5_vmpybuu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpybsu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybsuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybsuacc_PP __builtin_HEXAGON_M5_vrmacbsu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpybu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybuacc_PP __builtin_HEXAGON_M5_vrmacbuu + +/* ========================================================================== + 
Assembly Syntax: Rdd32=vrmpybsu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybsu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybsu_PP __builtin_HEXAGON_M5_vrmpybsu + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpybu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybu_PP __builtin_HEXAGON_M5_vrmpybuu + +/* ========================================================================== + Assembly Syntax: Rd32=addasl(Rt32,Rs32,#u3) + C Intrinsic Prototype: Word32 Q6_R_addasl_RRI(Word32 Rt, Word32 Rs, Word32 Iu3) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addasl_RRI __builtin_HEXAGON_S2_addasl_rrri + +/* ========================================================================== + Assembly Syntax: Rdd32=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asl_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asl_PI __builtin_HEXAGON_S2_asl_i_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslacc_PI __builtin_HEXAGON_S2_asl_i_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asland_PI __builtin_HEXAGON_S2_asl_i_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslnac_PI __builtin_HEXAGON_S2_asl_i_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslor_PI __builtin_HEXAGON_S2_asl_i_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslxacc_PI __builtin_HEXAGON_S2_asl_i_p_xacc + +/* ========================================================================== + Assembly Syntax: 
Rd32=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asl_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asl_RI __builtin_HEXAGON_S2_asl_i_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslacc_RI __builtin_HEXAGON_S2_asl_i_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asland_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asland_RI __builtin_HEXAGON_S2_asl_i_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslnac_RI __builtin_HEXAGON_S2_asl_i_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslor_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslor_RI __builtin_HEXAGON_S2_asl_i_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,#u5):sat + C Intrinsic Prototype: Word32 Q6_R_asl_RI_sat(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asl_RI_sat __builtin_HEXAGON_S2_asl_i_r_sat + +/* ========================================================================== + Assembly Syntax: Rx32^=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslxacc_RI __builtin_HEXAGON_S2_asl_i_r_xacc + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslh(Rss32,#u4) + C Intrinsic Prototype: Word64 Q6_P_vaslh_PI(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslh_PI __builtin_HEXAGON_S2_asl_i_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslw(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vaslw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslw_PI __builtin_HEXAGON_S2_asl_i_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asl_PR(Word64 Rss, 
Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asl_PR __builtin_HEXAGON_S2_asl_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslacc_PR __builtin_HEXAGON_S2_asl_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asland_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asland_PR __builtin_HEXAGON_S2_asl_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslnac_PR __builtin_HEXAGON_S2_asl_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslor_PR __builtin_HEXAGON_S2_asl_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslxacc_PR __builtin_HEXAGON_S2_asl_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asl_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asl_RR __builtin_HEXAGON_S2_asl_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_aslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslacc_RR __builtin_HEXAGON_S2_asl_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asland_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asland_RR __builtin_HEXAGON_S2_asl_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_aslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op 
+ Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslnac_RR __builtin_HEXAGON_S2_asl_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_aslor_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslor_RR __builtin_HEXAGON_S2_asl_r_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_asl_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asl_RR_sat __builtin_HEXAGON_S2_asl_r_r_sat + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vaslh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslh_PR __builtin_HEXAGON_S2_asl_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vaslw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslw_PR __builtin_HEXAGON_S2_asl_r_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asr_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asr_PI __builtin_HEXAGON_S2_asr_i_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asracc_PI __builtin_HEXAGON_S2_asr_i_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrand_PI __builtin_HEXAGON_S2_asr_i_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrnac_PI __builtin_HEXAGON_S2_asr_i_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
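+   Example (editor's illustration, not in the upstream header): OR the
+   arithmetically shifted sample into a 64-bit flag accumulator:
+     flags = Q6_P_asror_PI(flags, sample, 16);
+ 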
========================================================================== */ + +#define Q6_P_asror_PI __builtin_HEXAGON_S2_asr_i_p_or + +/* ========================================================================== + Assembly Syntax: Rdd32=asr(Rss32,#u6):rnd + C Intrinsic Prototype: Word64 Q6_P_asr_PI_rnd(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asr_PI_rnd __builtin_HEXAGON_S2_asr_i_p_rnd + +/* ========================================================================== + Assembly Syntax: Rdd32=asrrnd(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asrrnd_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_asrrnd_PI __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asr_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asr_RI __builtin_HEXAGON_S2_asr_i_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asracc_RI __builtin_HEXAGON_S2_asr_i_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrand_RI __builtin_HEXAGON_S2_asr_i_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrnac_RI __builtin_HEXAGON_S2_asr_i_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asror_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asror_RI __builtin_HEXAGON_S2_asr_i_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,#u5):rnd + C Intrinsic Prototype: Word32 Q6_R_asr_RI_rnd(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asr_RI_rnd __builtin_HEXAGON_S2_asr_i_r_rnd + +/* ========================================================================== + Assembly Syntax: Rd32=asrrnd(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asrrnd_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + 
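+   Note (editor's illustration, not in the upstream header): convenience
+   syntax for Rd32=asr(Rs32,#u5):rnd above; the shifted-out bits round the
+   result instead of being truncated:
+     Word32 q = Q6_R_asrrnd_RI(x, 8);   // x scaled by 2^-8, rounded
+ 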
========================================================================== */ + +#define Q6_R_asrrnd_RI __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=vasrw(Rss32,#u5) + C Intrinsic Prototype: Word32 Q6_R_vasrw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vasrw_PI __builtin_HEXAGON_S2_asr_i_svw_trun + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrh(Rss32,#u4) + C Intrinsic Prototype: Word64 Q6_P_vasrh_PI(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrh_PI __builtin_HEXAGON_S2_asr_i_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrw(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vasrw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrw_PI __builtin_HEXAGON_S2_asr_i_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asr_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asr_PR __builtin_HEXAGON_S2_asr_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asracc_PR __builtin_HEXAGON_S2_asr_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrand_PR __builtin_HEXAGON_S2_asr_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrnac_PR __builtin_HEXAGON_S2_asr_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asror_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asror_PR __builtin_HEXAGON_S2_asr_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
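+   Example (editor's illustration, not in the upstream header): XOR the
+   shifted data word into a 64-bit running checksum:
+     crc = Q6_P_asrxacc_PR(crc, data, round);
+ 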
========================================================================== */ + +#define Q6_P_asrxacc_PR __builtin_HEXAGON_S2_asr_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asr_RR __builtin_HEXAGON_S2_asr_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asracc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asracc_RR __builtin_HEXAGON_S2_asr_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asrand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrand_RR __builtin_HEXAGON_S2_asr_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrnac_RR __builtin_HEXAGON_S2_asr_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asror_RR __builtin_HEXAGON_S2_asr_r_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_asr_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asr_RR_sat __builtin_HEXAGON_S2_asr_r_r_sat + +/* ========================================================================== + Assembly Syntax: Rd32=vasrw(Rss32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_vasrw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vasrw_PR __builtin_HEXAGON_S2_asr_r_svw_trun + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vasrh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrh_PR __builtin_HEXAGON_S2_asr_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vasrw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + 
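+/* ==========================================================================
+   Example (editor's sketch, not part of the upstream header): narrowing a
+   packed pair of 32-bit words with the vector shift defined above. Word32
+   and Word64 stand for the 32-/64-bit integer typedefs that the prototypes
+   in this file document (an assumption; see hexagon_types.h).
+
+     static inline Word32 narrow_pair(Word64 pair, Word32 shift) {
+       return Q6_R_vasrw_PR(pair, shift);   // Rd32=vasrw(Rss32,Rt32)
+     }
+   ========================================================================== */
+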
+#define Q6_P_vasrw_PR __builtin_HEXAGON_S2_asr_r_vw + +/* ========================================================================== + Assembly Syntax: Rd32=brev(Rs32) + C Intrinsic Prototype: Word32 Q6_R_brev_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_brev_R __builtin_HEXAGON_S2_brev + +/* ========================================================================== + Assembly Syntax: Rdd32=brev(Rss32) + C Intrinsic Prototype: Word64 Q6_P_brev_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_brev_P __builtin_HEXAGON_S2_brevp + +/* ========================================================================== + Assembly Syntax: Rd32=cl0(Rs32) + C Intrinsic Prototype: Word32 Q6_R_cl0_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl0_R __builtin_HEXAGON_S2_cl0 + +/* ========================================================================== + Assembly Syntax: Rd32=cl0(Rss32) + C Intrinsic Prototype: Word32 Q6_R_cl0_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl0_P __builtin_HEXAGON_S2_cl0p + +/* ========================================================================== + Assembly Syntax: Rd32=cl1(Rs32) + C Intrinsic Prototype: Word32 Q6_R_cl1_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl1_R __builtin_HEXAGON_S2_cl1 + +/* ========================================================================== + Assembly Syntax: Rd32=cl1(Rss32) + C Intrinsic Prototype: Word32 Q6_R_cl1_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl1_P __builtin_HEXAGON_S2_cl1p + +/* ========================================================================== + Assembly Syntax: Rd32=clb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_clb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clb_R __builtin_HEXAGON_S2_clb + +/* ========================================================================== + Assembly Syntax: Rd32=normamt(Rs32) + C Intrinsic Prototype: Word32 Q6_R_normamt_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_normamt_R __builtin_HEXAGON_S2_clbnorm + +/* ========================================================================== + Assembly Syntax: Rd32=clb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_clb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clb_P __builtin_HEXAGON_S2_clbp + +/* ========================================================================== + Assembly Syntax: Rd32=clrbit(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_clrbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + 
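+/* ==========================================================================
+   Example (editor's sketch, not part of the upstream header): counting the
+   leading zero bits of a word with the intrinsic defined above.
+
+     static inline Word32 leading_zeros(Word32 x) {
+       return Q6_R_cl0_R(x);                // Rd32=cl0(Rs32)
+     }
+   ========================================================================== */
+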
+#define Q6_R_clrbit_RI __builtin_HEXAGON_S2_clrbit_i + +/* ========================================================================== + Assembly Syntax: Rd32=clrbit(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_clrbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clrbit_RR __builtin_HEXAGON_S2_clrbit_r + +/* ========================================================================== + Assembly Syntax: Rd32=ct0(Rs32) + C Intrinsic Prototype: Word32 Q6_R_ct0_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct0_R __builtin_HEXAGON_S2_ct0 + +/* ========================================================================== + Assembly Syntax: Rd32=ct0(Rss32) + C Intrinsic Prototype: Word32 Q6_R_ct0_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct0_P __builtin_HEXAGON_S2_ct0p + +/* ========================================================================== + Assembly Syntax: Rd32=ct1(Rs32) + C Intrinsic Prototype: Word32 Q6_R_ct1_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct1_R __builtin_HEXAGON_S2_ct1 + +/* ========================================================================== + Assembly Syntax: Rd32=ct1(Rss32) + C Intrinsic Prototype: Word32 Q6_R_ct1_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct1_P __builtin_HEXAGON_S2_ct1p + +/* ========================================================================== + Assembly Syntax: Rdd32=deinterleave(Rss32) + C Intrinsic Prototype: Word64 Q6_P_deinterleave_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_deinterleave_P __builtin_HEXAGON_S2_deinterleave + +/* ========================================================================== + Assembly Syntax: Rd32=extractu(Rs32,#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_extractu_RII(Word32 Rs, Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extractu_RII __builtin_HEXAGON_S2_extractu + +/* ========================================================================== + Assembly Syntax: Rd32=extractu(Rs32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_extractu_RP(Word32 Rs, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extractu_RP __builtin_HEXAGON_S2_extractu_rp + +/* ========================================================================== + Assembly Syntax: Rdd32=extractu(Rss32,#u6,#U6) + C Intrinsic Prototype: Word64 Q6_P_extractu_PII(Word64 Rss, Word32 Iu6, Word32 IU6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extractu_PII __builtin_HEXAGON_S2_extractup + +/* ========================================================================== + Assembly Syntax: Rdd32=extractu(Rss32,Rtt32) + C 
Intrinsic Prototype: Word64 Q6_P_extractu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extractu_PP __builtin_HEXAGON_S2_extractup_rp + +/* ========================================================================== + Assembly Syntax: Rx32=insert(Rs32,#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_insert_RII(Word32 Rx, Word32 Rs, Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_insert_RII __builtin_HEXAGON_S2_insert + +/* ========================================================================== + Assembly Syntax: Rx32=insert(Rs32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_insert_RP(Word32 Rx, Word32 Rs, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_insert_RP __builtin_HEXAGON_S2_insert_rp + +/* ========================================================================== + Assembly Syntax: Rxx32=insert(Rss32,#u6,#U6) + C Intrinsic Prototype: Word64 Q6_P_insert_PII(Word64 Rxx, Word64 Rss, Word32 Iu6, Word32 IU6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_insert_PII __builtin_HEXAGON_S2_insertp + +/* ========================================================================== + Assembly Syntax: Rxx32=insert(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_insert_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_insert_PP __builtin_HEXAGON_S2_insertp_rp + +/* ========================================================================== + Assembly Syntax: Rdd32=interleave(Rss32) + C Intrinsic Prototype: Word64 Q6_P_interleave_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_interleave_P __builtin_HEXAGON_S2_interleave + +/* ========================================================================== + Assembly Syntax: Rdd32=lfs(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_lfs_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lfs_PP __builtin_HEXAGON_S2_lfsp + +/* ========================================================================== + Assembly Syntax: Rdd32=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsl_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsl_PR __builtin_HEXAGON_S2_lsl_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslacc_PR __builtin_HEXAGON_S2_lsl_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 
Q6_P_lsland_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsland_PR __builtin_HEXAGON_S2_lsl_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslnac_PR __builtin_HEXAGON_S2_lsl_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslor_PR __builtin_HEXAGON_S2_lsl_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslxacc_PR __builtin_HEXAGON_S2_lsl_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsl_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsl_RR __builtin_HEXAGON_S2_lsl_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lslacc_RR __builtin_HEXAGON_S2_lsl_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsland_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsland_RR __builtin_HEXAGON_S2_lsl_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lslnac_RR __builtin_HEXAGON_S2_lsl_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lslor_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lslor_RR __builtin_HEXAGON_S2_lsl_r_r_or + +/* ========================================================================== + Assembly Syntax: Rdd32=vlslh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlslh_PR(Word64 Rss, Word32 
Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlslh_PR __builtin_HEXAGON_S2_lsl_r_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlslw(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vlslw_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlslw_PR __builtin_HEXAGON_S2_lsl_r_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsr_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsr_PI __builtin_HEXAGON_S2_lsr_i_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsracc_PI __builtin_HEXAGON_S2_lsr_i_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrand_PI __builtin_HEXAGON_S2_lsr_i_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrnac_PI __builtin_HEXAGON_S2_lsr_i_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsror_PI __builtin_HEXAGON_S2_lsr_i_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrxacc_PI __builtin_HEXAGON_S2_lsr_i_p_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsr_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsr_RI __builtin_HEXAGON_S2_lsr_i_r
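+
+/* A minimal usage sketch (illustrative only, not part of the generated
+   header): the read/modify shift intrinsics above take the accumulator as
+   their first C argument and return its updated value, so an assembly form
+   like "Rxx32+=lsr(Rss32,#u6)" is written as a reassignment. Assumes the
+   Word64 typedef from hexagon_types.h; the function name is made up. */
+static inline Word64 sum_of_high_words(Word64 a, Word64 b)
+{
+  Word64 acc = Q6_P_lsr_PI(a, 32);  /* acc  = a >> 32 (logical shift)     */
+  acc = Q6_P_lsracc_PI(acc, b, 32); /* acc += b >> 32, in one instruction */
+  return acc;
+}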
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsracc_RI __builtin_HEXAGON_S2_lsr_i_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrand_RI __builtin_HEXAGON_S2_lsr_i_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrnac_RI __builtin_HEXAGON_S2_lsr_i_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsror_RI __builtin_HEXAGON_S2_lsr_i_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsrxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrxacc_RI __builtin_HEXAGON_S2_lsr_i_r_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlsrh(Rss32,#u4)
+ C Intrinsic Prototype: Word64 Q6_P_vlsrh_PI(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlsrh_PI __builtin_HEXAGON_S2_lsr_i_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlsrw(Rss32,#u5)
+ C Intrinsic Prototype: Word64 Q6_P_vlsrw_PI(Word64 Rss, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlsrw_PI __builtin_HEXAGON_S2_lsr_i_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsr_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsr_PR __builtin_HEXAGON_S2_lsr_r_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsracc_PR __builtin_HEXAGON_S2_lsr_r_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ 
========================================================================== */ + +#define Q6_P_lsrand_PR __builtin_HEXAGON_S2_lsr_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrnac_PR __builtin_HEXAGON_S2_lsr_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsror_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsror_PR __builtin_HEXAGON_S2_lsr_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrxacc_PR __builtin_HEXAGON_S2_lsr_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsr_RR __builtin_HEXAGON_S2_lsr_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsracc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsracc_RR __builtin_HEXAGON_S2_lsr_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsrand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsrand_RR __builtin_HEXAGON_S2_lsr_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsrnac_RR __builtin_HEXAGON_S2_lsr_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsror_RR __builtin_HEXAGON_S2_lsr_r_r_or + +/* ========================================================================== + Assembly Syntax: Rdd32=vlsrh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlsrh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vlsrh_PR __builtin_HEXAGON_S2_lsr_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vlsrw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlsrw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vlsrw_PR __builtin_HEXAGON_S2_lsr_r_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=packhl(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_packhl_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_packhl_RR __builtin_HEXAGON_S2_packhl + +/* ========================================================================== + Assembly Syntax: Rd32=parity(Rss32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_parity_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_parity_PP __builtin_HEXAGON_S2_parityp + +/* ========================================================================== + Assembly Syntax: Rd32=setbit(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_setbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_setbit_RI __builtin_HEXAGON_S2_setbit_i + +/* ========================================================================== + Assembly Syntax: Rd32=setbit(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_setbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_setbit_RR __builtin_HEXAGON_S2_setbit_r + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffeb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_shuffeb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffeb_PP __builtin_HEXAGON_S2_shuffeb + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffeh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_shuffeh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffeh_PP __builtin_HEXAGON_S2_shuffeh + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffob(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_shuffob_PP(Word64 Rtt, Word64 Rss) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffob_PP __builtin_HEXAGON_S2_shuffob + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffoh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_shuffoh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_P_shuffoh_PP __builtin_HEXAGON_S2_shuffoh + +/* ========================================================================== + Assembly Syntax: memb(Rx32++#s4:0:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memb_IMR_circ(void** Rx, Word32 Is4_0, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memb_IMR_circ __builtin_HEXAGON_S2_storerb_pci + +/* ========================================================================== + Assembly Syntax: memb(Rx32++I:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memb_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memb_MR_circ __builtin_HEXAGON_S2_storerb_pcr + +/* ========================================================================== + Assembly Syntax: memd(Rx32++#s4:3:circ(Mu2))=Rtt32 + C Intrinsic Prototype: void Q6_memd_IMP_circ(void** Rx, Word32 Is4_3, Word32 Mu, Word64 Rtt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memd_IMP_circ __builtin_HEXAGON_S2_storerd_pci + +/* ========================================================================== + Assembly Syntax: memd(Rx32++I:circ(Mu2))=Rtt32 + C Intrinsic Prototype: void Q6_memd_MP_circ(void** Rx, Word32 Mu, Word64 Rtt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memd_MP_circ __builtin_HEXAGON_S2_storerd_pcr + +/* ========================================================================== + Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32.h + C Intrinsic Prototype: void Q6_memh_IMRh_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_IMRh_circ __builtin_HEXAGON_S2_storerf_pci + +/* ========================================================================== + Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32.h + C Intrinsic Prototype: void Q6_memh_MRh_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_MRh_circ __builtin_HEXAGON_S2_storerf_pcr + +/* ========================================================================== + Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memh_IMR_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_IMR_circ __builtin_HEXAGON_S2_storerh_pci + +/* ========================================================================== + Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memh_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_MR_circ __builtin_HEXAGON_S2_storerh_pcr + +/* ========================================================================== + 
Assembly Syntax: memw(Rx32++#s4:2:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memw_IMR_circ(void** Rx, Word32 Is4_2, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memw_IMR_circ __builtin_HEXAGON_S2_storeri_pci + +/* ========================================================================== + Assembly Syntax: memw(Rx32++I:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memw_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memw_MR_circ __builtin_HEXAGON_S2_storeri_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=vsathb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsathb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathb_R __builtin_HEXAGON_S2_svsathb + +/* ========================================================================== + Assembly Syntax: Rd32=vsathub(Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsathub_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathub_R __builtin_HEXAGON_S2_svsathub + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxb(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxb_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxb_RII __builtin_HEXAGON_S2_tableidxb_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxd(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxd_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxd_RII __builtin_HEXAGON_S2_tableidxd_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxh(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxh_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxh_RII __builtin_HEXAGON_S2_tableidxh_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxw(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxw_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxw_RII __builtin_HEXAGON_S2_tableidxw_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=togglebit(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_togglebit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + 
+#define Q6_R_togglebit_RI __builtin_HEXAGON_S2_togglebit_i + +/* ========================================================================== + Assembly Syntax: Rd32=togglebit(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_togglebit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_togglebit_RR __builtin_HEXAGON_S2_togglebit_r + +/* ========================================================================== + Assembly Syntax: Pd4=tstbit(Rs32,#u5) + C Intrinsic Prototype: Byte Q6_p_tstbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_tstbit_RI __builtin_HEXAGON_S2_tstbit_i + +/* ========================================================================== + Assembly Syntax: Pd4=tstbit(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_tstbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_tstbit_RR __builtin_HEXAGON_S2_tstbit_r + +/* ========================================================================== + Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,#u3) + C Intrinsic Prototype: Word64 Q6_P_valignb_PPI(Word64 Rtt, Word64 Rss, Word32 Iu3) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_valignb_PPI __builtin_HEXAGON_S2_valignib + +/* ========================================================================== + Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,Pu4) + C Intrinsic Prototype: Word64 Q6_P_valignb_PPp(Word64 Rtt, Word64 Rss, Byte Pu) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_valignb_PPp __builtin_HEXAGON_S2_valignrb + +/* ========================================================================== + Assembly Syntax: Rdd32=vcnegh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vcnegh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcnegh_PR __builtin_HEXAGON_S2_vcnegh + +/* ========================================================================== + Assembly Syntax: Rdd32=vcrotate(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vcrotate_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcrotate_PR __builtin_HEXAGON_S2_vcrotate + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcnegh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vrcneghacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcneghacc_PR __builtin_HEXAGON_S2_vrcnegh + +/* ========================================================================== + Assembly Syntax: Rd32=vrndwh(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vrndwh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vrndwh_P __builtin_HEXAGON_S2_vrndpackwh 
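+
+/* A minimal usage sketch (illustrative only, not part of the generated
+   header): predicate-producing intrinsics such as tstbit return a Byte
+   that is nonzero when the predicate is true, so they drop straight into
+   C conditionals. Assumes the Byte/Word32 typedefs from hexagon_types.h;
+   the function name is made up. */
+static inline Word32 clear_bit_if_set(Word32 v, Word32 bit)
+{
+  if (Q6_p_tstbit_RR(v, bit))      /* Pd4=tstbit(Rs32,Rt32)        */
+    v = Q6_R_togglebit_RR(v, bit); /* toggling a set bit clears it */
+  return v;
+}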
+ +/* ========================================================================== + Assembly Syntax: Rd32=vrndwh(Rss32):sat + C Intrinsic Prototype: Word32 Q6_R_vrndwh_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vrndwh_P_sat __builtin_HEXAGON_S2_vrndpackwhs + +/* ========================================================================== + Assembly Syntax: Rd32=vsathb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsathb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathb_P __builtin_HEXAGON_S2_vsathb + +/* ========================================================================== + Assembly Syntax: Rdd32=vsathb(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsathb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsathb_P __builtin_HEXAGON_S2_vsathb_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsathub(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsathub_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathub_P __builtin_HEXAGON_S2_vsathub + +/* ========================================================================== + Assembly Syntax: Rdd32=vsathub(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsathub_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsathub_P __builtin_HEXAGON_S2_vsathub_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsatwh(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsatwh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsatwh_P __builtin_HEXAGON_S2_vsatwh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsatwh(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsatwh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsatwh_P __builtin_HEXAGON_S2_vsatwh_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsatwuh(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsatwuh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsatwuh(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsatwuh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsplatb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsplatb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_vsplatb_R __builtin_HEXAGON_S2_vsplatrb + +/* ========================================================================== + Assembly Syntax: Rdd32=vsplath(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsplath_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsplath_R __builtin_HEXAGON_S2_vsplatrh + +/* ========================================================================== + Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,#u3) + C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPI(Word64 Rss, Word64 Rtt, Word32 Iu3) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vspliceb_PPI __builtin_HEXAGON_S2_vspliceib + +/* ========================================================================== + Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,Pu4) + C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPp(Word64 Rss, Word64 Rtt, Byte Pu) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vspliceb_PPp __builtin_HEXAGON_S2_vsplicerb + +/* ========================================================================== + Assembly Syntax: Rdd32=vsxtbh(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsxtbh_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsxtbh_R __builtin_HEXAGON_S2_vsxtbh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsxthw(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsxthw_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsxthw_R __builtin_HEXAGON_S2_vsxthw + +/* ========================================================================== + Assembly Syntax: Rd32=vtrunehb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vtrunehb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vtrunehb_P __builtin_HEXAGON_S2_vtrunehb + +/* ========================================================================== + Assembly Syntax: Rdd32=vtrunewh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vtrunewh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vtrunewh_PP __builtin_HEXAGON_S2_vtrunewh + +/* ========================================================================== + Assembly Syntax: Rd32=vtrunohb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vtrunohb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vtrunohb_P __builtin_HEXAGON_S2_vtrunohb + +/* ========================================================================== + Assembly Syntax: Rdd32=vtrunowh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vtrunowh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vtrunowh_PP 
__builtin_HEXAGON_S2_vtrunowh + +/* ========================================================================== + Assembly Syntax: Rdd32=vzxtbh(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vzxtbh_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vzxtbh_R __builtin_HEXAGON_S2_vzxtbh + +/* ========================================================================== + Assembly Syntax: Rdd32=vzxthw(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vzxthw_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vzxthw_R __builtin_HEXAGON_S2_vzxthw + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,add(Ru32,#s6)) + C Intrinsic Prototype: Word32 Q6_R_add_add_RRI(Word32 Rs, Word32 Ru, Word32 Is6) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_add_RRI __builtin_HEXAGON_S4_addaddi + +/* ========================================================================== + Assembly Syntax: Rx32=add(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_add_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_asl_IRI __builtin_HEXAGON_S4_addi_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=add(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_add_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_lsr_IRI __builtin_HEXAGON_S4_addi_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rx32=and(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_and_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_and_asl_IRI __builtin_HEXAGON_S4_andi_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=and(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_and_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_and_lsr_IRI __builtin_HEXAGON_S4_andi_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rd32=add(clb(Rs32),#s6) + C Intrinsic Prototype: Word32 Q6_R_add_clb_RI(Word32 Rs, Word32 Is6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_clb_RI __builtin_HEXAGON_S4_clbaddi + +/* ========================================================================== + Assembly Syntax: Rd32=add(clb(Rss32),#s6) + C Intrinsic Prototype: Word32 Q6_R_add_clb_PI(Word64 Rss, Word32 Is6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_clb_PI __builtin_HEXAGON_S4_clbpaddi + +/* 
========================================================================== + Assembly Syntax: Rd32=normamt(Rss32) + C Intrinsic Prototype: Word32 Q6_R_normamt_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_normamt_P __builtin_HEXAGON_S4_clbpnorm + +/* ========================================================================== + Assembly Syntax: Rd32=extract(Rs32,#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_extract_RII(Word32 Rs, Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extract_RII __builtin_HEXAGON_S4_extract + +/* ========================================================================== + Assembly Syntax: Rd32=extract(Rs32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_extract_RP(Word32 Rs, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extract_RP __builtin_HEXAGON_S4_extract_rp + +/* ========================================================================== + Assembly Syntax: Rdd32=extract(Rss32,#u6,#U6) + C Intrinsic Prototype: Word64 Q6_P_extract_PII(Word64 Rss, Word32 Iu6, Word32 IU6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extract_PII __builtin_HEXAGON_S4_extractp + +/* ========================================================================== + Assembly Syntax: Rdd32=extract(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_extract_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extract_PP __builtin_HEXAGON_S4_extractp_rp + +/* ========================================================================== + Assembly Syntax: Rd32=lsl(#s6,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsl_IR(Word32 Is6, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsl_IR __builtin_HEXAGON_S4_lsli + +/* ========================================================================== + Assembly Syntax: Pd4=!tstbit(Rs32,#u5) + C Intrinsic Prototype: Byte Q6_p_not_tstbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_tstbit_RI __builtin_HEXAGON_S4_ntstbit_i + +/* ========================================================================== + Assembly Syntax: Pd4=!tstbit(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_tstbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_tstbit_RR __builtin_HEXAGON_S4_ntstbit_r + +/* ========================================================================== + Assembly Syntax: Rx32|=and(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_andor_RI(Word32 Rx, Word32 Rs, Word32 Is10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andor_RI __builtin_HEXAGON_S4_or_andi + +/* ========================================================================== + 
Assembly Syntax: Rx32=or(Ru32,and(Rx32,#s10)) + C Intrinsic Prototype: Word32 Q6_R_or_and_RRI(Word32 Ru, Word32 Rx, Word32 Is10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_or_and_RRI __builtin_HEXAGON_S4_or_andix + +/* ========================================================================== + Assembly Syntax: Rx32|=or(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_oror_RI(Word32 Rx, Word32 Rs, Word32 Is10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_oror_RI __builtin_HEXAGON_S4_or_ori + +/* ========================================================================== + Assembly Syntax: Rx32=or(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_or_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_or_asl_IRI __builtin_HEXAGON_S4_ori_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=or(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_or_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_or_lsr_IRI __builtin_HEXAGON_S4_ori_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rd32=parity(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_parity_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_parity_RR __builtin_HEXAGON_S4_parity + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,sub(#s6,Ru32)) + C Intrinsic Prototype: Word32 Q6_R_add_sub_RIR(Word32 Rs, Word32 Is6, Word32 Ru) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_sub_RIR __builtin_HEXAGON_S4_subaddi + +/* ========================================================================== + Assembly Syntax: Rx32=sub(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_sub_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_asl_IRI __builtin_HEXAGON_S4_subi_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=sub(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_sub_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_lsr_IRI __builtin_HEXAGON_S4_subi_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcrotate(Rss32,Rt32,#u2) + C Intrinsic Prototype: Word64 Q6_P_vrcrotate_PRI(Word64 Rss, Word32 Rt, Word32 Iu2) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcrotate_PRI __builtin_HEXAGON_S4_vrcrotate + +/* 
========================================================================== + Assembly Syntax: Rxx32+=vrcrotate(Rss32,Rt32,#u2) + C Intrinsic Prototype: Word64 Q6_P_vrcrotateacc_PRI(Word64 Rxx, Word64 Rss, Word32 Rt, Word32 Iu2) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcrotateacc_PRI __builtin_HEXAGON_S4_vrcrotate_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxaddsubh_PP_sat __builtin_HEXAGON_S4_vxaddsubh + +/* ========================================================================== + Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat + C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxaddsubh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxaddsubhr + +/* ========================================================================== + Assembly Syntax: Rdd32=vxaddsubw(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxaddsubw_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxaddsubw_PP_sat __builtin_HEXAGON_S4_vxaddsubw + +/* ========================================================================== + Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxsubaddh_PP_sat __builtin_HEXAGON_S4_vxsubaddh + +/* ========================================================================== + Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat + C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxsubaddh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxsubaddhr + +/* ========================================================================== + Assembly Syntax: Rdd32=vxsubaddw(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxsubaddw_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxsubaddw_PP_sat __builtin_HEXAGON_S4_vxsubaddw + +/* ========================================================================== + Assembly Syntax: Rd32=vasrhub(Rss32,#u4):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_rnd_sat(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vasrhub_PI_rnd_sat __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=vasrhub(Rss32,#u4):sat + C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_sat(Word64 Rss, Word32 Iu4) + 
Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vasrhub_PI_sat __builtin_HEXAGON_S5_asrhub_sat + +/* ========================================================================== + Assembly Syntax: Rd32=popcount(Rss32) + C Intrinsic Prototype: Word32 Q6_R_popcount_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_popcount_P __builtin_HEXAGON_S5_popcountp + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrh(Rss32,#u4):rnd + C Intrinsic Prototype: Word64 Q6_P_vasrh_PI_rnd(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vasrh_PI_rnd __builtin_HEXAGON_S5_vasrhrnd_goodsyntax + +/* ========================================================================== + Assembly Syntax: dccleana(Rs32) + C Intrinsic Prototype: void Q6_dccleana_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dccleana_A __builtin_HEXAGON_Y2_dccleana + +/* ========================================================================== + Assembly Syntax: dccleaninva(Rs32) + C Intrinsic Prototype: void Q6_dccleaninva_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dccleaninva_A __builtin_HEXAGON_Y2_dccleaninva + +/* ========================================================================== + Assembly Syntax: dcfetch(Rs32) + C Intrinsic Prototype: void Q6_dcfetch_A(Address Rs) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_dcfetch_A __builtin_HEXAGON_Y2_dcfetch + +/* ========================================================================== + Assembly Syntax: dcinva(Rs32) + C Intrinsic Prototype: void Q6_dcinva_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dcinva_A __builtin_HEXAGON_Y2_dcinva + +/* ========================================================================== + Assembly Syntax: dczeroa(Rs32) + C Intrinsic Prototype: void Q6_dczeroa_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dczeroa_A __builtin_HEXAGON_Y2_dczeroa + +/* ========================================================================== + Assembly Syntax: l2fetch(Rs32,Rt32) + C Intrinsic Prototype: void Q6_l2fetch_AR(Address Rs, Word32 Rt) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_l2fetch_AR __builtin_HEXAGON_Y4_l2fetch + +/* ========================================================================== + Assembly Syntax: l2fetch(Rs32,Rtt32) + C Intrinsic Prototype: void Q6_l2fetch_AP(Address Rs, Word64 Rtt) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_l2fetch_AP __builtin_HEXAGON_Y5_l2fetch + +#if __HEXAGON_ARCH__ >= 60 +/* 
==========================================================================
+ Assembly Syntax: Rdd32=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rol_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rol_PI __builtin_HEXAGON_S6_rol_i_p
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolacc_PI __builtin_HEXAGON_S6_rol_i_p_acc
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_roland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_roland_PI __builtin_HEXAGON_S6_rol_i_p_and
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolnac_PI __builtin_HEXAGON_S6_rol_i_p_nac
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolor_PI __builtin_HEXAGON_S6_rol_i_p_or
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolxacc_PI __builtin_HEXAGON_S6_rol_i_p_xacc
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rd32=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rol_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rol_RI __builtin_HEXAGON_S6_rol_i_r
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32+=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolacc_RI __builtin_HEXAGON_S6_rol_i_r_acc
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32&=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_roland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_roland_RI __builtin_HEXAGON_S6_rol_i_r_and
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32-=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolnac_RI __builtin_HEXAGON_S6_rol_i_r_nac
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32|=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolor_RI __builtin_HEXAGON_S6_rol_i_r_or
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32^=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolxacc_RI __builtin_HEXAGON_S6_rol_i_r_xacc
+#endif /* __HEXAGON_ARCH__ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsdiffb(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsdiffb_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsdiffb_PP __builtin_HEXAGON_M6_vabsdiffb
+#endif /* __HEXAGON_ARCH__ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsdiffub(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsdiffub_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsdiffub_PP __builtin_HEXAGON_M6_vabsdiffub
+#endif /* __HEXAGON_ARCH__ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsplatb(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vsplatb_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsplatb_R __builtin_HEXAGON_S6_vsplatrbp
+#endif /* __HEXAGON_ARCH__ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vtrunehb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vtrunehb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vtrunehb_PP __builtin_HEXAGON_S6_vtrunehb_ppp
+#endif /* __HEXAGON_ARCH__ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vtrunohb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vtrunohb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
+#endif /* __HEXAGON_ARCH__ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Vd32=vmem(Rt32):nt
+ C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
+#endif /* __HEXAGON_ARCH__ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
+ C Intrinsic Prototype: Byte Q6_p_not_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_any8_vcmpb_eq_PP __builtin_HEXAGON_A6_vcmpbeq_notany
+#endif /* __HEXAGON_ARCH__ >= 65 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfadd(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfadd_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfadd_PP __builtin_HEXAGON_F2_dfadd
+#endif /* __HEXAGON_ARCH__ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfsub(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfsub_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfsub_PP __builtin_HEXAGON_F2_dfsub
+#endif /* __HEXAGON_ARCH__ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyi(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpyinac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyinac_RR __builtin_HEXAGON_M2_mnaci
+#endif /* __HEXAGON_ARCH__ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rd32=mask(#u5,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_mask_II(Word32 Iu5, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mask_II __builtin_HEXAGON_S2_mask
+#endif /* __HEXAGON_ARCH__ >= 66 */
Q6_R_clip_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clip_RI __builtin_HEXAGON_A7_clip +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cround(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_cround_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cround_PI __builtin_HEXAGON_A7_croundd_ri +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cround(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cround_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cround_PR __builtin_HEXAGON_A7_croundd_rr +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=vclip(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vclip_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vclip_PI __builtin_HEXAGON_A7_vclip +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmax(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmax_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmax_PP __builtin_HEXAGON_F2_dfmax +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmin(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmin_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmin_PP __builtin_HEXAGON_F2_dfmin +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmpyfix(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpyfix_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpyfix_PP __builtin_HEXAGON_F2_dfmpyfix +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rxx32+=dfmpyhh(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpyhhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpyhhacc_PP 
__builtin_HEXAGON_F2_dfmpyhh +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rxx32+=dfmpylh(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpylhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpylhacc_PP __builtin_HEXAGON_F2_dfmpylh +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmpyll(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpyll_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpyll_PP __builtin_HEXAGON_F2_dfmpyll +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiw_PP __builtin_HEXAGON_M7_dcmpyiw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiwacc_PP __builtin_HEXAGON_M7_dcmpyiw_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiw_PP_conj __builtin_HEXAGON_M7_dcmpyiwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiwacc_PP_conj __builtin_HEXAGON_M7_dcmpyiwc_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrw_PP __builtin_HEXAGON_M7_dcmpyrw +#endif /* __HEXAGON_ARCH___ >= 67 && defined 
__HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrwacc_PP __builtin_HEXAGON_M7_dcmpyrw_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrw_PP_conj __builtin_HEXAGON_M7_dcmpyrwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrwacc_PP_conj __builtin_HEXAGON_M7_dcmpyrwc_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpyw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vdmpyw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_vdmpyw_PP __builtin_HEXAGON_M7_vdmpy +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpyw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vdmpywacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_vdmpywacc_PP __builtin_HEXAGON_M7_vdmpy_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyiw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyiw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + 
+#define Q6_R_cmpyiw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiw_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyiw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyiwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyiw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiwc_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyrw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrw_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyrwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrwc_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: dmlink(Rs32,Rt32) + C Intrinsic Prototype: void 
Q6_dmlink_AA(Address Rs, Address Rt) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dmlink_AA __builtin_HEXAGON_Y6_dmlink +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Rd32=dmpause + C Intrinsic Prototype: Word32 Q6_R_dmpause() + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_dmpause __builtin_HEXAGON_Y6_dmpause +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Rd32=dmpoll + C Intrinsic Prototype: Word32 Q6_R_dmpoll() + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_dmpoll __builtin_HEXAGON_Y6_dmpoll +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: dmresume(Rs32) + C Intrinsic Prototype: void Q6_dmresume_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dmresume_A __builtin_HEXAGON_Y6_dmresume +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: dmstart(Rs32) + C Intrinsic Prototype: void Q6_dmstart_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dmstart_A __builtin_HEXAGON_Y6_dmstart +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Rd32=dmwait + C Intrinsic Prototype: Word32 Q6_R_dmwait() + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_dmwait __builtin_HEXAGON_Y6_dmwait +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#include <hexagon_circ_brev_intrinsics.h> +#ifdef __HVX__ +#include <hvx_hexagon_protos.h> +#endif /* __HVX__ */ +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_types.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_types.h new file mode 100644 index 0000000..6958809 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hexagon_types.h @@ -0,0 +1,2653 @@ +/******************************************************************************/ +/* (c) 2020 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ +/* */ +/******************************************************************************/ +#ifndef HEXAGON_TYPES_H +#define HEXAGON_TYPES_H + +#include <hexagon_protos.h> + +/* Hexagon names */ +#define HEXAGON_Vect HEXAGON_Vect64 +#define HEXAGON_V_GET_D HEXAGON_V64_GET_D +#define HEXAGON_V_GET_UD HEXAGON_V64_GET_UD +#define HEXAGON_V_GET_W0 HEXAGON_V64_GET_W0 +#define HEXAGON_V_GET_W1 HEXAGON_V64_GET_W1 +#define HEXAGON_V_GET_UW0 HEXAGON_V64_GET_UW0 +#define HEXAGON_V_GET_UW1 HEXAGON_V64_GET_UW1 +#define HEXAGON_V_GET_H0 HEXAGON_V64_GET_H0 +#define HEXAGON_V_GET_H1 HEXAGON_V64_GET_H1 +#define HEXAGON_V_GET_H2 HEXAGON_V64_GET_H2 +#define HEXAGON_V_GET_H3 HEXAGON_V64_GET_H3 +#define HEXAGON_V_GET_UH0 HEXAGON_V64_GET_UH0 +#define HEXAGON_V_GET_UH1 HEXAGON_V64_GET_UH1 +#define HEXAGON_V_GET_UH2 HEXAGON_V64_GET_UH2 +#define HEXAGON_V_GET_UH3 HEXAGON_V64_GET_UH3 +#define HEXAGON_V_GET_B0 HEXAGON_V64_GET_B0 +#define HEXAGON_V_GET_B1 HEXAGON_V64_GET_B1 +#define HEXAGON_V_GET_B2 HEXAGON_V64_GET_B2 +#define HEXAGON_V_GET_B3 HEXAGON_V64_GET_B3 +#define HEXAGON_V_GET_B4 HEXAGON_V64_GET_B4 +#define HEXAGON_V_GET_B5 HEXAGON_V64_GET_B5 +#define HEXAGON_V_GET_B6 HEXAGON_V64_GET_B6 +#define HEXAGON_V_GET_B7 HEXAGON_V64_GET_B7 +#define HEXAGON_V_GET_UB0 HEXAGON_V64_GET_UB0 +#define HEXAGON_V_GET_UB1 HEXAGON_V64_GET_UB1 +#define HEXAGON_V_GET_UB2 HEXAGON_V64_GET_UB2 +#define HEXAGON_V_GET_UB3 HEXAGON_V64_GET_UB3 +#define HEXAGON_V_GET_UB4 HEXAGON_V64_GET_UB4 +#define HEXAGON_V_GET_UB5 HEXAGON_V64_GET_UB5 +#define HEXAGON_V_GET_UB6 HEXAGON_V64_GET_UB6 +#define HEXAGON_V_GET_UB7 HEXAGON_V64_GET_UB7 +#define HEXAGON_V_PUT_D HEXAGON_V64_PUT_D +#define HEXAGON_V_PUT_W0 HEXAGON_V64_PUT_W0 +#define HEXAGON_V_PUT_W1 HEXAGON_V64_PUT_W1 +#define HEXAGON_V_PUT_H0 HEXAGON_V64_PUT_H0 +#define HEXAGON_V_PUT_H1 HEXAGON_V64_PUT_H1 +#define HEXAGON_V_PUT_H2 HEXAGON_V64_PUT_H2 +#define HEXAGON_V_PUT_H3 HEXAGON_V64_PUT_H3 +#define HEXAGON_V_PUT_B0 HEXAGON_V64_PUT_B0 +#define HEXAGON_V_PUT_B1 HEXAGON_V64_PUT_B1 +#define HEXAGON_V_PUT_B2 HEXAGON_V64_PUT_B2 +#define HEXAGON_V_PUT_B3 HEXAGON_V64_PUT_B3 +#define HEXAGON_V_PUT_B4 HEXAGON_V64_PUT_B4 +#define HEXAGON_V_PUT_B5 HEXAGON_V64_PUT_B5 +#define HEXAGON_V_PUT_B6 HEXAGON_V64_PUT_B6 +#define HEXAGON_V_PUT_B7 HEXAGON_V64_PUT_B7 +#define HEXAGON_V_CREATE_D HEXAGON_V64_CREATE_D +#define HEXAGON_V_CREATE_W HEXAGON_V64_CREATE_W +#define HEXAGON_V_CREATE_H HEXAGON_V64_CREATE_H +#define HEXAGON_V_CREATE_B HEXAGON_V64_CREATE_B + +#ifdef __cplusplus +#define HEXAGON_VectC HEXAGON_Vect64C +#endif /* __cplusplus */ + +/* 64 Bit Vectors */ + +typedef long long __attribute__((__may_alias__)) HEXAGON_Vect64; + +/* Extract doubleword macros */ + +#define HEXAGON_V64_GET_D(v) (v) +#define HEXAGON_V64_GET_UD(v) ((unsigned long long)(v)) + +/* Extract word macros */ + +#define HEXAGON_V64_GET_W0(v) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.w[0]; \ + }) +#define HEXAGON_V64_GET_W1(v) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.w[1]; \ + }) +#define HEXAGON_V64_GET_UW0(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned int uw[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.uw[0]; \ + }) +#define HEXAGON_V64_GET_UW1(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned int 
uw[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.uw[1]; \ + }) + +/* Extract half word macros */ + +#define HEXAGON_V64_GET_H0(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[0]; \ + }) +#define HEXAGON_V64_GET_H1(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[1]; \ + }) +#define HEXAGON_V64_GET_H2(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[2]; \ + }) +#define HEXAGON_V64_GET_H3(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[3]; \ + }) +#define HEXAGON_V64_GET_UH0(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.uh[0]; \ + }) +#define HEXAGON_V64_GET_UH1(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.uh[1]; \ + }) +#define HEXAGON_V64_GET_UH2(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.uh[2]; \ + }) +#define HEXAGON_V64_GET_UH3(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.uh[3]; \ + }) + +/* Extract byte macros */ + +#define HEXAGON_V64_GET_B0(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[0]; \ + }) +#define HEXAGON_V64_GET_B1(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[1]; \ + }) +#define HEXAGON_V64_GET_B2(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[2]; \ + }) +#define HEXAGON_V64_GET_B3(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[3]; \ + }) +#define HEXAGON_V64_GET_B4(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[4]; \ + }) +#define HEXAGON_V64_GET_B5(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[5]; \ + }) +#define HEXAGON_V64_GET_B6(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[6]; \ + }) +#define 
HEXAGON_V64_GET_B7(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[7]; \ + }) +#define HEXAGON_V64_GET_UB0(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[0]; \ + }) +#define HEXAGON_V64_GET_UB1(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[1]; \ + }) +#define HEXAGON_V64_GET_UB2(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[2]; \ + }) +#define HEXAGON_V64_GET_UB3(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[3]; \ + }) +#define HEXAGON_V64_GET_UB4(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[4]; \ + }) +#define HEXAGON_V64_GET_UB5(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[5]; \ + }) +#define HEXAGON_V64_GET_UB6(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[6]; \ + }) +#define HEXAGON_V64_GET_UB7(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.ub[7]; \ + }) + +/* NOTE: All set macros return a HEXAGON_Vect64 type */ + +/* Set doubleword macro */ + +#define HEXAGON_V64_PUT_D(v, new) (new) + +/* Set word macros */ + +#ifdef __hexagon__ + +#define HEXAGON_V64_PUT_W0(v, new) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.w[0] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_W1(v, new) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.w[1] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V64_PUT_W0(v, new) \ + (((v) & 0xffffffff00000000LL) | ((HEXAGON_Vect64)((unsigned int)(new)))) +#define HEXAGON_V64_PUT_W1(v, new) \ + (((v) & 0x00000000ffffffffLL) | (((HEXAGON_Vect64)(new)) << 32LL)) + +#endif /* !__hexagon__ */ + +/* Set half word macros */ + +#ifdef __hexagon__ + +#define HEXAGON_V64_PUT_H0(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[0] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_H1(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + 
_HEXAGON_V64_internal_union.h[1] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_H2(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[2] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_H3(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.h[3] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V64_PUT_H0(v, new) \ + (((v) & 0xffffffffffff0000LL) | ((HEXAGON_Vect64)((unsigned short)(new)))) +#define HEXAGON_V64_PUT_H1(v, new) \ + (((v) & 0xffffffff0000ffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 16LL)) +#define HEXAGON_V64_PUT_H2(v, new) \ + (((v) & 0xffff0000ffffffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 32LL)) +#define HEXAGON_V64_PUT_H3(v, new) \ + (((v) & 0x0000ffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 48LL)) + +#endif /* !__hexagon__ */ + +/* Set byte macros */ + +#ifdef __hexagon__ + +#define HEXAGON_V64_PUT_B0(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[0] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B1(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[1] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B2(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[2] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B3(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[3] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B4(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[4] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B5(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[5] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B6(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[6] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) +#define HEXAGON_V64_PUT_B7(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.d = (v); \ + _HEXAGON_V64_internal_union.b[7] = (new); \ + _HEXAGON_V64_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V64_PUT_B0(v, new) \ + (((v) & 0xffffffffffffff00LL) | ((HEXAGON_Vect64)((unsigned char)(new)))) +#define HEXAGON_V64_PUT_B1(v, new) \ + (((v) & 0xffffffffffff00ffLL) | (((HEXAGON_Vect64)((unsigned 
char)(new))) << 8LL)) +#define HEXAGON_V64_PUT_B2(v, new) \ + (((v) & 0xffffffffff00ffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 16LL)) +#define HEXAGON_V64_PUT_B3(v, new) \ + (((v) & 0xffffffff00ffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 24LL)) +#define HEXAGON_V64_PUT_B4(v, new) \ + (((v) & 0xffffff00ffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 32LL)) +#define HEXAGON_V64_PUT_B5(v, new) \ + (((v) & 0xffff00ffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 40LL)) +#define HEXAGON_V64_PUT_B6(v, new) \ + (((v) & 0xff00ffffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 48LL)) +#define HEXAGON_V64_PUT_B7(v, new) \ + (((v) & 0x00ffffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 56LL)) + +#endif /* !__hexagon__ */ + +/* NOTE: All create macros return a HEXAGON_Vect64 type */ + +/* Create from a doubleword */ + +#define HEXAGON_V64_CREATE_D(d) (d) + +/* Create from words */ + +#ifdef __hexagon__ + +#define HEXAGON_V64_CREATE_W(w1, w0) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.w[0] = (w0); \ + _HEXAGON_V64_internal_union.w[1] = (w1); \ + _HEXAGON_V64_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V64_CREATE_W(w1, w0) \ + ((((HEXAGON_Vect64)(w1)) << 32LL) | ((HEXAGON_Vect64)((w0) & 0xffffffff))) + +#endif /* !__hexagon__ */ + +/* Create from half words */ + +#ifdef __hexagon__ + +#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.h[0] = (h0); \ + _HEXAGON_V64_internal_union.h[1] = (h1); \ + _HEXAGON_V64_internal_union.h[2] = (h2); \ + _HEXAGON_V64_internal_union.h[3] = (h3); \ + _HEXAGON_V64_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0) \ + ((((HEXAGON_Vect64)(h3)) << 48LL) | (((HEXAGON_Vect64)((h2) & 0xffff)) << 32LL) | \ + (((HEXAGON_Vect64)((h1) & 0xffff)) << 16LL) | ((HEXAGON_Vect64)((h0) & 0xffff))) + +#endif /* !__hexagon__ */ + +/* Create from bytes */ + +#ifdef __hexagon__ + +#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _HEXAGON_V64_internal_union; \ + _HEXAGON_V64_internal_union.b[0] = (b0); \ + _HEXAGON_V64_internal_union.b[1] = (b1); \ + _HEXAGON_V64_internal_union.b[2] = (b2); \ + _HEXAGON_V64_internal_union.b[3] = (b3); \ + _HEXAGON_V64_internal_union.b[4] = (b4); \ + _HEXAGON_V64_internal_union.b[5] = (b5); \ + _HEXAGON_V64_internal_union.b[6] = (b6); \ + _HEXAGON_V64_internal_union.b[7] = (b7); \ + _HEXAGON_V64_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \ + ((((HEXAGON_Vect64)(b7)) << 56LL) | (((HEXAGON_Vect64)((b6) & 0xff)) << 48LL) | \ + (((HEXAGON_Vect64)((b5) & 0xff)) << 40LL) | (((HEXAGON_Vect64)((b4) & 0xff)) << 32LL) | \ + (((HEXAGON_Vect64)((b3) & 0xff)) << 24LL) | (((HEXAGON_Vect64)((b2) & 0xff)) << 16LL) | \ + (((HEXAGON_Vect64)((b1) & 0xff)) << 8LL) | ((HEXAGON_Vect64)((b0) & 0xff))) + +#endif /* !__hexagon__ */ + +#ifdef __cplusplus + +class HEXAGON_Vect64C { +public: + // Constructors + HEXAGON_Vect64C(long long d = 0) : data(d) {}; + HEXAGON_Vect64C(int w1, int w0) : data(HEXAGON_V64_CREATE_W(w1, w0)) {}; + HEXAGON_Vect64C(short h3, short h2, short h1, short h0) + : data(HEXAGON_V64_CREATE_H(h3, h2, h1, h0)) {}; + HEXAGON_Vect64C(signed char b7, signed char b6, 
signed char b5, signed char b4, + signed char b3, signed char b2, signed char b1, signed char b0) + : data(HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {}; + HEXAGON_Vect64C(const HEXAGON_Vect64C &v) : data(v.data) {}; + + HEXAGON_Vect64C &operator=(const HEXAGON_Vect64C &v) { + data = v.data; + return *this; + }; + + operator long long() { + return data; + }; + + // Extract doubleword methods + long long D(void) { + return HEXAGON_V64_GET_D(data); + }; + unsigned long long UD(void) { + return HEXAGON_V64_GET_UD(data); + }; + + // Extract word methods + int W0(void) { + return HEXAGON_V64_GET_W0(data); + }; + int W1(void) { + return HEXAGON_V64_GET_W1(data); + }; + unsigned int UW0(void) { + return HEXAGON_V64_GET_UW0(data); + }; + unsigned int UW1(void) { + return HEXAGON_V64_GET_UW1(data); + }; + + // Extract half word methods + short H0(void) { + return HEXAGON_V64_GET_H0(data); + }; + short H1(void) { + return HEXAGON_V64_GET_H1(data); + }; + short H2(void) { + return HEXAGON_V64_GET_H2(data); + }; + short H3(void) { + return HEXAGON_V64_GET_H3(data); + }; + unsigned short UH0(void) { + return HEXAGON_V64_GET_UH0(data); + }; + unsigned short UH1(void) { + return HEXAGON_V64_GET_UH1(data); + }; + unsigned short UH2(void) { + return HEXAGON_V64_GET_UH2(data); + }; + unsigned short UH3(void) { + return HEXAGON_V64_GET_UH3(data); + }; + + // Extract byte methods + signed char B0(void) { + return HEXAGON_V64_GET_B0(data); + }; + signed char B1(void) { + return HEXAGON_V64_GET_B1(data); + }; + signed char B2(void) { + return HEXAGON_V64_GET_B2(data); + }; + signed char B3(void) { + return HEXAGON_V64_GET_B3(data); + }; + signed char B4(void) { + return HEXAGON_V64_GET_B4(data); + }; + signed char B5(void) { + return HEXAGON_V64_GET_B5(data); + }; + signed char B6(void) { + return HEXAGON_V64_GET_B6(data); + }; + signed char B7(void) { + return HEXAGON_V64_GET_B7(data); + }; + unsigned char UB0(void) { + return HEXAGON_V64_GET_UB0(data); + }; + unsigned char UB1(void) { + return HEXAGON_V64_GET_UB1(data); + }; + unsigned char UB2(void) { + return HEXAGON_V64_GET_UB2(data); + }; + unsigned char UB3(void) { + return HEXAGON_V64_GET_UB3(data); + }; + unsigned char UB4(void) { + return HEXAGON_V64_GET_UB4(data); + }; + unsigned char UB5(void) { + return HEXAGON_V64_GET_UB5(data); + }; + unsigned char UB6(void) { + return HEXAGON_V64_GET_UB6(data); + }; + unsigned char UB7(void) { + return HEXAGON_V64_GET_UB7(data); + }; + + // NOTE: All set methods return a HEXAGON_Vect64C type + + // Set doubleword method + HEXAGON_Vect64C D(long long d) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_D(data, d)); + }; + + // Set word methods + HEXAGON_Vect64C W0(int w) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_W0(data, w)); + }; + HEXAGON_Vect64C W1(int w) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_W1(data, w)); + }; + + // Set half word methods + HEXAGON_Vect64C H0(short h) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_H0(data, h)); + }; + HEXAGON_Vect64C H1(short h) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_H1(data, h)); + }; + HEXAGON_Vect64C H2(short h) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_H2(data, h)); + }; + HEXAGON_Vect64C H3(short h) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_H3(data, h)); + }; + + // Set byte methods + HEXAGON_Vect64C B0(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B0(data, b)); + }; + HEXAGON_Vect64C B1(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B1(data, b)); + }; + HEXAGON_Vect64C B2(signed char b) { + return 
HEXAGON_Vect64C(HEXAGON_V64_PUT_B2(data, b)); + }; + HEXAGON_Vect64C B3(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B3(data, b)); + }; + HEXAGON_Vect64C B4(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B4(data, b)); + }; + HEXAGON_Vect64C B5(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B5(data, b)); + }; + HEXAGON_Vect64C B6(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B6(data, b)); + }; + HEXAGON_Vect64C B7(signed char b) { + return HEXAGON_Vect64C(HEXAGON_V64_PUT_B7(data, b)); + }; + +private: + long long data; +}; + +#endif /* __cplusplus */ + +/* 32 Bit Vectors */ + +typedef int HEXAGON_Vect32; + +/* Extract word macros */ + +#define HEXAGON_V32_GET_W(v) (v) +#define HEXAGON_V32_GET_UW(v) ((unsigned int)(v)) + +/* Extract half word macros */ + +#define HEXAGON_V32_GET_H0(v) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.h[0]; \ + }) +#define HEXAGON_V32_GET_H1(v) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.h[1]; \ + }) +#define HEXAGON_V32_GET_UH0(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned short uh[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.uh[0]; \ + }) +#define HEXAGON_V32_GET_UH1(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned short uh[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.uh[1]; \ + }) + +/* Extract byte macros */ + +#define HEXAGON_V32_GET_B0(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[0]; \ + }) +#define HEXAGON_V32_GET_B1(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[1]; \ + }) +#define HEXAGON_V32_GET_B2(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[2]; \ + }) +#define HEXAGON_V32_GET_B3(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[3]; \ + }) +#define HEXAGON_V32_GET_UB0(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.ub[0]; \ + }) +#define HEXAGON_V32_GET_UB1(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.ub[1]; \ + }) +#define HEXAGON_V32_GET_UB2(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.ub[2]; \ + }) +#define HEXAGON_V32_GET_UB3(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.ub[3]; \ + }) + +/* NOTE: All set macros return a HEXAGON_Vect32 type */ + +/* Set word macro */ + 
+#define HEXAGON_V32_PUT_W(v, new) (new) + +/* Set half word macros */ + +#ifdef __hexagon__ + +#define HEXAGON_V32_PUT_H0(v, new) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.h[0] = (new); \ + _HEXAGON_V32_internal_union.w; \ + }) +#define HEXAGON_V32_PUT_H1(v, new) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.h[1] = (new); \ + _HEXAGON_V32_internal_union.w; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V32_PUT_H0(v, new) \ + (((v) & 0xffff0000) | ((HEXAGON_Vect32)((unsigned short)(new)))) +#define HEXAGON_V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((HEXAGON_Vect32)(new)) << 16)) + +#endif /* !__hexagon__ */ + +/* Set byte macros */ + +#ifdef __hexagon__ + +#define HEXAGON_V32_PUT_B0(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[0] = (new); \ + _HEXAGON_V32_internal_union.w; \ + }) +#define HEXAGON_V32_PUT_B1(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[1] = (new); \ + _HEXAGON_V32_internal_union.w; \ + }) +#define HEXAGON_V32_PUT_B2(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[2] = (new); \ + _HEXAGON_V32_internal_union.w; \ + }) +#define HEXAGON_V32_PUT_B3(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.w = (v); \ + _HEXAGON_V32_internal_union.b[3] = (new); \ + _HEXAGON_V32_internal_union.w; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V32_PUT_B0(v, new) \ + (((v) & 0xffffff00) | ((HEXAGON_Vect32)((unsigned char)(new)))) +#define HEXAGON_V32_PUT_B1(v, new) \ + (((v) & 0xffff00ff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 8)) +#define HEXAGON_V32_PUT_B2(v, new) \ + (((v) & 0xff00ffff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 16)) +#define HEXAGON_V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((HEXAGON_Vect32)(new)) << 24)) + +#endif /* !__hexagon__ */ + +/* NOTE: All create macros return a HEXAGON_Vect32 type */ + +/* Create from a word */ + +#define HEXAGON_V32_CREATE_W(w) (w) + +/* Create from half words */ + +#ifdef __hexagon__ + +#define HEXAGON_V32_CREATE_H(h1, h0) \ + __extension__({ \ + union { \ + long long d; \ + short h[2]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.h[0] = (h0); \ + _HEXAGON_V32_internal_union.h[1] = (h1); \ + _HEXAGON_V32_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V32_CREATE_H(h1, h0) \ + ((((HEXAGON_Vect32)(h1)) << 16) | ((HEXAGON_Vect32)((h0) & 0xffff))) + +#endif /* !__hexagon__ */ + +/* Create from bytes */ +#ifdef __hexagon__ + +#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0) \ + __extension__({ \ + union { \ + long long d; \ + char b[4]; \ + } _HEXAGON_V32_internal_union; \ + _HEXAGON_V32_internal_union.b[0] = (b0); \ + _HEXAGON_V32_internal_union.b[1] = (b1); \ + _HEXAGON_V32_internal_union.b[2] = (b2); \ + _HEXAGON_V32_internal_union.b[3] = (b3); \ + _HEXAGON_V32_internal_union.d; \ + }) + +#else /* !__hexagon__ */ + +#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0) \ + 
((((HEXAGON_Vect32)(b3)) << 24) | (((HEXAGON_Vect32)((b2) & 0xff)) << 16) | \ + (((HEXAGON_Vect32)((b1) & 0xff)) << 8) | ((HEXAGON_Vect32)((b0) & 0xff))) + +#endif /* !__hexagon__ */ + +#ifdef __cplusplus + +class HEXAGON_Vect32C { +public: + // Constructors + HEXAGON_Vect32C(int w = 0) : data(w) {}; + HEXAGON_Vect32C(short h1, short h0) : data(HEXAGON_V32_CREATE_H(h1, h0)) {}; + HEXAGON_Vect32C(signed char b3, signed char b2, signed char b1, signed char b0) + : data(HEXAGON_V32_CREATE_B(b3, b2, b1, b0)) {}; + HEXAGON_Vect32C(const HEXAGON_Vect32C &v) : data(v.data) {}; + + HEXAGON_Vect32C &operator=(const HEXAGON_Vect32C &v) { + data = v.data; + return *this; + }; + + operator int() { + return data; + }; + + // Extract word methods + int W(void) { + return HEXAGON_V32_GET_W(data); + }; + unsigned int UW(void) { + return HEXAGON_V32_GET_UW(data); + }; + + // Extract half word methods + short H0(void) { + return HEXAGON_V32_GET_H0(data); + }; + short H1(void) { + return HEXAGON_V32_GET_H1(data); + }; + unsigned short UH0(void) { + return HEXAGON_V32_GET_UH0(data); + }; + unsigned short UH1(void) { + return HEXAGON_V32_GET_UH1(data); + }; + + // Extract byte methods + signed char B0(void) { + return HEXAGON_V32_GET_B0(data); + }; + signed char B1(void) { + return HEXAGON_V32_GET_B1(data); + }; + signed char B2(void) { + return HEXAGON_V32_GET_B2(data); + }; + signed char B3(void) { + return HEXAGON_V32_GET_B3(data); + }; + unsigned char UB0(void) { + return HEXAGON_V32_GET_UB0(data); + }; + unsigned char UB1(void) { + return HEXAGON_V32_GET_UB1(data); + }; + unsigned char UB2(void) { + return HEXAGON_V32_GET_UB2(data); + }; + unsigned char UB3(void) { + return HEXAGON_V32_GET_UB3(data); + }; + + // NOTE: All set methods return a HEXAGON_Vect32C type + + // Set word method + HEXAGON_Vect32C W(int w) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_W(data, w)); + }; + + // Set half word methods + HEXAGON_Vect32C H0(short h) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_H0(data, h)); + }; + HEXAGON_Vect32C H1(short h) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_H1(data, h)); + }; + + // Set byte methods + HEXAGON_Vect32C B0(signed char b) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_B0(data, b)); + }; + HEXAGON_Vect32C B1(signed char b) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_B1(data, b)); + }; + HEXAGON_Vect32C B2(signed char b) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_B2(data, b)); + }; + HEXAGON_Vect32C B3(signed char b) { + return HEXAGON_Vect32C(HEXAGON_V32_PUT_B3(data, b)); + }; + +private: + int data; +}; + +#endif /* __cplusplus */ + +// V65 Silver types +#if __Q6S_ARCH__ >= 65 + // Silver vector types are 128 bytes, and pairs are 256. The vector predicate + // types are 16 bytes and 32 bytes for pairs. 
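+  // Editor's sketch, not part of the upstream header: these are plain Clang +  // extended-vector types (__vector_size__), so under this guard one might +  // write, e.g. (with 'ptr' a hypothetical void *): +  //   Q6S_Vector v;                          // 128 bytes, 128-byte aligned +  //   Q6S_UVector *pu = (Q6S_UVector *)ptr;  // same size, only 4-byte +  //                                          // aligned, for unaligned views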
+ typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16))) + __attribute__((aligned(128))); + + typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32))) + __attribute__((aligned(128))); + + typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + + typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256))) + __attribute__((aligned(256))); + + typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(4))); + + typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256))) + __attribute__((aligned(4))); + + #define Q6S_VectorPredPair HEXAGON_VecPred256 + #define Q6S_VectorPred HEXAGON_VecPred128 + #define Q6S_Vector HEXAGON_Vect1024 + #define Q6S_VectorPair HEXAGON_Vect2048 + #define Q6S_UVector HEXAGON_UVect1024 + #define Q6S_UVectorPair HEXAGON_UVect2048 + +#else /* __Q6S_ARCH__ >= 65 */ + +// V65 Vector types +#if __HVX_ARCH__ >= 65 +#if defined __HVX__ && (__HVX_LENGTH__ == 128) + typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + + typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + + typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256))) + __attribute__((aligned(256))); + + typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(4))); + + typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256))) + __attribute__((aligned(4))); + + #define HVX_VectorPred HEXAGON_VecPred128 + #define HVX_Vector HEXAGON_Vect1024 + #define HVX_VectorPair HEXAGON_Vect2048 + #define HVX_UVector HEXAGON_UVect1024 + #define HVX_UVectorPair HEXAGON_UVect2048 +#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */ +#if defined __HVX__ && (__HVX_LENGTH__ == 64) + typedef long HEXAGON_VecPred64 __attribute__((__vector_size__(64))) + __attribute__((aligned(64))); + + typedef long HEXAGON_Vect512 __attribute__((__vector_size__(64))) + __attribute__((aligned(64))); + + typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + + typedef long HEXAGON_UVect512 __attribute__((__vector_size__(64))) + __attribute__((aligned(4))); + + typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(4))); + + #define HVX_VectorPred HEXAGON_VecPred64 + #define HVX_Vector HEXAGON_Vect512 + #define HVX_VectorPair HEXAGON_Vect1024 + #define HVX_UVector HEXAGON_UVect512 + #define HVX_UVectorPair HEXAGON_UVect1024 +#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */ +#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */ +#endif /* __HVX_ARCH__ >= 65 */ +#endif /* __Q6S_ARCH__ >= 65 */ + +/* Predicates */ + +typedef int HEXAGON_Pred; + +/*** + *** backward compatibility aliases + ***/ + +/* Old names */ +#define Q6Vect Q6Vect64 +#define Q6V_GET_D Q6V64_GET_D +#define Q6V_GET_UD Q6V64_GET_UD +#define Q6V_GET_W0 Q6V64_GET_W0 +#define Q6V_GET_W1 Q6V64_GET_W1 +#define Q6V_GET_UW0 Q6V64_GET_UW0 +#define Q6V_GET_UW1 Q6V64_GET_UW1 +#define Q6V_GET_H0 Q6V64_GET_H0 +#define Q6V_GET_H1 Q6V64_GET_H1 +#define Q6V_GET_H2 Q6V64_GET_H2 +#define Q6V_GET_H3 Q6V64_GET_H3 +#define Q6V_GET_UH0 Q6V64_GET_UH0 +#define Q6V_GET_UH1 Q6V64_GET_UH1 +#define Q6V_GET_UH2 Q6V64_GET_UH2 +#define Q6V_GET_UH3 Q6V64_GET_UH3 +#define Q6V_GET_B0 Q6V64_GET_B0 +#define Q6V_GET_B1 Q6V64_GET_B1 +#define Q6V_GET_B2 Q6V64_GET_B2 +#define Q6V_GET_B3 Q6V64_GET_B3 +#define Q6V_GET_B4 Q6V64_GET_B4 +#define Q6V_GET_B5 
Q6V64_GET_B5 +#define Q6V_GET_B6 Q6V64_GET_B6 +#define Q6V_GET_B7 Q6V64_GET_B7 +#define Q6V_GET_UB0 Q6V64_GET_UB0 +#define Q6V_GET_UB1 Q6V64_GET_UB1 +#define Q6V_GET_UB2 Q6V64_GET_UB2 +#define Q6V_GET_UB3 Q6V64_GET_UB3 +#define Q6V_GET_UB4 Q6V64_GET_UB4 +#define Q6V_GET_UB5 Q6V64_GET_UB5 +#define Q6V_GET_UB6 Q6V64_GET_UB6 +#define Q6V_GET_UB7 Q6V64_GET_UB7 +#define Q6V_PUT_D Q6V64_PUT_D +#define Q6V_PUT_W0 Q6V64_PUT_W0 +#define Q6V_PUT_W1 Q6V64_PUT_W1 +#define Q6V_PUT_H0 Q6V64_PUT_H0 +#define Q6V_PUT_H1 Q6V64_PUT_H1 +#define Q6V_PUT_H2 Q6V64_PUT_H2 +#define Q6V_PUT_H3 Q6V64_PUT_H3 +#define Q6V_PUT_B0 Q6V64_PUT_B0 +#define Q6V_PUT_B1 Q6V64_PUT_B1 +#define Q6V_PUT_B2 Q6V64_PUT_B2 +#define Q6V_PUT_B3 Q6V64_PUT_B3 +#define Q6V_PUT_B4 Q6V64_PUT_B4 +#define Q6V_PUT_B5 Q6V64_PUT_B5 +#define Q6V_PUT_B6 Q6V64_PUT_B6 +#define Q6V_PUT_B7 Q6V64_PUT_B7 +#define Q6V_CREATE_D Q6V64_CREATE_D +#define Q6V_CREATE_W Q6V64_CREATE_W +#define Q6V_CREATE_H Q6V64_CREATE_H +#define Q6V_CREATE_B Q6V64_CREATE_B + +#ifdef __cplusplus +#define Q6VectC Q6Vect64C +#endif /* __cplusplus */ + +/* 64 Bit Vectors */ + +typedef long long __attribute__((__may_alias__)) Q6Vect64; + +/* Extract doubleword macros */ + +#define Q6V64_GET_D(v) (v) +#define Q6V64_GET_UD(v) ((unsigned long long)(v)) + +/* Extract word macros */ + +#define Q6V64_GET_W0(v) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.w[0]; \ + }) +#define Q6V64_GET_W1(v) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.w[1]; \ + }) +#define Q6V64_GET_UW0(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned int uw[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.uw[0]; \ + }) +#define Q6V64_GET_UW1(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned int uw[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.uw[1]; \ + }) + +/* Extract half word macros */ + +#define Q6V64_GET_H0(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[0]; \ + }) +#define Q6V64_GET_H1(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[1]; \ + }) +#define Q6V64_GET_H2(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[2]; \ + }) +#define Q6V64_GET_H3(v) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[3]; \ + }) +#define Q6V64_GET_UH0(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.uh[0]; \ + }) +#define Q6V64_GET_UH1(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.uh[1]; \ + }) +#define Q6V64_GET_UH2(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.uh[2]; \ + }) +#define Q6V64_GET_UH3(v) \ + 
__extension__({ \ + union { \ + long long d; \ + unsigned short uh[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.uh[3]; \ + }) + +/* Extract byte macros */ + +#define Q6V64_GET_B0(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[0]; \ + }) +#define Q6V64_GET_B1(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[1]; \ + }) +#define Q6V64_GET_B2(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[2]; \ + }) +#define Q6V64_GET_B3(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[3]; \ + }) +#define Q6V64_GET_B4(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[4]; \ + }) +#define Q6V64_GET_B5(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[5]; \ + }) +#define Q6V64_GET_B6(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[6]; \ + }) +#define Q6V64_GET_B7(v) \ + __extension__({ \ + union { \ + long long d; \ + signed char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[7]; \ + }) +#define Q6V64_GET_UB0(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[0]; \ + }) +#define Q6V64_GET_UB1(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[1]; \ + }) +#define Q6V64_GET_UB2(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[2]; \ + }) +#define Q6V64_GET_UB3(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[3]; \ + }) +#define Q6V64_GET_UB4(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[4]; \ + }) +#define Q6V64_GET_UB5(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[5]; \ + }) +#define Q6V64_GET_UB6(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[6]; \ + }) +#define Q6V64_GET_UB7(v) \ + __extension__({ \ + union { \ + long long d; \ + unsigned char ub[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.ub[7]; \ + }) + +/* NOTE: All set macros return a Q6Vect64 type */ + +/* Set doubleword macro */ + +#define Q6V64_PUT_D(v, new) (new) + 
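+/* Illustrative usage (editor's sketch, not part of the upstream header):
+   the get macros above read one lane of a 64-bit value, and the put and
+   create macros below rebuild one, e.g.
+
+     Q6Vect64 v = Q6V64_CREATE_B(7, 6, 5, 4, 3, 2, 1, 0);
+     int lo = Q6V64_GET_B0(v);         // 0; byte 0 is the last argument
+     int hi = Q6V64_GET_B7(v);         // 7
+     v = Q6V64_PUT_W1(v, -1);          // replace only the high word
+*/
+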
+/* Set word macros */ + +#ifdef __qdsp6__ + +#define Q6V64_PUT_W0(v, new) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.w[0] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_W1(v, new) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.w[1] = (new); \ + _Q6V64_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V64_PUT_W0(v, new) \ + (((v) & 0xffffffff00000000LL) | ((Q6Vect64)((unsigned int)(new)))) +#define Q6V64_PUT_W1(v, new) \ + (((v) & 0x00000000ffffffffLL) | (((Q6Vect64)(new)) << 32LL)) + +#endif /* !__qdsp6__ */ + +/* Set half word macros */ + +#ifdef __qdsp6__ + +#define Q6V64_PUT_H0(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[0] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_H1(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[1] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_H2(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[2] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_H3(v, new) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.h[3] = (new); \ + _Q6V64_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V64_PUT_H0(v, new) \ + (((v) & 0xffffffffffff0000LL) | ((Q6Vect64)((unsigned short)(new)))) +#define Q6V64_PUT_H1(v, new) \ + (((v) & 0xffffffff0000ffffLL) | (((Q6Vect64)((unsigned short)(new))) << 16LL)) +#define Q6V64_PUT_H2(v, new) \ + (((v) & 0xffff0000ffffffffLL) | (((Q6Vect64)((unsigned short)(new))) << 32LL)) +#define Q6V64_PUT_H3(v, new) \ + (((v) & 0x0000ffffffffffffLL) | (((Q6Vect64)(new)) << 48LL)) + +#endif /* !__qdsp6__ */ + +/* Set byte macros */ + +#ifdef __qdsp6__ + +#define Q6V64_PUT_B0(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[0] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B1(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[1] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B2(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[2] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B3(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[3] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B4(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[4] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B5(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } 
_Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[5] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B6(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[6] = (new); \ + _Q6V64_internal_union.d; \ + }) +#define Q6V64_PUT_B7(v, new) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.d = (v); \ + _Q6V64_internal_union.b[7] = (new); \ + _Q6V64_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V64_PUT_B0(v, new) \ + (((v) & 0xffffffffffffff00LL) | ((Q6Vect64)((unsigned char)(new)))) +#define Q6V64_PUT_B1(v, new) \ + (((v) & 0xffffffffffff00ffLL) | (((Q6Vect64)((unsigned char)(new))) << 8LL)) +#define Q6V64_PUT_B2(v, new) \ + (((v) & 0xffffffffff00ffffLL) | (((Q6Vect64)((unsigned char)(new))) << 16LL)) +#define Q6V64_PUT_B3(v, new) \ + (((v) & 0xffffffff00ffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 24LL)) +#define Q6V64_PUT_B4(v, new) \ + (((v) & 0xffffff00ffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 32LL)) +#define Q6V64_PUT_B5(v, new) \ + (((v) & 0xffff00ffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 40LL)) +#define Q6V64_PUT_B6(v, new) \ + (((v) & 0xff00ffffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 48LL)) +#define Q6V64_PUT_B7(v, new) \ + (((v) & 0x00ffffffffffffffLL) | (((Q6Vect64)(new)) << 56LL)) + +#endif /* !__qdsp6__ */ + +/* NOTE: All create macros return a Q6Vect64 type */ + +/* Create from a doubleword */ + +#define Q6V64_CREATE_D(d) (d) + +/* Create from words */ + +#ifdef __qdsp6__ + +#define Q6V64_CREATE_W(w1, w0) \ + __extension__({ \ + union { \ + long long d; \ + int w[2]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.w[0] = (w0); \ + _Q6V64_internal_union.w[1] = (w1); \ + _Q6V64_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V64_CREATE_W(w1, w0) \ + ((((Q6Vect64)(w1)) << 32LL) | ((Q6Vect64)((w0) & 0xffffffff))) + +#endif /* !__qdsp6__ */ + +/* Create from half words */ + +#ifdef __qdsp6__ + +#define Q6V64_CREATE_H(h3, h2, h1, h0) \ + __extension__({ \ + union { \ + long long d; \ + short h[4]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.h[0] = (h0); \ + _Q6V64_internal_union.h[1] = (h1); \ + _Q6V64_internal_union.h[2] = (h2); \ + _Q6V64_internal_union.h[3] = (h3); \ + _Q6V64_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V64_CREATE_H(h3, h2, h1, h0) \ + ((((Q6Vect64)(h3)) << 48LL) | (((Q6Vect64)((h2) & 0xffff)) << 32LL) | \ + (((Q6Vect64)((h1) & 0xffff)) << 16LL) | ((Q6Vect64)((h0) & 0xffff))) + +#endif /* !__qdsp6__ */ + +/* Create from bytes */ + +#ifdef __qdsp6__ + +#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \ + __extension__({ \ + union { \ + long long d; \ + char b[8]; \ + } _Q6V64_internal_union; \ + _Q6V64_internal_union.b[0] = (b0); \ + _Q6V64_internal_union.b[1] = (b1); \ + _Q6V64_internal_union.b[2] = (b2); \ + _Q6V64_internal_union.b[3] = (b3); \ + _Q6V64_internal_union.b[4] = (b4); \ + _Q6V64_internal_union.b[5] = (b5); \ + _Q6V64_internal_union.b[6] = (b6); \ + _Q6V64_internal_union.b[7] = (b7); \ + _Q6V64_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \ + ((((Q6Vect64)(b7)) << 56LL) | (((Q6Vect64)((b6) & 0xff)) << 48LL) | \ + (((Q6Vect64)((b5) & 0xff)) << 40LL) | (((Q6Vect64)((b4) & 0xff)) << 32LL) | \ + (((Q6Vect64)((b3) & 0xff)) << 24LL) | (((Q6Vect64)((b2) & 
0xff)) << 16LL) | \ + (((Q6Vect64)((b1) & 0xff)) << 8LL) | ((Q6Vect64)((b0) & 0xff))) + +#endif /* !__qdsp6__ */ + +#ifdef __cplusplus + +class Q6Vect64C { +public: + // Constructors + Q6Vect64C(long long d = 0) : data(d) {}; + Q6Vect64C(int w1, int w0) : data(Q6V64_CREATE_W(w1, w0)) {}; + Q6Vect64C(short h3, short h2, short h1, short h0) + : data(Q6V64_CREATE_H(h3, h2, h1, h0)) {}; + Q6Vect64C(signed char b7, signed char b6, signed char b5, signed char b4, + signed char b3, signed char b2, signed char b1, signed char b0) + : data(Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {}; + Q6Vect64C(const Q6Vect64C &v) : data(v.data) {}; + + Q6Vect64C &operator=(const Q6Vect64C &v) { + data = v.data; + return *this; + }; + + operator long long() { + return data; + }; + + // Extract doubleword methods + long long D(void) { + return Q6V64_GET_D(data); + }; + unsigned long long UD(void) { + return Q6V64_GET_UD(data); + }; + + // Extract word methods + int W0(void) { + return Q6V64_GET_W0(data); + }; + int W1(void) { + return Q6V64_GET_W1(data); + }; + unsigned int UW0(void) { + return Q6V64_GET_UW0(data); + }; + unsigned int UW1(void) { + return Q6V64_GET_UW1(data); + }; + + // Extract half word methods + short H0(void) { + return Q6V64_GET_H0(data); + }; + short H1(void) { + return Q6V64_GET_H1(data); + }; + short H2(void) { + return Q6V64_GET_H2(data); + }; + short H3(void) { + return Q6V64_GET_H3(data); + }; + unsigned short UH0(void) { + return Q6V64_GET_UH0(data); + }; + unsigned short UH1(void) { + return Q6V64_GET_UH1(data); + }; + unsigned short UH2(void) { + return Q6V64_GET_UH2(data); + }; + unsigned short UH3(void) { + return Q6V64_GET_UH3(data); + }; + + // Extract byte methods + signed char B0(void) { + return Q6V64_GET_B0(data); + }; + signed char B1(void) { + return Q6V64_GET_B1(data); + }; + signed char B2(void) { + return Q6V64_GET_B2(data); + }; + signed char B3(void) { + return Q6V64_GET_B3(data); + }; + signed char B4(void) { + return Q6V64_GET_B4(data); + }; + signed char B5(void) { + return Q6V64_GET_B5(data); + }; + signed char B6(void) { + return Q6V64_GET_B6(data); + }; + signed char B7(void) { + return Q6V64_GET_B7(data); + }; + unsigned char UB0(void) { + return Q6V64_GET_UB0(data); + }; + unsigned char UB1(void) { + return Q6V64_GET_UB1(data); + }; + unsigned char UB2(void) { + return Q6V64_GET_UB2(data); + }; + unsigned char UB3(void) { + return Q6V64_GET_UB3(data); + }; + unsigned char UB4(void) { + return Q6V64_GET_UB4(data); + }; + unsigned char UB5(void) { + return Q6V64_GET_UB5(data); + }; + unsigned char UB6(void) { + return Q6V64_GET_UB6(data); + }; + unsigned char UB7(void) { + return Q6V64_GET_UB7(data); + }; + + // NOTE: All set methods return a Q6Vect64C type + + // Set doubleword method + Q6Vect64C D(long long d) { + return Q6Vect64C(Q6V64_PUT_D(data, d)); + }; + + // Set word methods + Q6Vect64C W0(int w) { + return Q6Vect64C(Q6V64_PUT_W0(data, w)); + }; + Q6Vect64C W1(int w) { + return Q6Vect64C(Q6V64_PUT_W1(data, w)); + }; + + // Set half word methods + Q6Vect64C H0(short h) { + return Q6Vect64C(Q6V64_PUT_H0(data, h)); + }; + Q6Vect64C H1(short h) { + return Q6Vect64C(Q6V64_PUT_H1(data, h)); + }; + Q6Vect64C H2(short h) { + return Q6Vect64C(Q6V64_PUT_H2(data, h)); + }; + Q6Vect64C H3(short h) { + return Q6Vect64C(Q6V64_PUT_H3(data, h)); + }; + + // Set byte methods + Q6Vect64C B0(signed char b) { + return Q6Vect64C(Q6V64_PUT_B0(data, b)); + }; + Q6Vect64C B1(signed char b) { + return Q6Vect64C(Q6V64_PUT_B1(data, b)); + }; + Q6Vect64C B2(signed char b) { + 
return Q6Vect64C(Q6V64_PUT_B2(data, b)); + }; + Q6Vect64C B3(signed char b) { + return Q6Vect64C(Q6V64_PUT_B3(data, b)); + }; + Q6Vect64C B4(signed char b) { + return Q6Vect64C(Q6V64_PUT_B4(data, b)); + }; + Q6Vect64C B5(signed char b) { + return Q6Vect64C(Q6V64_PUT_B5(data, b)); + }; + Q6Vect64C B6(signed char b) { + return Q6Vect64C(Q6V64_PUT_B6(data, b)); + }; + Q6Vect64C B7(signed char b) { + return Q6Vect64C(Q6V64_PUT_B7(data, b)); + }; + +private: + long long data; +}; + +#endif /* __cplusplus */ + +/* 32 Bit Vectors */ + +typedef int Q6Vect32; + +/* Extract word macros */ + +#define Q6V32_GET_W(v) (v) +#define Q6V32_GET_UW(v) ((unsigned int)(v)) + +/* Extract half word macros */ + +#define Q6V32_GET_H0(v) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.h[0]; \ + }) +#define Q6V32_GET_H1(v) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.h[1]; \ + }) +#define Q6V32_GET_UH0(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned short uh[2]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.uh[0]; \ + }) +#define Q6V32_GET_UH1(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned short uh[2]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.uh[1]; \ + }) + +/* Extract byte macros */ + +#define Q6V32_GET_B0(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[0]; \ + }) +#define Q6V32_GET_B1(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[1]; \ + }) +#define Q6V32_GET_B2(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[2]; \ + }) +#define Q6V32_GET_B3(v) \ + __extension__({ \ + union { \ + int w; \ + signed char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[3]; \ + }) +#define Q6V32_GET_UB0(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.ub[0]; \ + }) +#define Q6V32_GET_UB1(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.ub[1]; \ + }) +#define Q6V32_GET_UB2(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.ub[2]; \ + }) +#define Q6V32_GET_UB3(v) \ + __extension__({ \ + union { \ + int w; \ + unsigned char ub[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.ub[3]; \ + }) + +/* NOTE: All set macros return a Q6Vect32 type */ + +/* Set word macro */ + +#define Q6V32_PUT_W(v, new) (new) + +/* Set half word macros */ + +#ifdef __qdsp6__ + +#define Q6V32_PUT_H0(v, new) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.h[0] = (new); \ + _Q6V32_internal_union.w; \ + }) +#define Q6V32_PUT_H1(v, new) \ + __extension__({ \ + union { \ + int w; \ + short h[2]; \ + } 
_Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.h[1] = (new); \ + _Q6V32_internal_union.w; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V32_PUT_H0(v, new) \ + (((v) & 0xffff0000) | ((Q6Vect32)((unsigned short)(new)))) +#define Q6V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((Q6Vect32)(new)) << 16)) + +#endif /* !__qdsp6__ */ + +/* Set byte macros */ + +#ifdef __qdsp6__ + +#define Q6V32_PUT_B0(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[0] = (new); \ + _Q6V32_internal_union.w; \ + }) +#define Q6V32_PUT_B1(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[1] = (new); \ + _Q6V32_internal_union.w; \ + }) +#define Q6V32_PUT_B2(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[2] = (new); \ + _Q6V32_internal_union.w; \ + }) +#define Q6V32_PUT_B3(v, new) \ + __extension__({ \ + union { \ + int w; \ + char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.w = (v); \ + _Q6V32_internal_union.b[3] = (new); \ + _Q6V32_internal_union.w; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V32_PUT_B0(v, new) \ + (((v) & 0xffffff00) | ((Q6Vect32)((unsigned char)(new)))) +#define Q6V32_PUT_B1(v, new) \ + (((v) & 0xffff00ff) | (((Q6Vect32)((unsigned char)(new))) << 8)) +#define Q6V32_PUT_B2(v, new) \ + (((v) & 0xff00ffff) | (((Q6Vect32)((unsigned char)(new))) << 16)) +#define Q6V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((Q6Vect32)(new)) << 24)) + +#endif /* !__qdsp6__ */ + +/* NOTE: All create macros return a Q6Vect32 type */ + +/* Create from a word */ + +#define Q6V32_CREATE_W(w) (w) + +/* Create from half words */ + +#ifdef __qdsp6__ + +#define Q6V32_CREATE_H(h1, h0) \ + __extension__({ \ + union { \ + long long d; \ + short h[2]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.h[0] = (h0); \ + _Q6V32_internal_union.h[1] = (h1); \ + _Q6V32_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V32_CREATE_H(h1, h0) \ + ((((Q6Vect32)(h1)) << 16) | ((Q6Vect32)((h0) & 0xffff))) + +#endif /* !__qdsp6__ */ + +/* Create from bytes */ +#ifdef __qdsp6__ + +#define Q6V32_CREATE_B(b3, b2, b1, b0) \ + __extension__({ \ + union { \ + long long d; \ + char b[4]; \ + } _Q6V32_internal_union; \ + _Q6V32_internal_union.b[0] = (b0); \ + _Q6V32_internal_union.b[1] = (b1); \ + _Q6V32_internal_union.b[2] = (b2); \ + _Q6V32_internal_union.b[3] = (b3); \ + _Q6V32_internal_union.d; \ + }) + +#else /* !__qdsp6__ */ + +#define Q6V32_CREATE_B(b3, b2, b1, b0) \ + ((((Q6Vect32)(b3)) << 24) | (((Q6Vect32)((b2) & 0xff)) << 16) | \ + (((Q6Vect32)((b1) & 0xff)) << 8) | ((Q6Vect32)((b0) & 0xff))) + +#endif /* !__qdsp6__ */ + +#ifdef __cplusplus + +class Q6Vect32C { +public: + // Constructors + Q6Vect32C(int w = 0) : data(w) {}; + Q6Vect32C(short h1, short h0) : data(Q6V32_CREATE_H(h1, h0)) {}; + Q6Vect32C(signed char b3, signed char b2, signed char b1, signed char b0) + : data(Q6V32_CREATE_B(b3, b2, b1, b0)) {}; + Q6Vect32C(const Q6Vect32C &v) : data(v.data) {}; + + Q6Vect32C &operator=(const Q6Vect32C &v) { + data = v.data; + return *this; + }; + + operator int() { + return data; + }; + + // Extract word methods + int W(void) { + return Q6V32_GET_W(data); + }; + unsigned int UW(void) { + return Q6V32_GET_UW(data); + }; + + // Extract half word methods + short 
H0(void) { + return Q6V32_GET_H0(data); + }; + short H1(void) { + return Q6V32_GET_H1(data); + }; + unsigned short UH0(void) { + return Q6V32_GET_UH0(data); + }; + unsigned short UH1(void) { + return Q6V32_GET_UH1(data); + }; + + // Extract byte methods + signed char B0(void) { + return Q6V32_GET_B0(data); + }; + signed char B1(void) { + return Q6V32_GET_B1(data); + }; + signed char B2(void) { + return Q6V32_GET_B2(data); + }; + signed char B3(void) { + return Q6V32_GET_B3(data); + }; + unsigned char UB0(void) { + return Q6V32_GET_UB0(data); + }; + unsigned char UB1(void) { + return Q6V32_GET_UB1(data); + }; + unsigned char UB2(void) { + return Q6V32_GET_UB2(data); + }; + unsigned char UB3(void) { + return Q6V32_GET_UB3(data); + }; + + // NOTE: All set methods return a Q6Vect32C type + + // Set word method + Q6Vect32C W(int w) { + return Q6Vect32C(Q6V32_PUT_W(data, w)); + }; + + // Set half word methods + Q6Vect32C H0(short h) { + return Q6Vect32C(Q6V32_PUT_H0(data, h)); + }; + Q6Vect32C H1(short h) { + return Q6Vect32C(Q6V32_PUT_H1(data, h)); + }; + + // Set byte methods + Q6Vect32C B0(signed char b) { + return Q6Vect32C(Q6V32_PUT_B0(data, b)); + }; + Q6Vect32C B1(signed char b) { + return Q6Vect32C(Q6V32_PUT_B1(data, b)); + }; + Q6Vect32C B2(signed char b) { + return Q6Vect32C(Q6V32_PUT_B2(data, b)); + }; + Q6Vect32C B3(signed char b) { + return Q6Vect32C(Q6V32_PUT_B3(data, b)); + }; + +private: + int data; +}; + +#endif /* __cplusplus */ + +// V65 Vector types +#if __HVX_ARCH__ >= 65 +#if defined __HVX__ && (__HVX_LENGTH__ == 128) +typedef long Q6VecPred128 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + +typedef long Q6Vect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + +typedef long Q6Vect2048 __attribute__((__vector_size__(256))) + __attribute__((aligned(256))); + +#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */ +#if defined __HVX__ && (__HVX_LENGTH__ == 64) +typedef long Q6VecPred64 __attribute__((__vector_size__(64))) + __attribute__((aligned(64))); + +typedef long Q6Vect512 __attribute__((__vector_size__(64))) + __attribute__((aligned(64))); + +typedef long Q6Vect1024 __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + +#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */ +#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */ +#endif /* __HVX_ARCH__ >= 65 */ + +/* Predicates */ + +typedef int Q6Pred; + + +#ifdef __HVX__ + +// Extract HVX VectorPair macro. +#define HEXAGON_HVX_GET_W(v) (v) + +// Extract HVX Vector macros. +#define HEXAGON_HVX_GET_V0(v) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_Vector V[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.W = (v); \ + _HEXAGON_HVX_internal_union.V[0]; \ + }) +#define HEXAGON_HVX_GET_V1(v) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_Vector V[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.W = (v); \ + _HEXAGON_HVX_internal_union.V[1]; \ + }) +#define HEXAGON_HVX_GET_P(v) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_VectorPred P[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.W = (v); \ + _HEXAGON_HVX_internal_union.P[0]; \ + }) + +// Set HVX VectorPair macro. +#define HEXAGON_HVX_PUT_W(v, new) (new) + +// Set HVX Vector macros. 
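+// Each put macro copies the pair into a union, overwrites one element, and
+// evaluates to the updated pair; e.g. (editor's sketch)
+//   HVX_VectorPair W2 = HEXAGON_HVX_PUT_V1(W, Q6_V_vzero());
+// yields a copy of W whose high vector is zero, leaving W itself unchanged.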
+#define HEXAGON_HVX_PUT_V0(v, new) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_Vector V[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.W = (v); \ + _HEXAGON_HVX_internal_union.V[0] = (new); \ + _HEXAGON_HVX_internal_union.W; \ + }) + +#define HEXAGON_HVX_PUT_V1(v, new) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_Vector V[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.W = (v); \ + _HEXAGON_HVX_internal_union.V[1] = (new); \ + _HEXAGON_HVX_internal_union.W; \ + }) + +#define HEXAGON_HVX_PUT_P(v, new) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_VectorPred P[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.W = (v); \ + _HEXAGON_HVX_internal_union.P[0] = (new); \ + _HEXAGON_HVX_internal_union.W; \ + }) + + +#define HEXAGON_HVX_CREATE_W(v1, v0) \ + __extension__({ \ + union { \ + HVX_VectorPair W; \ + HVX_Vector V[2]; \ + } _HEXAGON_HVX_internal_union; \ + _HEXAGON_HVX_internal_union.V[0] = (v0); \ + _HEXAGON_HVX_internal_union.V[1] = (v1); \ + _HEXAGON_HVX_internal_union.W; \ + }) + +#ifdef __cplusplus + +class HVX_Vect { +public: + // Constructors. + // Default. + HVX_Vect() : data(Q6_W_vcombine_VV(Q6_V_vzero(), Q6_V_vzero())){}; + + // Custom constructors. + HVX_Vect(HVX_VectorPair W) : data(W){}; + HVX_Vect(HVX_Vector v1, HVX_Vector v0) : data(HEXAGON_HVX_CREATE_W(v1, v0)){}; + + // Copy constructor. + HVX_Vect(const HVX_Vect &W) = default; + + // Move constructor. + HVX_Vect(HVX_Vect &&W) = default; + + // Assignment operator. + HVX_Vect &operator=(const HVX_Vect &W) = default; + + operator HVX_VectorPair() { return data; }; + + // Extract VectorPair method. + HVX_VectorPair W(void) { return HEXAGON_HVX_GET_W(data); }; + + // Extract Vector methods. + HVX_Vector V0(void) { return HEXAGON_HVX_GET_V0(data); }; + HVX_Vector V1(void) { return HEXAGON_HVX_GET_V1(data); }; + HVX_VectorPred P(void) { return HEXAGON_HVX_GET_P(data); }; + + // NOTE: All set methods return a HVX_Vect type. + // Set HVX VectorPair method. + HVX_Vect W(HVX_VectorPair w) { return HVX_Vect(HEXAGON_HVX_PUT_W(data, w)); }; + + // Set HVX Vector methods. 
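+  // Each setter returns a new HVX_Vect with one half (or the predicate
+  // overlay) replaced; e.g. (editor's sketch) W.V0(v).V1(v) produces a pair
+  // with both halves equal to v while leaving W unmodified.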
+  HVX_Vect V0(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V0(data, v)); };
+  HVX_Vect V1(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V1(data, v)); };
+  HVX_Vect P(HVX_VectorPred p) { return HVX_Vect(HEXAGON_HVX_PUT_P(data, p)); };
+
+private:
+  HVX_VectorPair data;
+};
+
+#endif /* __cplusplus */
+#endif /* __HVX__ */
+
+#define HEXAGON_UDMA_DM0_STATUS_IDLE 0x00000000
+#define HEXAGON_UDMA_DM0_STATUS_RUN 0x00000001
+#define HEXAGON_UDMA_DM0_STATUS_ERROR 0x00000002
+#define HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE 0
+#define HEXAGON_UDMA_DESC_DSTATE_COMPLETE 1
+#define HEXAGON_UDMA_DESC_ORDER_NOORDER 0
+#define HEXAGON_UDMA_DESC_ORDER_ORDER 1
+#define HEXAGON_UDMA_DESC_BYPASS_OFF 0
+#define HEXAGON_UDMA_DESC_BYPASS_ON 1
+#define HEXAGON_UDMA_DESC_COMP_NONE 0
+#define HEXAGON_UDMA_DESC_COMP_DLBC 1
+#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE0 0
+#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE1 1
+
+typedef struct hexagon_udma_descriptor_type0_s
+{
+  void *next;
+  unsigned int length:24;
+  unsigned int desctype:2;
+  unsigned int dstcomp:1;
+  unsigned int srccomp:1;
+  unsigned int dstbypass:1;
+  unsigned int srcbypass:1;
+  unsigned int order:1;
+  unsigned int dstate:1;
+  void *src;
+  void *dst;
+} hexagon_udma_descriptor_type0_t;
+
+typedef struct hexagon_udma_descriptor_type1_s
+{
+  void *next;
+  unsigned int length:24;
+  unsigned int desctype:2;
+  unsigned int dstcomp:1;
+  unsigned int srccomp:1;
+  unsigned int dstbypass:1;
+  unsigned int srcbypass:1;
+  unsigned int order:1;
+  unsigned int dstate:1;
+  void *src;
+  void *dst;
+  unsigned int allocation:28;
+  unsigned int padding:4;
+  unsigned int roiwidth:16;
+  unsigned int roiheight:16;
+  unsigned int srcstride:16;
+  unsigned int dststride:16;
+  unsigned int srcwidthoffset:16;
+  unsigned int dstwidthoffset:16;
+} hexagon_udma_descriptor_type1_t;
+
+#endif /* !HEXAGON_TYPES_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hresetintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hresetintrin.h
new file mode 100644
index 0000000..13e31a2
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hresetintrin.h
@@ -0,0 +1,49 @@
+/*===---------------- hresetintrin.h - HRESET intrinsics -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __X86GPRINTRIN_H
+#error "Never use <hresetintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef __HRESETINTRIN_H
+#define __HRESETINTRIN_H
+
+#if __has_extension(gnu_asm)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("hreset")))
+
+/// Provides a hint to the processor to selectively reset the prediction
+/// history of the current logical processor specified by a 32-bit integer
+/// value \a __eax.
+///
+/// This intrinsic corresponds to the HRESET instruction.
+///
+/// \operation
+///    IF __eax == 0
+///      // nop
+///    ELSE
+///      FOR i := 0 to 31
+///        IF __eax[i]
+///          ResetPredictionFeature(i)
+///        FI
+///      ENDFOR
+///    FI
+/// \endoperation
+static __inline void __DEFAULT_FN_ATTRS
+_hreset(int __eax)
+{
+  __asm__ ("hreset $0" :: "a"(__eax));
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __has_extension(gnu_asm) */
+
+#endif /* __HRESETINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/htmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/htmintrin.h
new file mode 100644
index 0000000..49c2b98
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/htmintrin.h
@@ -0,0 +1,212 @@
+/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMINTRIN_H
+#define __HTMINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#ifdef __powerpc__
+
+#include <stdint.h>
+
+typedef uint64_t texasr_t;
+typedef uint32_t texasru_t;
+typedef uint32_t texasrl_t;
+typedef uintptr_t tfiar_t;
+typedef uintptr_t tfhar_t;
+
+#define _HTM_STATE(CR0) ((CR0 >> 1) & 0x3)
+#define _HTM_NONTRANSACTIONAL 0x0
+#define _HTM_SUSPENDED 0x1
+#define _HTM_TRANSACTIONAL 0x2
+
+#define _TEXASR_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+  (((TEXASR) >> (63-(BITNUM))) & ((1<<(SIZE))-1))
+#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
+  (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))
+
+#define _TEXASR_FAILURE_CODE(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 7, 8)
+#define _TEXASRU_FAILURE_CODE(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 8)
+
+#define _TEXASR_FAILURE_PERSISTENT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 7, 1)
+#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)
+
+#define _TEXASR_DISALLOWED(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 8, 1)
+#define _TEXASRU_DISALLOWED(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 8, 1)
+
+#define _TEXASR_NESTING_OVERFLOW(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 9, 1)
+#define _TEXASRU_NESTING_OVERFLOW(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 9, 1)
+
+#define _TEXASR_FOOTPRINT_OVERFLOW(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 10, 1)
+#define _TEXASRU_FOOTPRINT_OVERFLOW(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 10, 1)
+
+#define _TEXASR_SELF_INDUCED_CONFLICT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 11, 1)
+#define _TEXASRU_SELF_INDUCED_CONFLICT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 11, 1)
+
+#define _TEXASR_NON_TRANSACTIONAL_CONFLICT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 12, 1)
+#define _TEXASRU_NON_TRANSACTIONAL_CONFLICT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 12, 1)
+
+#define _TEXASR_TRANSACTION_CONFLICT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 13, 1)
+#define _TEXASRU_TRANSACTION_CONFLICT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 13, 1)
+
+#define _TEXASR_TRANSLATION_INVALIDATION_CONFLICT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 14, 1)
+#define _TEXASRU_TRANSLATION_INVALIDATION_CONFLICT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 14, 1)
+
+#define _TEXASR_IMPLEMENTAION_SPECIFIC(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 15, 1)
+#define _TEXASRU_IMPLEMENTAION_SPECIFIC(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 15, 1)
+
+#define _TEXASR_INSTRUCTION_FETCH_CONFLICT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 16, 1)
+#define _TEXASRU_INSTRUCTION_FETCH_CONFLICT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 16, 1)
+
+#define _TEXASR_ABORT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 31, 1)
+#define _TEXASRU_ABORT(TEXASRU) \
+  _TEXASRU_EXTRACT_BITS(TEXASRU, 31, 1)
+
+
+#define _TEXASR_SUSPENDED(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 32, 1)
+
+#define _TEXASR_PRIVILEGE(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 35, 2)
+
+#define _TEXASR_FAILURE_SUMMARY(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 36, 1)
+
+#define _TEXASR_TFIAR_EXACT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 37, 1)
+
+#define _TEXASR_ROT(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 38, 1)
+
+#define _TEXASR_TRANSACTION_LEVEL(TEXASR) \
+  _TEXASR_EXTRACT_BITS(TEXASR, 63, 12)
+
+#endif /* __powerpc */
+
+#ifdef __s390__
+
+/* Condition codes generated by tbegin */
+#define _HTM_TBEGIN_STARTED 0
+#define _HTM_TBEGIN_INDETERMINATE 1
+#define _HTM_TBEGIN_TRANSIENT 2
+#define _HTM_TBEGIN_PERSISTENT 3
+
+/* The abort codes below this threshold are reserved for machine use. */
+#define _HTM_FIRST_USER_ABORT_CODE 256
+
+/* The transaction diagnostic block as it is defined in the Principles
+   of Operation chapter 5-91. */
+
+struct __htm_tdb {
+  unsigned char format;              /*   0 */
+  unsigned char flags;
+  unsigned char reserved1[4];
+  unsigned short nesting_depth;
+  unsigned long long abort_code;     /*   8 */
+  unsigned long long conflict_token; /*  16 */
+  unsigned long long atia;           /*  24 */
+  unsigned char eaid;                /*  32 */
+  unsigned char dxc;
+  unsigned char reserved2[2];
+  unsigned int program_int_id;
+  unsigned long long exception_id;   /*  40 */
+  unsigned long long bea;            /*  48 */
+  unsigned char reserved3[72];       /*  56 */
+  unsigned long long gprs[16];       /* 128 */
+} __attribute__((__packed__, __aligned__ (8)));
+
+
+/* Helper intrinsics to retry tbegin in case of transient failure. */
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_null (int __retry)
+{
+  int cc, i = 0;
+
+  while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT
+         && i++ < __retry)
+    __builtin_tx_assist(i);
+
+  return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_tdb (void *__tdb, int __retry)
+{
+  int cc, i = 0;
+
+  while ((cc = __builtin_tbegin(__tdb)) == _HTM_TBEGIN_TRANSIENT
+         && i++ < __retry)
+    __builtin_tx_assist(i);
+
+  return cc;
+}
+
+#define __builtin_tbegin_retry(tdb, retry) \
+  (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+   __builtin_tbegin_retry_null(retry) : \
+   __builtin_tbegin_retry_tdb(tdb, retry))
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_null (int __retry)
+{
+  int cc, i = 0;
+
+  while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT
+         && i++ < __retry)
+    __builtin_tx_assist(i);
+
+  return cc;
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+__builtin_tbegin_retry_nofloat_tdb (void *__tdb, int __retry)
+{
+  int cc, i = 0;
+
+  while ((cc = __builtin_tbegin_nofloat(__tdb)) == _HTM_TBEGIN_TRANSIENT
+         && i++ < __retry)
+    __builtin_tx_assist(i);
+
+  return cc;
+}
+
+#define __builtin_tbegin_retry_nofloat(tdb, retry) \
+  (__builtin_constant_p(tdb == 0) && tdb == 0 ? \
+   __builtin_tbegin_retry_nofloat_null(retry) : \
+   __builtin_tbegin_retry_nofloat_tdb(tdb, retry))
+
+#endif /* __s390__ */
+
+#endif /* __HTMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/htmxlintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/htmxlintrin.h
new file mode 100644
index 0000000..6ef6f4b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/htmxlintrin.h
@@ -0,0 +1,345 @@
+/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics-------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __HTMXLINTRIN_H
+#define __HTMXLINTRIN_H
+
+#ifndef __HTM__
+#error "HTM instruction set not enabled"
+#endif
+
+#include <htmintrin.h>
+
+#ifdef __powerpc__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _TEXASR_PTR(TM_BUF) ((texasr_t *)((char *)(TM_BUF) + 0))
+#define _TEXASRU_PTR(TM_BUF) ((texasru_t *)((char *)(TM_BUF) + 0))
+#define _TEXASRL_PTR(TM_BUF) ((texasrl_t *)((char *)(TM_BUF) + 4))
+#define _TFIAR_PTR(TM_BUF) ((tfiar_t *)((char *)(TM_BUF) + 8))
+
+typedef char TM_buff_type[16];
+
+/* This macro can be used to determine whether a transaction was successfully
+   started from the __TM_begin() and __TM_simple_begin() intrinsic functions
+   below. */
+#define _HTM_TBEGIN_STARTED 1
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_simple_begin (void)
+{
+  if (__builtin_expect (__builtin_tbegin (0), 1))
+    return _HTM_TBEGIN_STARTED;
+  return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_begin (void* const __TM_buff)
+{
+  *_TEXASRL_PTR (__TM_buff) = 0;
+  if (__builtin_expect (__builtin_tbegin (0), 1))
+    return _HTM_TBEGIN_STARTED;
+#ifdef __powerpc64__
+  *_TEXASR_PTR (__TM_buff) = __builtin_get_texasr ();
+#else
+  *_TEXASRU_PTR (__TM_buff) = __builtin_get_texasru ();
+  *_TEXASRL_PTR (__TM_buff) = __builtin_get_texasr ();
+#endif
+  *_TFIAR_PTR (__TM_buff) = __builtin_get_tfiar ();
+  return 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_end (void)
+{
+  if (__builtin_expect (__builtin_tend (0), 1))
+    return 1;
+  return 0;
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_abort (void)
+{
+  __builtin_tabort (0);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_named_abort (unsigned char const __code)
+{
+  __builtin_tabort (__code);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_resume (void)
+{
+  __builtin_tresume ();
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_suspend (void)
+{
+  __builtin_tsuspend ();
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_user_abort (void* const __TM_buff)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+  return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_named_user_abort (void* const __TM_buff, unsigned char *__code)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+
+  *__code = _TEXASRU_FAILURE_CODE (texasru);
+  return _TEXASRU_ABORT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_illegal (void* const __TM_buff)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+  return _TEXASRU_DISALLOWED (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_footprint_exceeded (void* const __TM_buff)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+  return _TEXASRU_FOOTPRINT_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_nesting_depth (void* const __TM_buff)
+{
+  texasrl_t texasrl;
+
+  if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL)
+    {
+      texasrl = *_TEXASRL_PTR (__TM_buff);
+      if (!_TEXASR_FAILURE_SUMMARY (texasrl))
+        texasrl = 0;
+    }
+  else
+    texasrl = (texasrl_t) __builtin_get_texasr ();
+
+  return _TEXASR_TRANSACTION_LEVEL (texasrl);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_nested_too_deep(void* const __TM_buff)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+  return _TEXASRU_NESTING_OVERFLOW (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_conflict(void* const __TM_buff)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+  /* Return TEXASR bits 11 (Self-Induced Conflict) through
+     14 (Translation Invalidation Conflict). */
+  return (_TEXASRU_EXTRACT_BITS (texasru, 14, 4)) ? 1 : 0;
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_is_failure_persistent(void* const __TM_buff)
+{
+  texasru_t texasru = *_TEXASRU_PTR (__TM_buff);
+  return _TEXASRU_FAILURE_PERSISTENT (texasru);
+}
+
+extern __inline long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_address(void* const __TM_buff)
+{
+  return *_TFIAR_PTR (__TM_buff);
+}
+
+extern __inline long long
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+__TM_failure_code(void* const __TM_buff)
+{
+  return *_TEXASR_PTR (__TM_buff);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __powerpc__ */
+
+#ifdef __s390__
+
+#include <stdint.h>
+
+/* These intrinsics are being made available for compatibility with
+   the IBM XL compiler. For documentation please see the "z/OS XL
+   C/C++ Programming Guide" publicly available on the web.
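+
+   A typical guarded-transaction pattern with these wrappers might look
+   like the following (editor's illustrative sketch, not taken from the
+   XL documentation):
+
+     long cc = __TM_simple_begin ();
+     if (cc == _HTM_TBEGIN_STARTED)
+       {
+         (transactional work)
+         __TM_end ();
+       }
+     else if (__TM_is_failure_persistent (cc))
+       (fall back to a lock instead of retrying)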
*/ + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_simple_begin () +{ + return __builtin_tbegin_nofloat (0); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_begin (void* const __tdb) +{ + return __builtin_tbegin_nofloat (__tdb); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_end () +{ + return __builtin_tend (); +} + +static __inline void __attribute__((__always_inline__)) +__TM_abort () +{ + return __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE); +} + +static __inline void __attribute__((__always_inline__, __nodebug__)) +__TM_named_abort (unsigned char const __code) +{ + return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + __code); +} + +static __inline void __attribute__((__always_inline__, __nodebug__)) +__TM_non_transactional_store (void* const __addr, long long const __value) +{ + __builtin_non_tx_store ((uint64_t*)__addr, (uint64_t)__value); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_nesting_depth (void* const __tdb_ptr) +{ + int depth = __builtin_tx_nesting_depth (); + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + if (depth != 0) + return depth; + + if (tdb->format != 1) + return 0; + return tdb->nesting_depth; +} + +/* Transaction failure diagnostics */ + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_user_abort (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + if (tdb->format != 1) + return 0; + + return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_named_user_abort (void* const __tdb_ptr, unsigned char* __code) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + if (tdb->format != 1) + return 0; + + if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE) + { + *__code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE; + return 1; + } + return 0; +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_illegal (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + return (tdb->format == 1 + && (tdb->abort_code == 4 /* unfiltered program interruption */ + || tdb->abort_code == 11 /* restricted instruction */)); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_footprint_exceeded (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + return (tdb->format == 1 + && (tdb->abort_code == 7 /* fetch overflow */ + || tdb->abort_code == 8 /* store overflow */)); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_nested_too_deep (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */ +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_conflict (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + return (tdb->format == 1 + && (tdb->abort_code == 9 /* fetch conflict */ + || tdb->abort_code == 10 /* store conflict */)); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_failure_persistent (long const __result) +{ + return __result == _HTM_TBEGIN_PERSISTENT; +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_failure_address (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + 
return tdb->atia; +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_failure_code (void* const __tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; + + return tdb->abort_code; +} + +#endif /* __s390__ */ + +#endif /* __HTMXLINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hvx_hexagon_protos.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hvx_hexagon_protos.h new file mode 100644 index 0000000..41ce7a6 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/hvx_hexagon_protos.h @@ -0,0 +1,4392 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Automatically generated file, do not edit! +//===----------------------------------------------------------------------===// + + + +#ifndef _HVX_HEXAGON_PROTOS_H_ +#define _HVX_HEXAGON_PROTOS_H_ 1 + +#ifdef __HVX__ +#if __HVX_LENGTH__ == 128 +#define __BUILTIN_VECTOR_WRAP(a) a ## _128B +#else +#define __BUILTIN_VECTOR_WRAP(a) a +#endif + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rd32=vextract(Vu32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs) + Instruction Type: LD + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=hi(Vss32) + C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=lo(Vss32) + C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=and(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=and(Qs4,!Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=not(Qs4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=or(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=or(Qs4,!Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vsetq(Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=xor(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) vmem(Rt32+#s4)=Vs32 + C Intrinsic Prototype: void Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: if (!Qv4) vmem(Rt32+#s4):nt=Vs32 + C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) vmem(Rt32+#s4):nt=Vs32 + C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) vmem(Rt32+#s4)=Vs32 + C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vabsdiff(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vabsdiff(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vabs(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vabs(Vu32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vabs(Vu32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vabs(Vu32.w):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (!Qv4) Vx32.b+=Vu32.b
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (Qv4) Vx32.b+=Vu32.b
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (!Qv4) Vx32.h+=Vu32.h
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (Qv4) Vx32.h+=Vu32.h
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vadd(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vadd(Vu32.ub,Vv32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vadd(Vu32.uh,Vv32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
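The vadd variants above differ in overflow policy: the plain forms wrap, the :sat forms clamp, and the widening forms return a vector pair. A sketch of the trade-off (hypothetical helpers, same assumptions as the earlier sketches):

/* Wrap-free u8 addition, two ways. */
static inline HVX_VectorPair add_u8_widening(HVX_Vector a, HVX_Vector b)
{
    return Q6_Wh_vadd_VubVub(a, b);        /* u8 + u8 -> 16-bit lanes, no loss */
}
static inline HVX_Vector add_u8_saturating(HVX_Vector a, HVX_Vector b)
{
    return Q6_Vub_vadd_VubVub_sat(a, b);   /* clamps each lane at 255 */
}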
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (!Qv4) Vx32.w+=Vu32.w
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (Qv4) Vx32.w+=Vu32.w
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=valign(Vu32,Vv32,Rt8)
+   C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=valign(Vu32,Vv32,#u3)
+   C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
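valign is the usual building block for unaligned vector access: read the two naturally aligned vectors around the address and let the low address bits pick the byte rotation. A hedged sketch assuming 128-byte HVX vectors (load_unaligned is a made-up helper):

#include <stdint.h>

static inline HVX_Vector load_unaligned(const uint8_t *p)
{
    const HVX_Vector *base = (const HVX_Vector *)((uintptr_t)p & ~(uintptr_t)127);
    /* valign uses only the low bits of Rt as the byte offset. */
    return Q6_V_valign_VVR(base[1], base[0], (Word32)(uintptr_t)p);
}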
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=vand(Vu32,Vv32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=vand(Qu4,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt)
+   Instruction Type: CVI_VX_LATE
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32|=vand(Qu4,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
+   Instruction Type: CVI_VX_LATE
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qd4=vand(Vu32,Rt32)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX_LATE
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4|=vand(Vu32,Rt32)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX_LATE
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
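vandqrt/vandvrt are the bridges between Q predicate registers and ordinary vectors. A sketch (hypothetical helpers; the -1 masks assume the common all-bytes usage and should be checked against the PRM for a given lane width):

static inline HVX_Vector pred_to_bytemask(HVX_VectorPred q)
{
    return Q6_V_vand_QR(q, -1);   /* active byte lanes become 0xFF, rest 0 */
}
static inline HVX_VectorPred bytemask_to_pred(HVX_Vector v)
{
    return Q6_Q_vand_VR(v, -1);   /* any nonzero byte sets its lane */
}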
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasl(Vu32.h,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasl(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vasl(Vu32.w,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vasl(Vu32.w,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vasl(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasr(Vu32.h,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasr(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vasr(Vu32.w,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vasr(Vu32.w,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
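The vasr forms with an Rt8 operand are narrowing shifts: they scale a pair of 32-bit vectors and pack the results, with optional rounding and saturation. A fixed-point requantization sketch (hypothetical helper; operand order follows the prototype above):

/* Scale two word vectors down by 'shift' with round-to-nearest and signed
   saturation, packing them into one halfword vector. */
static inline HVX_Vector requant_words_to_halves(HVX_Vector odd_w, HVX_Vector even_w,
                                                 Word32 shift)
{
    return Q6_Vh_vasr_VwVwR_rnd_sat(odd_w, even_w, shift);
}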
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vasr(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=Vu32
+   C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32=Vuu32
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h):rnd
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w):rnd
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vcl0(Vu32.uh)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uw=vcl0(Vu32.uw)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32=vcombine(Vu32,Vv32)
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=#0
+   C Intrinsic Prototype: HVX_Vector Q6_V_vzero()
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
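The rounded-average forms above are the classic blend primitive, and vzero/vcombine give cheap initializers for pair accumulators. A sketch (hypothetical helpers, same assumptions as the earlier sketches):

static inline HVX_Vector blend_u8(HVX_Vector a, HVX_Vector b)
{
    return Q6_Vub_vavg_VubVub_rnd(a, b);   /* (a + b + 1) >> 1 per byte */
}
static inline HVX_VectorPair zero_pair(void)
{
    return Q6_W_vcombine_VV(Q6_V_vzero(), Q6_V_vzero());
}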
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vdeal(Vu32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vdeale(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vdeal(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32=vdeal(Vu32,Vv32,Rt8)
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=vdelta(Vu32,Vv32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vdmpy(Vu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.h+=vdmpy(Vu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vdmpy(Vuu32.h,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
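The vdmpy family computes dual multiplies: each output lane is a small dot product of adjacent input lanes against bytes of the scalar Rt32. A 2-tap filter sketch (hypothetical helper; the coefficient packing shown follows the replicate-per-word pattern and should be validated against the Hexagon PRM):

static inline HVX_Vector filter_2tap_u8(HVX_Vector pixels, int c0, int c1)
{
    Word32 coeffs = (Word32)(((c1 & 0xFF) << 8) | (c0 & 0xFF));
    coeffs |= coeffs << 16;                    /* replicate into both halfwords */
    return Q6_Vh_vdmpy_VubRb(pixels, coeffs);  /* Vd32.h=vdmpy(Vu32.ub,Rt32.b)  */
}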
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qd4=vcmp.eq(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4&=vcmp.eq(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4|=vcmp.eq(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4^=vcmp.eq(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qd4=vcmp.eq(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4&=vcmp.eq(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4|=vcmp.eq(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4^=vcmp.eq(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qd4=vcmp.eq(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4&=vcmp.eq(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4|=vcmp.eq(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4^=vcmp.eq(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qd4=vcmp.gt(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4&=vcmp.gt(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4|=vcmp.gt(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Qx4^=vcmp.gt(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
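The _and/_or/_xor compare forms accumulate straight into an existing predicate, avoiding a separate Q-register combine. A range-test sketch (hypothetical helper, signed bytes):

/* Per signed byte lane: lo < x && x < hi. */
static inline HVX_VectorPred in_open_range_b(HVX_Vector x, HVX_Vector lo, HVX_Vector hi)
{
    HVX_VectorPred q = Q6_Q_vcmp_gt_VbVb(x, lo);  /* x > lo        */
    return Q6_Q_vcmp_gtand_QVbVb(q, hi, x);       /* q &= (hi > x) */
}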
Qd4=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVuwVuw 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w=vinsert(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + 
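+   Example (illustrative sketch only; assumes a hexagon-clang target
+   built with -mhvx so the HVX_Vector type from hexagon_types.h is
+   available, plus a live vector value acc and a hypothetical Word32
+   scalar tap0):
+
+     acc = Q6_Vw_vinsert_VwR(acc, tap0);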
========================================================================== */ + +#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vlalign(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vlalign(Vu32,Vv32,#u3) + C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vlsr(Vu32.uh,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vlsr(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vlsr(Vu32.uw,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vlsr(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbR 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmax(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vmax(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmax(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmax(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmin(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vmin(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmin(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmin(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv) 
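+
+/* Usage sketch for the vmpa forms above (illustrative only; rows and
+   rows2 are hypothetical HVX_VectorPair values of unsigned pixel bytes,
+   taps and taps2 are Word32 values carrying packed signed-byte filter
+   coefficients, and the target is hexagon-clang with -mhvx):
+
+     HVX_VectorPair acc = Q6_Wh_vmpa_WubRb(rows, taps);
+     acc = Q6_Wh_vmpaacc_WhWubRb(acc, rows2, taps2);
+*/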
+#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpa(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpa(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVubVb 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpye(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Rt32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhRh_s1_sat 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyieo(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyie(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyio(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyio_VwVh 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + 
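+/* The vmpye/vmpyo forms are commonly paired to build a full 32x32
+   fractional multiply; a rough sketch (illustrative only, with
+   Q31-style word operands a and b, for hexagon-clang with -mhvx):
+
+     HVX_Vector p = Q6_Vw_vmpye_VwVuh(a, b);
+     p = Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(p, a, b);
+*/
+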
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: 
CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vmux(Qt4,Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vnavg(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vnavg(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vnavg(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vnormamt(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
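+   Example (illustrative sketch only; x is a live HVX_Vector of
+   halfword lanes, compiled with hexagon-clang and -mhvx). The result
+   holds the per-lane normalization shift amounts:
+
+     HVX_Vector n = Q6_Vh_vnormamt_Vh(x);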
========================================================================== */ + +#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vnormamt(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vnot(Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vor(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpacke(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpacke(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpack(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vpack(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpacko(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpacko(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpack(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vpack(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpopcount(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vrdelta(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vror(Vu32,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vror_VR 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vround(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vround(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vround(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vround(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsat(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
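+   Example (illustrative sketch only; packs two live halfword vectors
+   hi and lo to saturated unsigned bytes, for hexagon-clang with
+   -mhvx):
+
+     HVX_Vector bytes = Q6_Vub_vsat_VhVh(hi, lo);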
========================================================================== */ + +#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsat(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsxt(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsxt(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuffe(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vshuff(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vshuffe(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuff(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.b=vshuffo(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vshuff(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vshuffoe(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vshuffoe(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuffo(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: if (!Qv4) Vx32.b-=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.b-=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.h-=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.h-=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
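+   Editorial note (added by the editor, not upstream text): the "condnac"
+   wrappers documented above map the conditional subtract-accumulate forms;
+   per lane, Q6_Vh_condnac_QVhVh(Qv, Vx, Vu) behaves like the assembly
+   shown for it: if (Qv) Vx.h -= Vu.h, with the Qn variants testing !Qv.
+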
========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv) +#endif /* __HEXAGON_ARCH___ >= 60 
*/ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.w-=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.w-=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 
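+/* Editorial usage sketch (added by the editor, not upstream text). The
+   Q6_W_vswap_QVV wrapper documented below exchanges the Vu/Vv lanes
+   selected by Qt and returns both orderings as a pair; the sketch assumes
+   the Q6_V_lo_W and Q6_V_hi_W pair accessors and the Q6_Q_vcmp_gt_VbVb
+   compare declared elsewhere in this header:
+
+     HVX_VectorPred q  = Q6_Q_vcmp_gt_VbVb(va, vb);  lanes where va > vb
+     HVX_VectorPair wp = Q6_W_vswap_QVV(q, va, vb);  swap exactly those lanes
+     HVX_Vector     v0 = Q6_V_lo_W(wp);              one ordering
+     HVX_Vector     v1 = Q6_V_hi_W(wp);              the complementary one
+*/
+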
+/* ========================================================================== + Assembly Syntax: Vdd32=vswap(Qt4,Vu32,Vv32) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vtmpy(Vuu32.b,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.b,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vtmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vtmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vtmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vunpack(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vunpack(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vunpacko(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w|=vunpacko(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vunpack(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vunpack(Vu32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vxor(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly 
Syntax: Vdd32.uh=vzxt(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vzxt(Vu32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4=vsetq2(Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4.b=vshuffe(Qs4.h,Qt4.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4.h=vshuffe(Qs4.w,Qt4.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
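+
+   Editorial usage sketch (added by the editor, not upstream text): unlike
+   the wrapping Q6_Vb_vadd_VbVb, this form clamps each signed byte lane to
+   [-128, 127] on overflow:
+     HVX_Vector vsum = Q6_Vb_vadd_VbVb_sat(va, vb);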
========================================================================== */ + +#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(vclb(Vu32.h),Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(vclb(Vu32.w),Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vadd(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector 
Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vadd(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(!Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32|=vand(!Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(!Qv4,Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Qv4,Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu) + Instruction Type: 
CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vlsr(Vu32.ub,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,#u3) + C Intrinsic 
Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vmax(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vmin(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpa(Vuu32.uh,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: 
Vxx32.w+=vmpa(Vuu32.uh,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32=vmpye(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32+=vmpyo(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vround(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vround(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* 
========================================================================== + Assembly Syntax: Vd32.uh=vsat(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv) +#endif /* __HEXAGON_ARCH___ 
>= 62 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vabs(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vabs(Vu32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vasl(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vasr(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if 
__HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vdd32=#0 + C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero() + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vv32.h).h + C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h + C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* 
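+   Editorial note (added by the editor, not upstream text): per the
+   prototypes in this stretch, the vgather wrappers return void and deliver
+   the gathered elements through the HVX_Vector* first argument (the vtmp
+   destination); Rt supplies the memory base, Mu the modifier register, and
+   the vector operand the per-lane offsets.
+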
========================================================================== + Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h + C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv) + Instruction Type: CVI_GATHER_DV + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h + C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv) + Instruction Type: CVI_GATHER_DV + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.w=vgather(Rt32,Mu2,Vv32.w).w + C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w + C Intrinsic Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.h=vlut4(Vu32.uh,Rtt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpaacc_WhWubRub 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vmpye(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vmpye(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vnavg(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
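+
+   Editorial usage sketch (semantics inferred from the mnemonic, not
+   upstream text): vnavg is the halved difference of its inputs, roughly
+   (va - vb) >> 1 per signed byte lane:
+     HVX_Vector vhalf = Q6_Vb_vnavg_VbVb(va, vb);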
========================================================================== */ + +#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.h=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.w=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + 
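+
+   Editorial usage sketch, not part of the upstream header: vtcm_base,
+   region_bytes, offsets and values are hypothetical. Scatter intrinsics
+   operate on VTCM; Mu is typically the region size in bytes minus 1:
+
+     Q6_vscatter_RMWwV((Word32)(uintptr_t)vtcm_base, region_bytes - 1,
+                       offsets, values);
+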
========================================================================== */ + +#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: 
Vxx32.w=vasrinto(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrotr(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsatdw(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpyacc_WwWubWbI_v 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#endif /* __HVX__ */
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ia32intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ia32intrin.h
new file mode 100644
index 0000000..00138ef
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ia32intrin.h
@@ -0,0 +1,441 @@
+/* ===-------- ia32intrin.h ---------------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __IA32INTRIN_H
+#define __IA32INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS_SSE42 __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__))
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
+/** Find the first set bit starting from the lsb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the BSF instruction or the
+ * TZCNT instruction.
+ *
+ * \param __A
+ * A 32-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+__bsfd(int __A) {
+  return __builtin_ctz(__A);
+}
+
+/** Find the first set bit starting from the msb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the BSR instruction or the
+ * LZCNT instruction and an XOR.
+ *
+ * \param __A
+ * A 32-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+__bsrd(int __A) {
+  return 31 - __builtin_clz(__A);
+}
+
+/** Swaps the bytes in the input. Converting little endian to big endian or
+ * vice versa.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the BSWAP instruction.
+ *
+ * \param __A
+ * A 32-bit integer operand.
+ * \returns A 32-bit integer containing the swapped bytes.
+ */
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+__bswapd(int __A) {
+  return __builtin_bswap32(__A);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_bswap(int __A) {
+  return __builtin_bswap32(__A);
+}
+
+#define _bit_scan_forward(A) __bsfd((A))
+#define _bit_scan_reverse(A) __bsrd((A))
+
+#ifdef __x86_64__
+/** Find the first set bit starting from the lsb. Result is undefined if
+ * input is 0.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the BSF instruction or the
+ * TZCNT instruction.
+ *
+ * \param __A
+ * A 64-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+__bsfq(long long __A) {
+  return __builtin_ctzll(__A);
+}
+
+/** Find the first set bit starting from the msb. Result is undefined if
+ * input is 0.
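+ *
+ * Editorial example, not part of the upstream header: __bsrq(0x100) == 8,
+ * since bit 8 is the most significant set bit.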
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the BSR instruction or the
+ * LZCNT instruction and an XOR.
+ *
+ * \param __A
+ * A 64-bit integer operand.
+ * \returns A 32-bit integer containing the bit number.
+ */
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+__bsrq(long long __A) {
+  return 63 - __builtin_clzll(__A);
+}
+
+/** Swaps the bytes in the input. Converting little endian to big endian or
+ * vice versa.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the BSWAP instruction.
+ *
+ * \param __A
+ * A 64-bit integer operand.
+ * \returns A 64-bit integer containing the swapped bytes.
+ */
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
+__bswapq(long long __A) {
+  return __builtin_bswap64(__A);
+}
+
+#define _bswap64(A) __bswapq((A))
+#endif
+
+/** Counts the number of bits in the source operand having a value of 1.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the POPCNT instruction or a
+ * sequence of arithmetic and logic ops to calculate it.
+ *
+ * \param __A
+ * An unsigned 32-bit integer operand.
+ * \returns A 32-bit integer containing the number of bits with value 1 in the
+ * source operand.
+ */
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+__popcntd(unsigned int __A)
+{
+  return __builtin_popcount(__A);
+}
+
+#define _popcnt32(A) __popcntd((A))
+
+#ifdef __x86_64__
+/** Counts the number of bits in the source operand having a value of 1.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the POPCNT instruction or a
+ * sequence of arithmetic and logic ops to calculate it.
+ *
+ * \param __A
+ * An unsigned 64-bit integer operand.
+ * \returns A 64-bit integer containing the number of bits with value 1 in the
+ * source operand.
+ */
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
+__popcntq(unsigned long long __A)
+{
+  return __builtin_popcountll(__A);
+}
+
+#define _popcnt64(A) __popcntq((A))
+#endif /* __x86_64__ */
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__readeflags(void)
+{
+  return __builtin_ia32_readeflags_u64();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writeeflags(unsigned long long __f)
+{
+  __builtin_ia32_writeeflags_u64(__f);
+}
+
+#else /* !__x86_64__ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__readeflags(void)
+{
+  return __builtin_ia32_readeflags_u32();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writeeflags(unsigned int __f)
+{
+  __builtin_ia32_writeeflags_u32(__f);
+}
+#endif /* !__x86_64__ */
+
+/** Cast a 32-bit float value to a 32-bit unsigned integer value
+ *
+ * \headerfile <x86intrin.h>
+ * This intrinsic corresponds to the VMOVD / MOVD instruction in x86_64,
+ * and corresponds to the VMOVL / MOVL instruction in ia32.
+ *
+ * \param __A
+ * A 32-bit float value.
+ * \returns a 32-bit unsigned integer containing the converted value.
+ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
+_castf32_u32(float __A) {
+  return __builtin_bit_cast(unsigned int, __A);
+}
+
+/** Cast a 64-bit float value to a 64-bit unsigned integer value
+ *
+ * \headerfile <x86intrin.h>
+ * This intrinsic corresponds to the VMOVQ / MOVQ instruction in x86_64,
+ * and corresponds to the VMOVL / MOVL instruction in ia32.
+ *
+ * \param __A
+ * A 64-bit float value.
+ * \returns a 64-bit unsigned integer containing the converted value.
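+ *
+ * Editorial example, not part of the upstream header: _castf64_u64(1.0)
+ * yields 0x3FF0000000000000, the IEEE-754 bit pattern of 1.0.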
+ */
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
+_castf64_u64(double __A) {
+  return __builtin_bit_cast(unsigned long long, __A);
+}
+
+/** Cast a 32-bit unsigned integer value to a 32-bit float value
+ *
+ * \headerfile <x86intrin.h>
+ * This intrinsic corresponds to the VMOVQ / MOVQ instruction in x86_64,
+ * and corresponds to the FLDS instruction in ia32.
+ *
+ * \param __A
+ * A 32-bit unsigned integer value.
+ * \returns a 32-bit float value containing the converted value.
+ */
+static __inline__ float __DEFAULT_FN_ATTRS_CAST
+_castu32_f32(unsigned int __A) {
+  return __builtin_bit_cast(float, __A);
+}
+
+/** Cast a 64-bit unsigned integer value to a 64-bit float value
+ *
+ * \headerfile <x86intrin.h>
+ * This intrinsic corresponds to the VMOVQ / MOVQ instruction in x86_64,
+ * and corresponds to the FLDL instruction in ia32.
+ *
+ * \param __A
+ * A 64-bit unsigned integer value.
+ * \returns a 64-bit float value containing the converted value.
+ */
+static __inline__ double __DEFAULT_FN_ATTRS_CAST
+_castu64_f64(unsigned long long __A) {
+  return __builtin_bit_cast(double, __A);
+}
+
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * unsigned char operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the CRC32B instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
+__crc32b(unsigned int __C, unsigned char __D)
+{
+  return __builtin_ia32_crc32qi(__C, __D);
+}
+
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * unsigned short operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the CRC32W instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
+__crc32w(unsigned int __C, unsigned short __D)
+{
+  return __builtin_ia32_crc32hi(__C, __D);
+}
+
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * second unsigned integer operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the CRC32D instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
+ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
+__crc32d(unsigned int __C, unsigned int __D)
+{
+  return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+/** Adds the unsigned integer operand to the CRC-32C checksum of the
+ * unsigned 64-bit integer operand.
+ *
+ * \headerfile <x86intrin.h>
+ *
+ * This intrinsic corresponds to the CRC32Q instruction.
+ *
+ * \param __C
+ * An unsigned integer operand to add to the CRC-32C checksum of operand
+ * \a __D.
+ * \param __D
+ * An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
+ * \returns The result of adding operand \a __C to the CRC-32C checksum of
+ * operand \a __D.
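+ *
+ * Editorial usage sketch, not part of the upstream header; buf and len are
+ * hypothetical caller-supplied values, seeded with ~0 as is common for
+ * CRC-32C:
+ *
+ *   unsigned long long crc = ~0ULL;
+ *   for (size_t i = 0; i < len; ++i)
+ *     crc = __crc32q(crc, buf[i]);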
+ */ +static __inline__ unsigned long long __DEFAULT_FN_ATTRS_SSE42 +__crc32q(unsigned long long __C, unsigned long long __D) +{ + return __builtin_ia32_crc32di(__C, __D); +} +#endif /* __x86_64__ */ + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__rdpmc(int __A) { + return __builtin_ia32_rdpmc(__A); +} + +/* __rdtscp */ +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__rdtscp(unsigned int *__A) { + return __builtin_ia32_rdtscp(__A); +} + +#define _rdtsc() __rdtsc() + +#define _rdpmc(A) __rdpmc(A) + +static __inline__ void __DEFAULT_FN_ATTRS +_wbinvd(void) { + __builtin_ia32_wbinvd(); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR +__rolb(unsigned char __X, int __C) { + return __builtin_rotateleft8(__X, __C); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR +__rorb(unsigned char __X, int __C) { + return __builtin_rotateright8(__X, __C); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR +__rolw(unsigned short __X, int __C) { + return __builtin_rotateleft16(__X, __C); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR +__rorw(unsigned short __X, int __C) { + return __builtin_rotateright16(__X, __C); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR +__rold(unsigned int __X, int __C) { + return __builtin_rotateleft32(__X, __C); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR +__rord(unsigned int __X, int __C) { + return __builtin_rotateright32(__X, __C); +} + +#ifdef __x86_64__ +static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR +__rolq(unsigned long long __X, int __C) { + return __builtin_rotateleft64(__X, __C); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR +__rorq(unsigned long long __X, int __C) { + return __builtin_rotateright64(__X, __C); +} +#endif /* __x86_64__ */ + +#ifndef _MSC_VER +/* These are already provided as builtins for MSVC. */ +/* Select the correct function based on the size of long. */ +#ifdef __LP64__ +#define _lrotl(a,b) __rolq((a), (b)) +#define _lrotr(a,b) __rorq((a), (b)) +#else +#define _lrotl(a,b) __rold((a), (b)) +#define _lrotr(a,b) __rord((a), (b)) +#endif +#define _rotl(a,b) __rold((a), (b)) +#define _rotr(a,b) __rord((a), (b)) +#endif // _MSC_VER + +/* These are not builtins so need to be provided in all modes. */ +#define _rotwl(a,b) __rolw((a), (b)) +#define _rotwr(a,b) __rorw((a), (b)) + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_CAST +#undef __DEFAULT_FN_ATTRS_SSE42 +#undef __DEFAULT_FN_ATTRS_CONSTEXPR + +#endif /* __IA32INTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/immintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/immintrin.h new file mode 100644 index 0000000..56d3dad --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/immintrin.h @@ -0,0 +1,600 @@ +/*===---- immintrin.h - Intel intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#define __IMMINTRIN_H
+
+#include <x86gprintrin.h>
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__MMX__)
+#include <mmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SSE__)
+#include <xmmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SSE2__)
+#include <emmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SSE3__)
+#include <pmmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SSSE3__)
+#include <tmmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__SSE4_2__) || defined(__SSE4_1__))
+#include <smmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AES__) || defined(__PCLMUL__))
+#include <wmmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__CLFLUSHOPT__)
+#include <clflushoptintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__CLWB__)
+#include <clwbintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX__)
+#include <avxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX2__)
+#include <avx2intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__F16C__)
+#include <f16cintrin.h>
+#endif
+
+/* No feature check desired due to internal checks */
+#include <bmiintrin.h>
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__BMI2__)
+#include <bmi2intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__LZCNT__)
+#include <lzcntintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__POPCNT__)
+#include <popcntintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__FMA__)
+#include <fmaintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512F__)
+#include <avx512fintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512VL__)
+#include <avx512vlintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512BW__)
+#include <avx512bwintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512BITALG__)
+#include <avx512bitalgintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512CD__)
+#include <avx512cdintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512VPOPCNTDQ__)
+#include <avx512vpopcntdqintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
+#include <avx512vpopcntdqvlintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512VNNI__)
+#include <avx512vnniintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512VNNI__))
+#include <avx512vlvnniintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVXVNNI__)
+#include <avxvnniintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512DQ__)
+#include <avx512dqintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512BITALG__))
+#include <avx512vlbitalgintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512BW__))
+#include <avx512vlbwintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512CD__))
+#include <avx512vlcdintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512DQ__))
+#include <avx512vldqintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512ER__)
+#include <avx512erintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512IFMA__)
+#include <avx512ifmaintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512IFMA__) && defined(__AVX512VL__))
+#include <avx512ifmavlintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512VBMI__)
+#include <avx512vbmiintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VBMI__) && defined(__AVX512VL__))
+#include <avx512vbmivlintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512VBMI2__)
+#include <avx512vbmi2intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
+#include <avx512vlvbmi2intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512PF__)
+#include <avx512pfintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512BF16__)
+#include <avx512bf16intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512BF16__))
+#include <avx512vlbf16intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__PKU__)
+#include <pkuintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__VPCLMULQDQ__)
+#include <vpclmulqdqintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__VAES__)
+#include <vaesintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__GFNI__)
+#include <gfniintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__RDPID__)
+/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the RDPID instruction.
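+///
+/// Editorial usage sketch, not part of the upstream header; on typical OSes
+/// the register holds a per-CPU value such as the processor number:
+///
+///   unsigned int aux = _rdpid_u32();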
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid"))) +_rdpid_u32(void) { + return __builtin_ia32_rdpid(); +} +#endif // __RDPID__ + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__RDRND__) +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand16_step(unsigned short *__p) +{ + return __builtin_ia32_rdrand16_step(__p); +} + +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand32_step(unsigned int *__p) +{ + return __builtin_ia32_rdrand32_step(__p); +} + +#ifdef __x86_64__ +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand64_step(unsigned long long *__p) +{ + return __builtin_ia32_rdrand64_step(__p); +} +#endif +#endif /* __RDRND__ */ + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__FSGSBASE__) +#ifdef __x86_64__ +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readfsbase_u32(void) +{ + return __builtin_ia32_rdfsbase32(); +} + +static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readfsbase_u64(void) +{ + return __builtin_ia32_rdfsbase64(); +} + +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readgsbase_u32(void) +{ + return __builtin_ia32_rdgsbase32(); +} + +static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readgsbase_u64(void) +{ + return __builtin_ia32_rdgsbase64(); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writefsbase_u32(unsigned int __V) +{ + __builtin_ia32_wrfsbase32(__V); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writefsbase_u64(unsigned long long __V) +{ + __builtin_ia32_wrfsbase64(__V); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writegsbase_u32(unsigned int __V) +{ + __builtin_ia32_wrgsbase32(__V); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writegsbase_u64(unsigned long long __V) +{ + __builtin_ia32_wrgsbase64(__V); +} + +#endif +#endif /* __FSGSBASE__ */ + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__MOVBE__) + +/* The structs used below are to force the load/store to be unaligned. This + * is accomplished with the __packed__ attribute. The __may_alias__ prevents + * tbaa metadata from being generated based on the struct and the type of the + * field inside of it. 
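+ *
+ * Editorial illustration, not part of the upstream header: the same pattern
+ * in isolation, for a hypothetical unaligned 32-bit load:
+ *
+ *   struct __attribute__((__packed__, __may_alias__)) __u32 { unsigned __v; };
+ *   static unsigned __load_u32(const void *__p) {
+ *     return ((const struct __u32 *)__p)->__v;
+ *   }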
+ */
+
+static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_loadbe_i16(void const * __P) {
+  struct __loadu_i16 {
+    short __v;
+  } __attribute__((__packed__, __may_alias__));
+  return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_storebe_i16(void * __P, short __D) {
+  struct __storeu_i16 {
+    short __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_loadbe_i32(void const * __P) {
+  struct __loadu_i32 {
+    int __v;
+  } __attribute__((__packed__, __may_alias__));
+  return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_storebe_i32(void * __P, int __D) {
+  struct __storeu_i32 {
+    int __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_loadbe_i64(void const * __P) {
+  struct __loadu_i64 {
+    long long __v;
+  } __attribute__((__packed__, __may_alias__));
+  return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_storebe_i64(void * __P, long long __D) {
+  struct __storeu_i64 {
+    long long __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
+}
+#endif
+#endif /* __MOVBE */
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__RTM__)
+#include <rtmintrin.h>
+#include <xtestintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SHA__)
+#include <shaintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__FXSR__)
+#include <fxsrintrin.h>
+#endif
+
+/* No feature check desired due to internal MSC_VER checks */
+#include <xsaveintrin.h>
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__XSAVEOPT__)
+#include <xsaveoptintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__XSAVEC__)
+#include <xsavecintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__XSAVES__)
+#include <xsavesintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SHSTK__)
+#include <cetintrin.h>
+#endif
+
+/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
+ * whereas others are also available at all times.
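+ * (Editorial note, not part of the upstream comment: for example,
+ * _addcarry_u32/_addcarry_u64 are defined unconditionally, while
+ * _addcarryx_u32/_addcarryx_u64 require the ADX target feature.)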
+ */
+#include <adxintrin.h>
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__RDSEED__)
+#include <rdseedintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__WBNOINVD__)
+#include <wbnoinvdintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__CLDEMOTE__)
+#include <cldemoteintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__WAITPKG__)
+#include <waitpkgintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#include <movdirintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__PCONFIG__)
+#include <pconfigintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SGX__)
+#include <sgxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__PTWRITE__)
+#include <ptwriteintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__INVPCID__)
+#include <invpcidintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__KL__) || defined(__WIDEKL__)
+#include <keylockerintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+#include <amxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__AVX512VP2INTERSECT__)
+#include <avx512vp2intersectintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
+#include <avx512vlvp2intersectintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__ENQCMD__)
+#include <enqcmdintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SERIALIZE__)
+#include <serializeintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__TSXLDTRK__)
+#include <tsxldtrkintrin.h>
+#endif
+
+#if defined(_MSC_VER) && __has_extension(gnu_asm)
+/* Define the default attributes for these intrinsics */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange HLE
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
+  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
+                       : "+r" (_Value), "+m" (*_Target) :: "memory");
+  return _Value;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
+  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
+                       : "+r" (_Value), "+m" (*_Target) :: "memory");
+  return _Value;
+}
+#endif
+#if defined(__x86_64__)
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
+  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
+                       : "+r" (_Value), "+m" (*_Target) :: "memory");
+  return _Value;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
+  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
+                       : "+r" (_Value), "+m" (*_Target) :: "memory");
+  return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange HLE
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
+                              long _Exchange, long _Comparand) {
+  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
+                       : "+a" (_Comparand), "+m" (*_Destination)
+                       : "r" (_Exchange) : "memory");
+  return _Comparand;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
+                              long _Exchange, long _Comparand) {
+  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
+                       : "+a" (_Comparand), "+m" (*_Destination)
+                       : "r" (_Exchange) : "memory");
+  return _Comparand;
+}
+#endif
+#if defined(__x86_64__)
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
+                              __int64 _Exchange, __int64 _Comparand) {
+  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
+                       : "+a" (_Comparand), "+m" (*_Destination)
+                       : "r" (_Exchange) : "memory");
+  return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
+                              __int64 _Exchange, __int64 _Comparand) {
+  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
+                       : "+a" (_Comparand), "+m" (*_Destination)
+                       : "r" (_Exchange) : "memory");
+  return _Comparand;
+}
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */
+
+#endif /* __IMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/intrin.h
new file mode 100644
index 0000000..ff8eb8f
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/intrin.h
@@ -0,0 +1,618 @@
+/* ===-------- intrin.h ---------------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <intrin.h>
+#else
+
+#ifndef __INTRIN_H
+#define __INTRIN_H
+
+/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
+#include <x86intrin.h>
+#endif
+
+#if defined(__arm__)
+#include <armintrin.h>
+#endif
+
+#if defined(__aarch64__)
+#include <arm64intrin.h>
+#endif
+
+/* For the definition of jmp_buf. */
+#if __STDC_HOSTED__
+#include <setjmp.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+#if __x86_64__
+#define __LPTRINT_TYPE__ __int64
+#else
+#define __LPTRINT_TYPE__ long
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__MMX__)
+/* And the random ones that aren't in those files. */
+__m64 _m_from_float(float);
+float _m_to_float(__m64);
+#endif
+
+/* Other assorted instruction intrinsics.
*/ +void __addfsbyte(unsigned long, unsigned char); +void __addfsdword(unsigned long, unsigned long); +void __addfsword(unsigned long, unsigned short); +void __code_seg(const char *); +void __cpuid(int[4], int); +void __cpuidex(int[4], int, int); +__int64 __emul(int, int); +unsigned __int64 __emulu(unsigned int, unsigned int); +unsigned int __getcallerseflags(void); +void __halt(void); +unsigned char __inbyte(unsigned short); +void __inbytestring(unsigned short, unsigned char *, unsigned long); +void __incfsbyte(unsigned long); +void __incfsdword(unsigned long); +void __incfsword(unsigned long); +unsigned long __indword(unsigned short); +void __indwordstring(unsigned short, unsigned long *, unsigned long); +void __int2c(void); +void __invlpg(void *); +unsigned short __inword(unsigned short); +void __inwordstring(unsigned short, unsigned short *, unsigned long); +void __lidt(void *); +unsigned __int64 __ll_lshift(unsigned __int64, int); +__int64 __ll_rshift(__int64, int); +void __movsb(unsigned char *, unsigned char const *, size_t); +void __movsd(unsigned long *, unsigned long const *, size_t); +void __movsw(unsigned short *, unsigned short const *, size_t); +void __nop(void); +void __nvreg_restore_fence(void); +void __nvreg_save_fence(void); +void __outbyte(unsigned short, unsigned char); +void __outbytestring(unsigned short, unsigned char *, unsigned long); +void __outdword(unsigned short, unsigned long); +void __outdwordstring(unsigned short, unsigned long *, unsigned long); +void __outword(unsigned short, unsigned short); +void __outwordstring(unsigned short, unsigned short *, unsigned long); +unsigned long __readcr0(void); +unsigned long __readcr2(void); +unsigned __LPTRINT_TYPE__ __readcr3(void); +unsigned long __readcr4(void); +unsigned long __readcr8(void); +unsigned int __readdr(unsigned int); +#ifdef __i386__ +unsigned char __readfsbyte(unsigned long); +unsigned __int64 __readfsqword(unsigned long); +unsigned short __readfsword(unsigned long); +#endif +unsigned __int64 __readmsr(unsigned long); +unsigned __int64 __readpmc(unsigned long); +unsigned long __segmentlimit(unsigned long); +void __sidt(void *); +void __stosb(unsigned char *, unsigned char, size_t); +void __stosd(unsigned long *, unsigned long, size_t); +void __stosw(unsigned short *, unsigned short, size_t); +void __svm_clgi(void); +void __svm_invlpga(void *, int); +void __svm_skinit(int); +void __svm_stgi(void); +void __svm_vmload(size_t); +void __svm_vmrun(size_t); +void __svm_vmsave(size_t); +void __ud2(void); +unsigned __int64 __ull_rshift(unsigned __int64, int); +void __vmx_off(void); +void __vmx_vmptrst(unsigned __int64 *); +void __wbinvd(void); +void __writecr0(unsigned int); +void __writecr3(unsigned __INTPTR_TYPE__); +void __writecr4(unsigned int); +void __writecr8(unsigned int); +void __writedr(unsigned int, unsigned int); +void __writefsbyte(unsigned long, unsigned char); +void __writefsdword(unsigned long, unsigned long); +void __writefsqword(unsigned long, unsigned __int64); +void __writefsword(unsigned long, unsigned short); +void __writemsr(unsigned long, unsigned __int64); +void *_AddressOfReturnAddress(void); +unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); +unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); +unsigned char _bittest(long const *, long); +unsigned char _bittestandcomplement(long *, long); +unsigned char _bittestandreset(long *, long); +unsigned char _bittestandset(long *, long); +void __cdecl _disable(void); +void __cdecl _enable(void); 
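+
+/* Editorial usage sketch, not part of the upstream header: querying the CPU
+ * vendor string via the __cpuid declaration above, assuming an x86 target.
+ * Leaf 0 returns the vendor bytes in EBX, EDX, ECX, i.e. regs[1], regs[3],
+ * regs[2]:
+ *
+ *   int regs[4];
+ *   char vendor[13];
+ *   __cpuid(regs, 0);
+ *   __builtin_memcpy(vendor + 0, &regs[1], 4);
+ *   __builtin_memcpy(vendor + 4, &regs[3], 4);
+ *   __builtin_memcpy(vendor + 8, &regs[2], 4);
+ *   vendor[12] = '\0';
+ */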
+long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value); +unsigned char _interlockedbittestandreset(long volatile *, long); +unsigned char _interlockedbittestandset(long volatile *, long); +void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *, + void *); +void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *, + void *); +long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long); +long _InterlockedExchangeAdd_HLERelease(long volatile *, long); +__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64); +__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64); +void __attribute__((__deprecated__( + "use other intrinsics or C++11 atomics instead"))) _ReadBarrier(void); +void __attribute__((__deprecated__( + "use other intrinsics or C++11 atomics instead"))) _ReadWriteBarrier(void); +unsigned int _rorx_u32(unsigned int, const unsigned int); +int _sarx_i32(int, unsigned int); +#if __STDC_HOSTED__ +int __cdecl _setjmp(jmp_buf); +#endif +unsigned int _shlx_u32(unsigned int, unsigned int); +unsigned int _shrx_u32(unsigned int, unsigned int); +void _Store_HLERelease(long volatile *, long); +void _Store64_HLERelease(__int64 volatile *, __int64); +void _StorePointer_HLERelease(void *volatile *, void *); +void __attribute__((__deprecated__( + "use other intrinsics or C++11 atomics instead"))) _WriteBarrier(void); +unsigned __int32 xbegin(void); +void _xend(void); + +/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */ +#ifdef __x86_64__ +void __addgsbyte(unsigned long, unsigned char); +void __addgsdword(unsigned long, unsigned long); +void __addgsqword(unsigned long, unsigned __int64); +void __addgsword(unsigned long, unsigned short); +void __faststorefence(void); +void __incgsbyte(unsigned long); +void __incgsdword(unsigned long); +void __incgsqword(unsigned long); +void __incgsword(unsigned long); +void __movsq(unsigned long long *, unsigned long long const *, size_t); +unsigned char __readgsbyte(unsigned long); +unsigned long __readgsdword(unsigned long); +unsigned __int64 __readgsqword(unsigned long); +unsigned short __readgsword(unsigned long); +unsigned __int64 __shiftleft128(unsigned __int64 _LowPart, + unsigned __int64 _HighPart, + unsigned char _Shift); +unsigned __int64 __shiftright128(unsigned __int64 _LowPart, + unsigned __int64 _HighPart, + unsigned char _Shift); +void __stosq(unsigned __int64 *, unsigned __int64, size_t); +unsigned char __vmx_on(unsigned __int64 *); +unsigned char __vmx_vmclear(unsigned __int64 *); +unsigned char __vmx_vmlaunch(void); +unsigned char __vmx_vmptrld(unsigned __int64 *); +unsigned char __vmx_vmread(size_t, size_t *); +unsigned char __vmx_vmresume(void); +unsigned char __vmx_vmwrite(size_t, size_t); +void __writegsbyte(unsigned long, unsigned char); +void __writegsdword(unsigned long, unsigned long); +void __writegsqword(unsigned long, unsigned __int64); +void __writegsword(unsigned long, unsigned short); +unsigned char _bittest64(__int64 const *, __int64); +unsigned char _bittestandcomplement64(__int64 *, __int64); +unsigned char _bittestandreset64(__int64 *, __int64); +unsigned char _bittestandset64(__int64 *, __int64); +long _InterlockedAnd_np(long volatile *_Value, long _Mask); +short _InterlockedAnd16_np(short volatile *_Value, short _Mask); +__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask); +char _InterlockedAnd8_np(char volatile *_Value, char _Mask); +unsigned char _interlockedbittestandreset64(__int64 
volatile *, __int64); +unsigned char _interlockedbittestandset64(__int64 volatile *, __int64); +long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange, + long _Comparand); +unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +short _InterlockedCompareExchange16_np(short volatile *_Destination, + short _Exchange, short _Comparand); +__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand); +void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination, + void *_Exchange, void *_Comparand); +long _InterlockedOr_np(long volatile *_Value, long _Mask); +short _InterlockedOr16_np(short volatile *_Value, short _Mask); +__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask); +char _InterlockedOr8_np(char volatile *_Value, char _Mask); +long _InterlockedXor_np(long volatile *_Value, long _Mask); +short _InterlockedXor16_np(short volatile *_Value, short _Mask); +__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask); +char _InterlockedXor8_np(char volatile *_Value, char _Mask); +unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int); +__int64 _sarx_i64(__int64, unsigned int); +unsigned __int64 _shlx_u64(unsigned __int64, unsigned int); +unsigned __int64 _shrx_u64(unsigned __int64, unsigned int); +__int64 __mulh(__int64, __int64); +unsigned __int64 __umulh(unsigned __int64, unsigned __int64); +__int64 _mul128(__int64, __int64, __int64*); +unsigned __int64 _umul128(unsigned __int64, + unsigned __int64, + unsigned __int64*); + +#endif /* __x86_64__ */ + +#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) + +unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); +unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); + +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) +__int64 _InterlockedDecrement64(__int64 volatile *_Addend); +__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value); +__int64 _InterlockedIncrement64(__int64 volatile *_Addend); +__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask); + +#endif + +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange Add +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value); +char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value); +char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value); +short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value); +short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value); +short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value); +long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value); +long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value); +long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value); +__int64 
_InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Increment +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +short _InterlockedIncrement16_acq(short volatile *_Value); +short _InterlockedIncrement16_nf(short volatile *_Value); +short _InterlockedIncrement16_rel(short volatile *_Value); +long _InterlockedIncrement_acq(long volatile *_Value); +long _InterlockedIncrement_nf(long volatile *_Value); +long _InterlockedIncrement_rel(long volatile *_Value); +__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value); +__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value); +__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Decrement +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +short _InterlockedDecrement16_acq(short volatile *_Value); +short _InterlockedDecrement16_nf(short volatile *_Value); +short _InterlockedDecrement16_rel(short volatile *_Value); +long _InterlockedDecrement_acq(long volatile *_Value); +long _InterlockedDecrement_nf(long volatile *_Value); +long _InterlockedDecrement_rel(long volatile *_Value); +__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value); +__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value); +__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked And +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +char _InterlockedAnd8_acq(char volatile *_Value, char _Mask); +char _InterlockedAnd8_nf(char volatile *_Value, char _Mask); +char _InterlockedAnd8_rel(char volatile *_Value, char _Mask); +short _InterlockedAnd16_acq(short volatile *_Value, short _Mask); +short _InterlockedAnd16_nf(short volatile *_Value, short _Mask); +short _InterlockedAnd16_rel(short volatile *_Value, short _Mask); +long _InterlockedAnd_acq(long volatile *_Value, long _Mask); +long _InterlockedAnd_nf(long volatile *_Value, long _Mask); +long _InterlockedAnd_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask); +#endif +/*----------------------------------------------------------------------------*\ +|* Bit Counting and Testing +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +unsigned char _interlockedbittestandset_acq(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandset_nf(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandset_rel(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase, + long _BitPos); +unsigned char 
_interlockedbittestandreset_rel(long volatile *_BitBase, + long _BitPos); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Or +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +char _InterlockedOr8_acq(char volatile *_Value, char _Mask); +char _InterlockedOr8_nf(char volatile *_Value, char _Mask); +char _InterlockedOr8_rel(char volatile *_Value, char _Mask); +short _InterlockedOr16_acq(short volatile *_Value, short _Mask); +short _InterlockedOr16_nf(short volatile *_Value, short _Mask); +short _InterlockedOr16_rel(short volatile *_Value, short _Mask); +long _InterlockedOr_acq(long volatile *_Value, long _Mask); +long _InterlockedOr_nf(long volatile *_Value, long _Mask); +long _InterlockedOr_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Xor +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +char _InterlockedXor8_acq(char volatile *_Value, char _Mask); +char _InterlockedXor8_nf(char volatile *_Value, char _Mask); +char _InterlockedXor8_rel(char volatile *_Value, char _Mask); +short _InterlockedXor16_acq(short volatile *_Value, short _Mask); +short _InterlockedXor16_nf(short volatile *_Value, short _Mask); +short _InterlockedXor16_rel(short volatile *_Value, short _Mask); +long _InterlockedXor_acq(long volatile *_Value, long _Mask); +long _InterlockedXor_nf(long volatile *_Value, long _Mask); +long _InterlockedXor_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +char _InterlockedExchange8_acq(char volatile *_Target, char _Value); +char _InterlockedExchange8_nf(char volatile *_Target, char _Value); +char _InterlockedExchange8_rel(char volatile *_Target, char _Value); +short _InterlockedExchange16_acq(short volatile *_Target, short _Value); +short _InterlockedExchange16_nf(short volatile *_Target, short _Value); +short _InterlockedExchange16_rel(short volatile *_Target, short _Value); +long _InterlockedExchange_acq(long volatile *_Target, long _Value); +long _InterlockedExchange_nf(long volatile *_Target, long _Value); +long _InterlockedExchange_rel(long volatile *_Target, long _Value); +__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value); +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Compare Exchange +\*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) +char _InterlockedCompareExchange8_acq(char volatile 
*_Destination, + char _Exchange, char _Comparand); +char _InterlockedCompareExchange8_nf(char volatile *_Destination, + char _Exchange, char _Comparand); +char _InterlockedCompareExchange8_rel(char volatile *_Destination, + char _Exchange, char _Comparand); +short _InterlockedCompareExchange16_acq(short volatile *_Destination, + short _Exchange, short _Comparand); +short _InterlockedCompareExchange16_nf(short volatile *_Destination, + short _Exchange, short _Comparand); +short _InterlockedCompareExchange16_rel(short volatile *_Destination, + short _Exchange, short _Comparand); +long _InterlockedCompareExchange_acq(long volatile *_Destination, + long _Exchange, long _Comparand); +long _InterlockedCompareExchange_nf(long volatile *_Destination, + long _Exchange, long _Comparand); +long _InterlockedCompareExchange_rel(long volatile *_Destination, + long _Exchange, long _Comparand); +__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand); +__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand); +__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand); +#endif +#if defined(__x86_64__) || defined(__aarch64__) +unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +#endif +#if defined(__aarch64__) +unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +#endif + +/*----------------------------------------------------------------------------*\ +|* movs, stos +\*----------------------------------------------------------------------------*/ +#if defined(__i386__) || defined(__x86_64__) +static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst, + unsigned char const *__src, + size_t __n) { +#if defined(__x86_64__) + __asm__ __volatile__("rep movsb" + : "+D"(__dst), "+S"(__src), "+c"(__n) + : + : "memory"); +#else + __asm__ __volatile__("xchg %%esi, %1\nrep movsb\nxchg %%esi, %1" + : "+D"(__dst), "+r"(__src), "+c"(__n) + : + : "memory"); +#endif +} +static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst, + unsigned long const *__src, + size_t __n) { +#if defined(__x86_64__) + __asm__ __volatile__("rep movsl" + : "+D"(__dst), "+S"(__src), "+c"(__n) + : + : "memory"); +#else + __asm__ __volatile__("xchg %%esi, %1\nrep movsl\nxchg %%esi, %1" + : "+D"(__dst), "+r"(__src), "+c"(__n) + : + : "memory"); +#endif +} +static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst, + unsigned short const *__src, + size_t __n) { +#if defined(__x86_64__) + __asm__ __volatile__("rep movsw" + : "+D"(__dst), "+S"(__src), "+c"(__n) + : + : "memory"); +#else + __asm__ __volatile__("xchg %%esi, %1\nrep movsw\nxchg %%esi, %1" + : "+D"(__dst), "+r"(__src), "+c"(__n) + : + : "memory"); +#endif +} +static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst, + unsigned long __x, + size_t __n) { + __asm__ __volatile__("rep stosl" + : "+D"(__dst), "+c"(__n) + : "a"(__x) + : "memory"); 
+} +static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst, + unsigned short __x, + size_t __n) { + __asm__ __volatile__("rep stosw" + : "+D"(__dst), "+c"(__n) + : "a"(__x) + : "memory"); +} +#endif +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS __movsq( + unsigned long long *__dst, unsigned long long const *__src, size_t __n) { + __asm__ __volatile__("rep movsq" + : "+D"(__dst), "+S"(__src), "+c"(__n) + : + : "memory"); +} +static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst, + unsigned __int64 __x, + size_t __n) { + __asm__ __volatile__("rep stosq" : "+D"(__dst), "+c"(__n) : "a"(__x) + : "memory"); +} +#endif + +/*----------------------------------------------------------------------------*\ +|* Misc +\*----------------------------------------------------------------------------*/ +#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) +#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \ + __asm("cpuid" \ + : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx) \ + : "0"(__leaf), "2"(__count)) +#else +/* x86-64 uses %rbx as the base register, so preserve it. */ +#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \ + __asm("xchgq %%rbx,%q1\n" \ + "cpuid\n" \ + "xchgq %%rbx,%q1" \ + : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \ + : "0"(__leaf), "2"(__count)) +#endif +static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) { + __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]); +} +static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level, + int __ecx) { + __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]); +} +static __inline__ void __DEFAULT_FN_ATTRS __halt(void) { + __asm__ volatile("hlt"); +} +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) +static __inline__ void __DEFAULT_FN_ATTRS __nop(void) { + __asm__ volatile("nop"); +} +#endif + +/*----------------------------------------------------------------------------*\ +|* MS AArch64 specific +\*----------------------------------------------------------------------------*/ +#if defined(__aarch64__) +unsigned __int64 __getReg(int); +long _InterlockedAdd(long volatile *Addend, long Value); +__int64 _ReadStatusReg(int); +void _WriteStatusReg(int, __int64); + +unsigned short __cdecl _byteswap_ushort(unsigned short val); +unsigned long __cdecl _byteswap_ulong (unsigned long val); +unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val); +#endif + +/*----------------------------------------------------------------------------*\ +|* Privileged intrinsics +\*----------------------------------------------------------------------------*/ +#if defined(__i386__) || defined(__x86_64__) +static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS +__readmsr(unsigned long __register) { + // Loads the contents of a 64-bit model specific register (MSR) specified in + // the ECX register into registers EDX:EAX. The EDX register is loaded with + // the high-order 32 bits of the MSR and the EAX register is loaded with the + // low-order 32 bits. If less than 64 bits are implemented in the MSR being + // read, the values returned to EDX:EAX in unimplemented bit locations are + // undefined. 
+  unsigned long __edx;
+  unsigned long __eax;
+  __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
+  return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
+}
+#endif
+
+static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) {
+  unsigned __LPTRINT_TYPE__ __cr3_val;
+  __asm__ __volatile__ ("mov %%cr3, %0" : "=r"(__cr3_val) : : "memory");
+  return __cr3_val;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
+  __asm__ ("mov %0, %%cr3" : : "r"(__cr3_val) : "memory");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#undef __LPTRINT_TYPE__
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __INTRIN_H */
+#endif /* _MSC_VER */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/inttypes.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/inttypes.h
new file mode 100644
index 0000000..1c894c4
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/inttypes.h
@@ -0,0 +1,97 @@
+/*===---- inttypes.h - Standard header for integer printf macros ----------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_INTTYPES_H
+// AIX system headers need inttypes.h to be re-enterable while _STD_TYPES_T
+// is defined until an inclusion of it without _STD_TYPES_T occurs, in which
+// case the header guard macro is defined.
+#if !defined(_AIX) || !defined(_STD_TYPES_T)
+#define __CLANG_INTTYPES_H
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+#error MSVC does not have inttypes.h prior to Visual Studio 2013
+#endif
+
+#include_next <inttypes.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+/* MSVC headers define int32_t as int, but PRIx32 as "lx" instead of "x".
+ * This triggers format warnings, so fix it up here.
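+ *
+ * As a short illustrative sketch (the variable name here is an arbitrary
+ * placeholder, not part of the upstream comment), without this fixup a
+ * pre-2015 MSVC build of
+ *
+ *   uint32_t __v = 42;
+ *   printf("%" PRIx32 "\n", __v);
+ *
+ * would expand PRIx32 to "lx" and draw a format warning, because uint32_t
+ * is unsigned int on that target while "lx" expects unsigned long;
+ * redefining PRIx32 to "x" keeps the macro consistent with the typedefs.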
+ */
+#undef PRId32
+#undef PRIdLEAST32
+#undef PRIdFAST32
+#undef PRIi32
+#undef PRIiLEAST32
+#undef PRIiFAST32
+#undef PRIo32
+#undef PRIoLEAST32
+#undef PRIoFAST32
+#undef PRIu32
+#undef PRIuLEAST32
+#undef PRIuFAST32
+#undef PRIx32
+#undef PRIxLEAST32
+#undef PRIxFAST32
+#undef PRIX32
+#undef PRIXLEAST32
+#undef PRIXFAST32
+
+#undef SCNd32
+#undef SCNdLEAST32
+#undef SCNdFAST32
+#undef SCNi32
+#undef SCNiLEAST32
+#undef SCNiFAST32
+#undef SCNo32
+#undef SCNoLEAST32
+#undef SCNoFAST32
+#undef SCNu32
+#undef SCNuLEAST32
+#undef SCNuFAST32
+#undef SCNx32
+#undef SCNxLEAST32
+#undef SCNxFAST32
+
+#define PRId32 "d"
+#define PRIdLEAST32 "d"
+#define PRIdFAST32 "d"
+#define PRIi32 "i"
+#define PRIiLEAST32 "i"
+#define PRIiFAST32 "i"
+#define PRIo32 "o"
+#define PRIoLEAST32 "o"
+#define PRIoFAST32 "o"
+#define PRIu32 "u"
+#define PRIuLEAST32 "u"
+#define PRIuFAST32 "u"
+#define PRIx32 "x"
+#define PRIxLEAST32 "x"
+#define PRIxFAST32 "x"
+#define PRIX32 "X"
+#define PRIXLEAST32 "X"
+#define PRIXFAST32 "X"
+
+#define SCNd32 "d"
+#define SCNdLEAST32 "d"
+#define SCNdFAST32 "d"
+#define SCNi32 "i"
+#define SCNiLEAST32 "i"
+#define SCNiFAST32 "i"
+#define SCNo32 "o"
+#define SCNoLEAST32 "o"
+#define SCNoFAST32 "o"
+#define SCNu32 "u"
+#define SCNuLEAST32 "u"
+#define SCNuFAST32 "u"
+#define SCNx32 "x"
+#define SCNxLEAST32 "x"
+#define SCNxFAST32 "x"
+#endif
+
+#endif /* __CLANG_INTTYPES_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/invpcidintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/invpcidintrin.h
new file mode 100644
index 0000000..48dae0a
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/invpcidintrin.h
@@ -0,0 +1,23 @@
+/*===------------- invpcidintrin.h - INVPCID intrinsic ---------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <invpcidintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __INVPCIDINTRIN_H
+#define __INVPCIDINTRIN_H
+
+static __inline__ void
+  __attribute__((__always_inline__, __nodebug__, __target__("invpcid")))
+_invpcid(unsigned int __type, void *__descriptor) {
+  __builtin_ia32_invpcid(__type, __descriptor);
+}
+
+#endif /* __INVPCIDINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/iso646.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/iso646.h
new file mode 100644
index 0000000..e0a20c6
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/iso646.h
@@ -0,0 +1,27 @@
+/*===---- iso646.h - Standard header for alternate spellings of operators---===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ISO646_H
+#define __ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
+
+#endif /* __ISO646_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/keylockerintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/keylockerintrin.h
new file mode 100644
index 0000000..68b0a56
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/keylockerintrin.h
@@ -0,0 +1,536 @@
+/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <keylockerintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _KEYLOCKERINTRIN_H
+#define _KEYLOCKERINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__KL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("kl"),\
+                 __min_vector_width__(128)))
+
+/// Load internal wrapping key from __intkey, __enkey_lo and __enkey_hi. __ctl
+/// will be assigned to EAX, which specifies the KeySource and whether backing
+/// up the key is permitted. The 256-bit encryption key is loaded from the two
+/// explicit operands (__enkey_lo and __enkey_hi). The 128-bit integrity key is
+/// loaded from the implicit operand XMM0, which is assigned by __intkey.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the LOADIWKEY instructions.
+///
+/// \operation
+/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
+///   GP (0)
+/// FI
+/// IF “LOADIWKEY exiting” VM execution control set
+///   VMexit
+/// FI
+/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used
+///   GP (0)
+/// FI
+/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set
+///   GP (0)
+/// FI
+/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part
+///   GP (0)
+/// FI
+/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part
+///   GP (0)
+/// FI
+/// IF (__ctl[4:1] == 0) // KeySource of 0.
+///   IWKey.Encryption Key[127:0] := __enkey_hi[127:0]
+///   IWKey.Encryption Key[255:128] := __enkey_lo[127:0]
+///   IWKey.IntegrityKey[127:0] := __intkey[127:0]
+///   IWKey.NoBackup := __ctl[0]
+///   IWKey.KeySource := __ctl[4:1]
+///   ZF := 0
+/// ELSE // KeySource of 1.
See RDSEED definition for details of randomness +/// IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received +/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0] +/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128] +/// IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0] XOR HW_NRND_GEN.data[255:0] +/// IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256] +/// IWKey.NoBackup := __ctl[0] +/// IWKey.KeySource := __ctl[4:1] +/// ZF := 0 +/// ELSE // Random data was not returned from RDSEED. IWKey was not loaded +/// ZF := 1 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endoperation +static __inline__ void __DEFAULT_FN_ATTRS +_mm_loadiwkey (unsigned int __ctl, __m128i __intkey, + __m128i __enkey_lo, __m128i __enkey_hi) { + __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl); +} + +/// Wrap a 128-bit AES key from __key into a key handle and output in +/// ((__m128i*)__h) to ((__m128i*)__h) + 5 and a 32-bit value as return. +/// The explicit source operand __htype specifies handle restrictions. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the ENCODEKEY128 instructions. +/// +/// \operation +/// InputKey[127:0] := __key[127:0] +/// KeyMetadata[2:0] := __htype[2:0] +/// KeyMetadata[23:3] := 0 // Reserved for future usage +/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0) +/// KeyMetadata[127:28] := 0 // Reserved for future usage +/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0], +/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0]) +/// dst[0] := IWKey.NoBackup +/// dst[4:1] := IWKey.KeySource[3:0] +/// dst[31:5] := 0 +/// MEM[__h+127:__h] := Handle[127:0] // AAD +/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag +/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText +/// MEM[__h+511:__h+384] := 0 // Reserved for future usage +/// MEM[__h+639:__h+512] := 0 // Reserved for future usage +/// MEM[__h+767:__h+640] := 0 // Reserved for future usage +/// OF := 0 +/// SF := 0 +/// ZF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endoperation +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) { + return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h); +} + +/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, then +/// output handle in ((__m128i*)__h) to ((__m128i*)__h) + 6 and +/// a 32-bit value as return. +/// The explicit source operand __htype specifies handle restrictions. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the ENCODEKEY256 instructions. 
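+///
+/// A minimal usage sketch (illustrative only; the zeroed key halves and the
+/// handle buffer are placeholders, not part of the upstream documentation):
+/// \code
+///   __m128i __lo = _mm_setzero_si128(); // low half of the 256-bit key
+///   __m128i __hi = _mm_setzero_si128(); // high half of the 256-bit key
+///   __m128i __handle[7];                // receives the wrapped key handle
+///   unsigned int __info = _mm_encodekey256_u32(0, __lo, __hi, __handle);
+/// \endcode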
+///
+/// \operation
+/// InputKey[127:0] := __key_lo[127:0]
+/// InputKey[255:128] := __key_hi[255:128]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],
+///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h]   := Handle[127:0] // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
+/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// MEM[__h+895:__h+768] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
+                     void *__h) {
+  return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,
+                                         (__v2di)__key_hi, __h);
+}
+
+/// The AESENC128KL performs 10 rounds of AES to encrypt the __idata using
+/// the 128-bit key in the handle from the __h. It stores the result in the
+/// __odata. And return the affected ZF flag status.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the AESENC128KL instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[383:256] ||
+///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESENC256KL performs 14 rounds of AES to encrypt the __idata using
+/// the 256-bit key in the handle from the __h. It stores the result in the
+/// __odata. And return the affected ZF flag status.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the AESENC256KL instructions.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+///   MEM[__odata+127:__odata] := 0
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///     MEM[__odata+127:__odata] := 0
+///   ELSE
+///     MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC128KL performs 10 rounds of AES to decrypt the __idata using
+/// the 128-bit key in the handle from the __h. It stores the result in the
+/// __odata. And return the affected ZF flag status.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the AESDEC128KL instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
+///                   (Handle[127:0] AND (CPL > 0)) ||
+///                   Handle[383:256] ||
+///                   HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
+/// IF (IllegalHandle)
+///   ZF := 1
+///   MEM[__odata+127:__odata] := 0
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///     MEM[__odata+127:__odata] := 0
+///   ELSE
+///     MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC256KL performs 14 rounds of AES to decrypt the __idata using
+/// the 256-bit key in the handle from the __h. It stores the result in the
+/// __odata. And return the affected ZF flag status.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the AESDEC256KL instructions.
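+///
+/// A minimal usage sketch (illustrative only; __cipher and __handle are
+/// placeholders, with __handle assumed to come from _mm_encodekey256_u32):
+/// \code
+///   __m128i __plain;
+///   if (_mm_aesdec256kl_u8(&__plain, __cipher, __handle))
+///     /* ZF was set: the handle was illegal or not authentic */;
+/// \endcode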
+/// +/// \operation +/// Handle[511:0] := MEM[__h+511:__h] +/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[383:256] || +/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256) +/// IF (IllegalHandle) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey) +/// IF (Authentic == 0) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey) +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endoperation +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { + return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ + || defined(__KL__) */ + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__WIDEKL__) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\ + __min_vector_width__(128))) + +/// Encrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle +/// at __h and store each resultant block back from __odata to __odata+7. And +/// return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESENCWIDE128KL instructions. +/// +/// \operation +/// Handle := MEM[__h+383:__h] +/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[255:128] || +/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 ) +/// IF (IllegalHandle) +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey) +/// IF Authentic == 0 +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// FOR i := 0 to 7 +/// __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey) +/// ENDFOR +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endoperation +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) { + return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata, + (const __v2di *)__idata, __h); +} + +/// Encrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle +/// at __h and store each resultant block back from __odata to __odata+7. And +/// return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESENCWIDE256KL instructions. 
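+///
+/// A minimal usage sketch (illustrative only; the block arrays are
+/// placeholders and __handle is assumed to come from _mm_encodekey256_u32):
+/// \code
+///   __m128i __in[8], __out[8];
+///   unsigned char __fail = _mm_aesencwide256kl_u8(__out, __in, __handle);
+/// \endcode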
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+///   FOR i := 0 to 7
+///     __odata[i] := 0
+///   ENDFOR
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///     FOR i := 0 to 7
+///       __odata[i] := 0
+///     ENDFOR
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle
+/// at __h and store each resultant block back from __odata to __odata+7. And
+/// return the affected ZF flag status.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the AESDECWIDE128KL instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+///   FOR i := 0 to 7
+///     __odata[i] := 0
+///   ENDFOR
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///     FOR i := 0 to 7
+///       __odata[i] := 0
+///     ENDFOR
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle
+/// at __h and store each resultant block back from __odata to __odata+7. And
+/// return the affected ZF flag status.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the AESDECWIDE256KL instructions.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+///   FOR i := 0 to 7
+///     __odata[i] := 0
+///   ENDFOR
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///     FOR i := 0 to 7
+///       __odata[i] := 0
+///     ENDFOR
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+          || defined(__WIDEKL__) */
+
+#endif /* _KEYLOCKERINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/limits.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/limits.h
new file mode 100644
index 0000000..c653580
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/limits.h
@@ -0,0 +1,102 @@
+/*===---- limits.h - Standard header for integer sizes --------------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_LIMITS_H
+#define __CLANG_LIMITS_H
+
+/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
+   Avert this #include_next madness. */
+#if defined __GNUC__ && !defined _GCC_LIMITS_H_
+#define _GCC_LIMITS_H_
+#endif
+
+/* System headers include a number of constants from POSIX in <limits.h>.
+   Include it if we're hosted. */
+#if __STDC_HOSTED__ && __has_include_next(<limits.h>)
+#include_next <limits.h>
+#endif
+
+/* Many system headers try to "help us out" by defining these. No really, we
+   know how big each datatype is. */
+#undef SCHAR_MIN
+#undef SCHAR_MAX
+#undef UCHAR_MAX
+#undef SHRT_MIN
+#undef SHRT_MAX
+#undef USHRT_MAX
+#undef INT_MIN
+#undef INT_MAX
+#undef UINT_MAX
+#undef LONG_MIN
+#undef LONG_MAX
+#undef ULONG_MAX
+
+#undef CHAR_BIT
+#undef CHAR_MIN
+#undef CHAR_MAX
+
+/* C90/99 5.2.4.2.1 */
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX  __SHRT_MAX__
+#define INT_MAX   __INT_MAX__
+#define LONG_MAX  __LONG_MAX__
+
+#define SCHAR_MIN (-__SCHAR_MAX__-1)
+#define SHRT_MIN  (-__SHRT_MAX__ -1)
+#define INT_MIN   (-__INT_MAX__  -1)
+#define LONG_MIN  (-__LONG_MAX__ -1L)
+
+#define UCHAR_MAX (__SCHAR_MAX__*2  +1)
+#define USHRT_MAX (__SHRT_MAX__ *2  +1)
+#define UINT_MAX  (__INT_MAX__  *2U +1U)
+#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
+
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+#define CHAR_BIT  __CHAR_BIT__
+
+#ifdef __CHAR_UNSIGNED__  /* -funsigned-char */
+#define CHAR_MIN 0
+#define CHAR_MAX UCHAR_MAX
+#else
+#define CHAR_MIN SCHAR_MIN
+#define CHAR_MAX __SCHAR_MAX__
+#endif
+
+/* C99 5.2.4.2.1: Added long long.
+   C++11 18.3.3.2: same contents as the Standard C Library header <climits>.
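+   For instance, under -std=c99 or later a translation unit may simply write
+   long long __big = LLONG_MAX; (an illustrative sketch; the variable name is
+   arbitrary).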
+ */
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+
+#undef LLONG_MIN
+#undef LLONG_MAX
+#undef ULLONG_MAX
+
+#define LLONG_MAX  __LONG_LONG_MAX__
+#define LLONG_MIN  (-__LONG_LONG_MAX__-1LL)
+#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
+   that we don't have something like #pragma poison that could be used to
+   deprecate a macro - the code should just use LLONG_MAX and friends.
+ */
+#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
+
+#undef LONG_LONG_MIN
+#undef LONG_LONG_MAX
+#undef ULONG_LONG_MAX
+
+#define LONG_LONG_MAX  __LONG_LONG_MAX__
+#define LONG_LONG_MIN  (-__LONG_LONG_MAX__-1LL)
+#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+#endif /* __CLANG_LIMITS_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/lwpintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/lwpintrin.h
new file mode 100644
index 0000000..d8ab0db
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/lwpintrin.h
@@ -0,0 +1,136 @@
+/*===---- lwpintrin.h - LWP intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <lwpintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LWPINTRIN_H
+#define __LWPINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lwp")))
+
+/// Parses the LWPCB at the specified address and enables
+/// profiling if valid.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the LLWPCB instruction.
+///
+/// \param __addr
+///    Address to the new Lightweight Profiling Control Block (LWPCB). If the
+///    LWPCB is valid, writes the address into the LWP_CBADDR MSR and enables
+///    Lightweight Profiling.
+static __inline__ void __DEFAULT_FN_ATTRS
+__llwpcb (void *__addr)
+{
+  __builtin_ia32_llwpcb(__addr);
+}
+
+/// Flushes the LWP state to memory and returns the address of the LWPCB.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the SLWPCB instruction.
+///
+/// \return
+///    Address to the current Lightweight Profiling Control Block (LWPCB).
+///    If LWP is not currently enabled, returns NULL.
+static __inline__ void* __DEFAULT_FN_ATTRS
+__slwpcb (void)
+{
+  return __builtin_ia32_slwpcb();
+}
+
+/// Inserts programmed event record into the LWP event ring buffer
+/// and advances the ring buffer pointer.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the LWPINS instruction.
+///
+/// \param DATA2
+///    A 32-bit value is zero-extended and inserted into the 64-bit Data2 field.
+/// \param DATA1
+///    A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+///    A 32-bit immediate value is inserted into the 32-bit Flags field.
+/// \returns If the ring buffer is full and LWP is running in Synchronized Mode,
+///    the event record overwrites the last record in the buffer, the MissedEvents
+///    counter in the LWPCB is incremented, the head pointer is not advanced, and
+///    1 is returned. Otherwise 0 is returned.
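+///
+/// A minimal usage sketch (illustrative only; the event payload values are
+/// arbitrary placeholders):
+/// \code
+///   if (__lwpins32(0u, 0x1234u, 0u))
+///     /* buffer was full in Synchronized Mode; last record overwritten */;
+/// \endcode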
+#define __lwpins32(DATA2, DATA1, FLAGS) \
+  (__builtin_ia32_lwpins32((unsigned int) (DATA2), (unsigned int) (DATA1), \
+                           (unsigned int) (FLAGS)))
+
+/// Decrements the LWP programmed value sample event counter. If the result is
+/// negative, inserts an event record into the LWP event ring buffer in memory
+/// and advances the ring buffer pointer.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the LWPVAL instruction.
+///
+/// \param DATA2
+///    A 32-bit value is zero-extended and inserted into the 64-bit Data2 field.
+/// \param DATA1
+///    A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+///    A 32-bit immediate value is inserted into the 32-bit Flags field.
+#define __lwpval32(DATA2, DATA1, FLAGS) \
+  (__builtin_ia32_lwpval32((unsigned int) (DATA2), (unsigned int) (DATA1), \
+                           (unsigned int) (FLAGS)))
+
+#ifdef __x86_64__
+
+/// Inserts programmed event record into the LWP event ring buffer
+/// and advances the ring buffer pointer.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the LWPINS instruction.
+///
+/// \param DATA2
+///    A 64-bit value is inserted into the 64-bit Data2 field.
+/// \param DATA1
+///    A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+///    A 32-bit immediate value is inserted into the 32-bit Flags field.
+/// \returns If the ring buffer is full and LWP is running in Synchronized Mode,
+///    the event record overwrites the last record in the buffer, the MissedEvents
+///    counter in the LWPCB is incremented, the head pointer is not advanced, and
+///    1 is returned. Otherwise 0 is returned.
+#define __lwpins64(DATA2, DATA1, FLAGS) \
+  (__builtin_ia32_lwpins64((unsigned long long) (DATA2), (unsigned int) (DATA1), \
+                           (unsigned int) (FLAGS)))
+
+/// Decrements the LWP programmed value sample event counter. If the result is
+/// negative, inserts an event record into the LWP event ring buffer in memory
+/// and advances the ring buffer pointer.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the LWPVAL instruction.
+///
+/// \param DATA2
+///    A 64-bit value is inserted into the 64-bit Data2 field.
+/// \param DATA1
+///    A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+///    A 32-bit immediate value is inserted into the 32-bit Flags field.
+#define __lwpval64(DATA2, DATA1, FLAGS) \
+  (__builtin_ia32_lwpval64((unsigned long long) (DATA2), (unsigned int) (DATA1), \
+                           (unsigned int) (FLAGS)))
+
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __LWPINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/lzcntintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/lzcntintrin.h
new file mode 100644
index 0000000..f4ddce9
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/lzcntintrin.h
@@ -0,0 +1,104 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file.
+ */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+
+#ifndef _MSC_VER
+/// Counts the number of leading zero bits in the operand.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+///    An unsigned 16-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of leading zero
+///    bits in the operand.
+#define __lzcnt16(X) __builtin_ia32_lzcnt_u16((unsigned short)(X))
+#endif // _MSC_VER
+
+/// Counts the number of leading zero bits in the operand.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of leading zero
+///    bits in the operand.
+/// \see _lzcnt_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__lzcnt32(unsigned int __X)
+{
+  return __builtin_ia32_lzcnt_u32(__X);
+}
+
+/// Counts the number of leading zero bits in the operand.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of leading zero
+///    bits in the operand.
+/// \see __lzcnt32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_lzcnt_u32(unsigned int __X)
+{
+  return __builtin_ia32_lzcnt_u32(__X);
+}
+
+#ifdef __x86_64__
+#ifndef _MSC_VER
+/// Counts the number of leading zero bits in the operand.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of leading zero
+///    bits in the operand.
+/// \see _lzcnt_u64
+#define __lzcnt64(X) __builtin_ia32_lzcnt_u64((unsigned long long)(X))
+#endif // _MSC_VER
+
+/// Counts the number of leading zero bits in the operand.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of leading zero
+///    bits in the operand.
+/// \see __lzcnt64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_lzcnt_u64(unsigned long long __X)
+{
+  return __builtin_ia32_lzcnt_u64(__X);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __LZCNTINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mm3dnow.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mm3dnow.h
new file mode 100644
index 0000000..22ab13a
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mm3dnow.h
@@ -0,0 +1,157 @@
+/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#include <mmintrin.h>
+#include <prfchwintrin.h>
+
+typedef float __v2sf __attribute__((__vector_size__(8)));
+
+/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow"), __min_vector_width__(64))) + +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("3dnow"))) +_m_femms(void) { + __builtin_ia32_femms(); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pavgusb(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pf2id(__m64 __m) { + return (__m64)__builtin_ia32_pf2id((__v2sf)__m); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfacc(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfadd(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfcmpeq(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfcmpge(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfcmpgt(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfmax(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfmin(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfmul(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfrcp(__m64 __m) { + return (__m64)__builtin_ia32_pfrcp((__v2sf)__m); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfrcpit1(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfrcpit2(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfrsqrt(__m64 __m) { + return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfrsqrtit1(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfsub(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pfsubr(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pi2fd(__m64 __m) { + return (__m64)__builtin_ia32_pi2fd((__v2si)__m); +} + +static __inline__ __m64 __DEFAULT_FN_ATTRS +_m_pmulhrw(__m64 __m1, __m64 __m2) { + return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2); +} + +/* Handle the 3dnowa instructions here. 
+ */
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa"), __min_vector_width__(64)))
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pf2iw(__m64 __m) {
+  return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfnacc(__m64 __m1, __m64 __m2) {
+  return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pfpnacc(__m64 __m1, __m64 __m2) {
+  return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pi2fw(__m64 __m) {
+  return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsf(__m64 __m) {
+  return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
+}
+
+static __inline__ __m64 __DEFAULT_FN_ATTRS
+_m_pswapdsi(__m64 __m) {
+  return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mm_malloc.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mm_malloc.h
new file mode 100644
index 0000000..933dbaa
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mm_malloc.h
@@ -0,0 +1,67 @@
+/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MM_MALLOC_H
+#define __MM_MALLOC_H
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#ifndef __cplusplus
+extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);
+#else
+// Some systems (e.g. those with GNU libc) declare posix_memalign with an
+// exception specifier. Via an "egregious workaround" in
+// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
+// redeclaration of glibc's declaration.
+extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size); +#endif +#endif + +#if !(defined(_WIN32) && defined(_mm_malloc)) +static __inline__ void *__attribute__((__always_inline__, __nodebug__, + __malloc__)) +_mm_malloc(size_t __size, size_t __align) +{ + if (__align == 1) { + return malloc(__size); + } + + if (!(__align & (__align - 1)) && __align < sizeof(void *)) + __align = sizeof(void *); + + void *__mallocedMemory; +#if defined(__MINGW32__) + __mallocedMemory = __mingw_aligned_malloc(__size, __align); +#elif defined(_WIN32) + __mallocedMemory = _aligned_malloc(__size, __align); +#else + if (posix_memalign(&__mallocedMemory, __align, __size)) + return 0; +#endif + + return __mallocedMemory; +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_mm_free(void *__p) +{ +#if defined(__MINGW32__) + __mingw_aligned_free(__p); +#elif defined(_WIN32) + _aligned_free(__p); +#else + free(__p); +#endif +} +#endif + +#endif /* __MM_MALLOC_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mmintrin.h new file mode 100644 index 0000000..79a8b55 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mmintrin.h @@ -0,0 +1,1558 @@ +/*===---- mmintrin.h - MMX intrinsics --------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __MMINTRIN_H +#define __MMINTRIN_H + +typedef long long __m64 __attribute__((__vector_size__(8), __aligned__(8))); + +typedef long long __v1di __attribute__((__vector_size__(8))); +typedef int __v2si __attribute__((__vector_size__(8))); +typedef short __v4hi __attribute__((__vector_size__(8))); +typedef char __v8qi __attribute__((__vector_size__(8))); + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx"), __min_vector_width__(64))) + +/// Clears the MMX state by setting the state of the x87 stack registers +/// to empty. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the EMMS instruction. +/// +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("mmx"))) +_mm_empty(void) +{ + __builtin_ia32_emms(); +} + +/// Constructs a 64-bit integer vector, setting the lower 32 bits to the +/// value of the 32-bit integer parameter and setting the upper 32 bits to 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVD instruction. +/// +/// \param __i +/// A 32-bit integer value. +/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the +/// parameter. The upper 32 bits are set to 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cvtsi32_si64(int __i) +{ + return (__m64)__builtin_ia32_vec_init_v2si(__i, 0); +} + +/// Returns the lower 32 bits of a 64-bit integer vector as a 32-bit +/// signed integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVD instruction. +/// +/// \param __m +/// A 64-bit integer vector. +/// \returns A 32-bit signed integer value containing the lower 32 bits of the +/// parameter. 
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtsi64_si32(__m64 __m) +{ + return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0); +} + +/// Casts a 64-bit signed integer value into a 64-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVQ instruction. +/// +/// \param __i +/// A 64-bit signed integer. +/// \returns A 64-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cvtsi64_m64(long long __i) +{ + return (__m64)__i; +} + +/// Casts a 64-bit integer vector into a 64-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVQ instruction. +/// +/// \param __m +/// A 64-bit integer vector. +/// \returns A 64-bit signed integer containing the same bitwise pattern as the +/// parameter. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvtm64_si64(__m64 __m) +{ + return (long long)__m; +} + +/// Converts 16-bit signed integers from both 64-bit integer vector +/// parameters of [4 x i16] into 8-bit signed integer values, and constructs +/// a 64-bit integer vector of [8 x i8] as the result. Positive values +/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80 +/// are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKSSWB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit signed integer with +/// saturation. Positive values greater than 0x7F are saturated to 0x7F. +/// Negative values less than 0x80 are saturated to 0x80. The converted +/// [4 x i8] values are written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit signed integer with +/// saturation. Positive values greater than 0x7F are saturated to 0x7F. +/// Negative values less than 0x80 are saturated to 0x80. The converted +/// [4 x i8] values are written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_packs_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); +} + +/// Converts 32-bit signed integers from both 64-bit integer vector +/// parameters of [2 x i32] into 16-bit signed integer values, and constructs +/// a 64-bit integer vector of [4 x i16] as the result. Positive values +/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than +/// 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKSSDW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a +/// 32-bit signed integer and is converted to a 16-bit signed integer with +/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. +/// Negative values less than 0x8000 are saturated to 0x8000. The converted +/// [2 x i16] values are written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a +/// 32-bit signed integer and is converted to a 16-bit signed integer with +/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. +/// Negative values less than 0x8000 are saturated to 0x8000. 
The converted +/// [2 x i16] values are written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_packs_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); +} + +/// Converts 16-bit signed integers from both 64-bit integer vector +/// parameters of [4 x i16] into 8-bit unsigned integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. Values +/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated +/// to 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKUSWB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0 are saturated to 0. The converted [4 x i8] values are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0 are saturated to 0. The converted [4 x i8] values are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_packs_pu16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2); +} + +/// Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8] +/// and interleaves them into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. \n +/// Bits [39:32] are written to bits [7:0] of the result. \n +/// Bits [47:40] are written to bits [23:16] of the result. \n +/// Bits [55:48] are written to bits [39:32] of the result. \n +/// Bits [63:56] are written to bits [55:48] of the result. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [39:32] are written to bits [15:8] of the result. \n +/// Bits [47:40] are written to bits [31:24] of the result. \n +/// Bits [55:48] are written to bits [47:40] of the result. \n +/// Bits [63:56] are written to bits [63:56] of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpackhi_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2); +} + +/// Unpacks the upper 32 bits from two 64-bit integer vectors of +/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [47:32] are written to bits [15:0] of the result. \n +/// Bits [63:48] are written to bits [47:32] of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [47:32] are written to bits [31:16] of the result. \n +/// Bits [63:48] are written to bits [63:48] of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved +/// values. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpackhi_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2); +} + +/// Unpacks the upper 32 bits from two 64-bit integer vectors of +/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHDQ instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpackhi_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2); +} + +/// Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8] +/// and interleaves them into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [7:0] are written to bits [7:0] of the result. \n +/// Bits [15:8] are written to bits [23:16] of the result. \n +/// Bits [23:16] are written to bits [39:32] of the result. \n +/// Bits [31:24] are written to bits [55:48] of the result. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [7:0] are written to bits [15:8] of the result. \n +/// Bits [15:8] are written to bits [31:24] of the result. \n +/// Bits [23:16] are written to bits [47:40] of the result. \n +/// Bits [31:24] are written to bits [63:56] of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpacklo_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2); +} + +/// Unpacks the lower 32 bits from two 64-bit integer vectors of +/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [15:0] are written to bits [15:0] of the result. \n +/// Bits [31:16] are written to bits [47:32] of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [15:0] are written to bits [31:16] of the result. \n +/// Bits [31:16] are written to bits [63:48] of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpacklo_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2); +} + +/// Unpacks the lower 32 bits from two 64-bit integer vectors of +/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLDQ instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved +/// values. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpacklo_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2); +} + +/// Adds each 8-bit integer element of the first 64-bit integer vector +/// of [8 x i8] to the corresponding 8-bit integer element of the second +/// 64-bit integer vector of [8 x i8]. The lower 8 bits of the results are +/// packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_add_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Adds each 16-bit integer element of the first 64-bit integer vector +/// of [4 x i16] to the corresponding 16-bit integer element of the second +/// 64-bit integer vector of [4 x i16]. The lower 16 bits of the results are +/// packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_add_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Adds each 32-bit integer element of the first 64-bit integer vector +/// of [2 x i32] to the corresponding 32-bit integer element of the second +/// 64-bit integer vector of [2 x i32]. The lower 32 bits of the results are +/// packed into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_add_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); +} + +/// Adds each 8-bit signed integer element of the first 64-bit integer +/// vector of [8 x i8] to the corresponding 8-bit signed integer element of +/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than +/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to +/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums +/// of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Adds each 16-bit signed integer element of the first 64-bit integer +/// vector of [4 x i16] to the corresponding 16-bit signed integer element of +/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than +/// 0x7FFF are saturated to 0x7FFF. 
Negative sums less than 0x8000 are +/// saturated to 0x8000. The results are packed into a 64-bit integer vector +/// of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums +/// of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Adds each 8-bit unsigned integer element of the first 64-bit integer +/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of +/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are +/// saturated to 0xFF. The results are packed into a 64-bit integer vector of +/// [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDUSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// unsigned sums of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pu8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Adds each 16-bit unsigned integer element of the first 64-bit integer +/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element +/// of the second 64-bit integer vector of [4 x i16]. Sums greater than +/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDUSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// unsigned sums of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pu16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Subtracts each 8-bit integer element of the second 64-bit integer +/// vector of [8 x i8] from the corresponding 8-bit integer element of the +/// first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results +/// are packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the differences of +/// both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sub_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Subtracts each 16-bit integer element of the second 64-bit integer +/// vector of [4 x i16] from the corresponding 16-bit integer element of the +/// first 64-bit integer vector of [4 x i16]. The lower 16 bits of the +/// results are packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. 
+/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the differences of +/// both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sub_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Subtracts each 32-bit integer element of the second 64-bit integer +/// vector of [2 x i32] from the corresponding 32-bit integer element of the +/// first 64-bit integer vector of [2 x i32]. The lower 32 bits of the +/// results are packed into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32] containing the subtrahends. +/// \returns A 64-bit integer vector of [2 x i32] containing the differences of +/// both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sub_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); +} + +/// Subtracts each 8-bit signed integer element of the second 64-bit +/// integer vector of [8 x i8] from the corresponding 8-bit signed integer +/// element of the first 64-bit integer vector of [8 x i8]. Positive results +/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80 +/// are saturated to 0x80. The results are packed into a 64-bit integer +/// vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Subtracts each 16-bit signed integer element of the second 64-bit +/// integer vector of [4 x i16] from the corresponding 16-bit signed integer +/// element of the first 64-bit integer vector of [4 x i16]. Positive results +/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than +/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Subtracts each 8-bit unsigned integer element of the second 64-bit +/// integer vector of [8 x i8] from the corresponding 8-bit unsigned integer +/// element of the first 64-bit integer vector of [8 x i8]. +/// +/// If an element of the first vector is less than the corresponding element +/// of the second vector, the result is saturated to 0. The results are +/// packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBUSB instruction. 
+/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pu8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Subtracts each 16-bit unsigned integer element of the second 64-bit +/// integer vector of [4 x i16] from the corresponding 16-bit unsigned +/// integer element of the first 64-bit integer vector of [4 x i16]. +/// +/// If an element of the first vector is less than the corresponding element +/// of the second vector, the result is saturated to 0. The results are +/// packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBUSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pu16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16] and get four +/// 32-bit products. Adds adjacent pairs of products to get two 32-bit sums. +/// The lower 32 bits of these two sums are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// For example, bits [15:0] of both parameters are multiplied, bits [31:16] +/// of both parameters are multiplied, and the sum of both results is written +/// to bits [31:0] of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMADDWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [2 x i32] containing the sums of +/// products of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_madd_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2); +} + +/// Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16]. Packs the upper +/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULHW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits +/// of the products of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_mulhi_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16]. 
Packs the lower +/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULLW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits +/// of the products of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_mullo_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Left-shifts each 16-bit signed integer element of the first +/// parameter, which is a 64-bit integer vector of [4 x i16], by the number +/// of bits specified by the second parameter, which is a 64-bit integer. The +/// lower 16 bits of the results are packed into a 64-bit integer vector of +/// [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted +/// values. If \a __count is greater or equal to 16, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sll_pi16(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count); +} + +/// Left-shifts each 16-bit signed integer element of a 64-bit integer +/// vector of [4 x i16] by the number of bits specified by a 32-bit integer. +/// The lower 16 bits of the results are packed into a 64-bit integer vector +/// of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted +/// values. If \a __count is greater or equal to 16, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_slli_pi16(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count); +} + +/// Left-shifts each 32-bit signed integer element of the first +/// parameter, which is a 64-bit integer vector of [2 x i32], by the number +/// of bits specified by the second parameter, which is a 64-bit integer. The +/// lower 32 bits of the results are packed into a 64-bit integer vector of +/// [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted +/// values. If \a __count is greater or equal to 32, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sll_pi32(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_pslld((__v2si)__m, __count); +} + +/// Left-shifts each 32-bit signed integer element of a 64-bit integer +/// vector of [2 x i32] by the number of bits specified by a 32-bit integer. +/// The lower 32 bits of the results are packed into a 64-bit integer vector +/// of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. 
+/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted +/// values. If \a __count is greater or equal to 32, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_slli_pi32(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count); +} + +/// Left-shifts the first 64-bit integer parameter by the number of bits +/// specified by the second 64-bit integer parameter. The lower 64 bits of +/// result are returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector containing the left-shifted value. If +/// \a __count is greater or equal to 64, the result is set to 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sll_si64(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psllq((__v1di)__m, __count); +} + +/// Left-shifts the first parameter, which is a 64-bit integer, by the +/// number of bits specified by the second parameter, which is a 32-bit +/// integer. The lower 64 bits of result are returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector containing the left-shifted value. If +/// \a __count is greater or equal to 64, the result is set to 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_slli_si64(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count); +} + +/// Right-shifts each 16-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [4 x i16], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 16-bit element. The 16-bit results are packed into a 64-bit integer +/// vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sra_pi16(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count); +} + +/// Right-shifts each 16-bit integer element of a 64-bit integer vector +/// of [4 x i16] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 16-bit element. The 16-bit results are packed into a 64-bit integer +/// vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srai_pi16(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count); +} + +/// Right-shifts each 32-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [2 x i32], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 32-bit element. The 32-bit results are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sra_pi32(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrad((__v2si)__m, __count); +} + +/// Right-shifts each 32-bit integer element of a 64-bit integer vector +/// of [2 x i32] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 32-bit element. The 32-bit results are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srai_pi32(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psradi((__v2si)__m, __count); +} + +/// Right-shifts each 16-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [4 x i16], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are cleared. The 16-bit results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srl_pi16(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count); +} + +/// Right-shifts each 16-bit integer element of a 64-bit integer vector +/// of [4 x i16] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are cleared. The 16-bit results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srli_pi16(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count); +} + +/// Right-shifts each 32-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [2 x i32], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. 
+/// +/// High-order bits are cleared. The 32-bit results are packed into a 64-bit +/// integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srl_pi32(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrld((__v2si)__m, __count); +} + +/// Right-shifts each 32-bit integer element of a 64-bit integer vector +/// of [2 x i32] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are cleared. The 32-bit results are packed into a 64-bit +/// integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srli_pi32(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count); +} + +/// Right-shifts the first 64-bit integer parameter by the number of bits +/// specified by the second 64-bit integer parameter. +/// +/// High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector containing the right-shifted value. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srl_si64(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count); +} + +/// Right-shifts the first parameter, which is a 64-bit integer, by the +/// number of bits specified by the second parameter, which is a 32-bit +/// integer. +/// +/// High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector containing the right-shifted value. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srli_si64(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count); +} + +/// Performs a bitwise AND of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAND instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise AND of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_and_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2); +} + +/// Performs a bitwise NOT of the first 64-bit integer vector, and then +/// performs a bitwise AND of the intermediate result and the second 64-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PANDN instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. The one's complement of this parameter is used +/// in the bitwise AND. +/// \param __m2 +/// A 64-bit integer vector. 
+/// \returns A 64-bit integer vector containing the bitwise AND of the second +/// parameter and the one's complement of the first parameter. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_andnot_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2); +} + +/// Performs a bitwise OR of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POR instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise OR of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_or_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2); +} + +/// Performs a bitwise exclusive OR of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PXOR instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_xor_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2); +} + +/// Compares the 8-bit integer elements of two 64-bit integer vectors of +/// [8 x i8] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. +/// +/// The comparison yields 0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpeq_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Compares the 16-bit integer elements of two 64-bit integer vectors of +/// [4 x i16] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. +/// +/// The comparison yields 0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpeq_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Compares the 32-bit integer elements of two 64-bit integer vectors of +/// [2 x i32] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. +/// +/// The comparison yields 0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the comparison +/// results. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpeq_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2); +} + +/// Compares the 8-bit integer elements of two 64-bit integer vectors of +/// [8 x i8] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. +/// +/// The comparison yields 0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpgt_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Compares the 16-bit integer elements of two 64-bit integer vectors of +/// [4 x i16] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. +/// +/// The comparison yields 0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpgt_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Compares the 32-bit integer elements of two 64-bit integer vectors of +/// [2 x i32] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. +/// +/// The comparison yields 0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpgt_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2); +} + +/// Constructs a 64-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PXOR instruction. +/// +/// \returns An initialized 64-bit integer vector with all elements set to zero. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setzero_si64(void) +{ + return __extension__ (__m64){ 0LL }; +} + +/// Constructs a 64-bit integer vector initialized with the specified +/// 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i1 +/// A 32-bit integer value used to initialize the upper 32 bits of the +/// result. +/// \param __i0 +/// A 32-bit integer value used to initialize the lower 32 bits of the +/// result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set_pi32(int __i1, int __i0) +{ + return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1); +} + +/// Constructs a 64-bit integer vector initialized with the specified +/// 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. 
+/// +/// \param __s3 +/// A 16-bit integer value used to initialize bits [63:48] of the result. +/// \param __s2 +/// A 16-bit integer value used to initialize bits [47:32] of the result. +/// \param __s1 +/// A 16-bit integer value used to initialize bits [31:16] of the result. +/// \param __s0 +/// A 16-bit integer value used to initialize bits [15:0] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set_pi16(short __s3, short __s2, short __s1, short __s0) +{ + return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3); +} + +/// Constructs a 64-bit integer vector initialized with the specified +/// 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b7 +/// An 8-bit integer value used to initialize bits [63:56] of the result. +/// \param __b6 +/// An 8-bit integer value used to initialize bits [55:48] of the result. +/// \param __b5 +/// An 8-bit integer value used to initialize bits [47:40] of the result. +/// \param __b4 +/// An 8-bit integer value used to initialize bits [39:32] of the result. +/// \param __b3 +/// An 8-bit integer value used to initialize bits [31:24] of the result. +/// \param __b2 +/// An 8-bit integer value used to initialize bits [23:16] of the result. +/// \param __b1 +/// An 8-bit integer value used to initialize bits [15:8] of the result. +/// \param __b0 +/// An 8-bit integer value used to initialize bits [7:0] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, + char __b1, char __b0) +{ + return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); +} + +/// Constructs a 64-bit integer vector of [2 x i32], with each of the +/// 32-bit integer vector elements set to the specified 32-bit integer +/// value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i +/// A 32-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [2 x i32]. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set1_pi32(int __i) +{ + return _mm_set_pi32(__i, __i); +} + +/// Constructs a 64-bit integer vector of [4 x i16], with each of the +/// 16-bit integer vector elements set to the specified 16-bit integer +/// value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w +/// A 16-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [4 x i16]. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set1_pi16(short __w) +{ + return _mm_set_pi16(__w, __w, __w, __w); +} + +/// Constructs a 64-bit integer vector of [8 x i8], with each of the +/// 8-bit integer vector elements set to the specified 8-bit integer value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b +/// An 8-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [8 x i8]. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set1_pi8(char __b) +{ + return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b); +} + +/// Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integer value used to initialize the lower 32 bits of the +/// result. +/// \param __i1 +/// A 32-bit integer value used to initialize the upper 32 bits of the +/// result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setr_pi32(int __i0, int __i1) +{ + return _mm_set_pi32(__i1, __i0); +} + +/// Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w0 +/// A 16-bit integer value used to initialize bits [15:0] of the result. +/// \param __w1 +/// A 16-bit integer value used to initialize bits [31:16] of the result. +/// \param __w2 +/// A 16-bit integer value used to initialize bits [47:32] of the result. +/// \param __w3 +/// A 16-bit integer value used to initialize bits [63:48] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) +{ + return _mm_set_pi16(__w3, __w2, __w1, __w0); +} + +/// Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b0 +/// An 8-bit integer value used to initialize bits [7:0] of the result. +/// \param __b1 +/// An 8-bit integer value used to initialize bits [15:8] of the result. +/// \param __b2 +/// An 8-bit integer value used to initialize bits [23:16] of the result. +/// \param __b3 +/// An 8-bit integer value used to initialize bits [31:24] of the result. +/// \param __b4 +/// An 8-bit integer value used to initialize bits [39:32] of the result. +/// \param __b5 +/// An 8-bit integer value used to initialize bits [47:40] of the result. +/// \param __b6 +/// An 8-bit integer value used to initialize bits [55:48] of the result. +/// \param __b7 +/// An 8-bit integer value used to initialize bits [63:56] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, + char __b6, char __b7) +{ + return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +#undef __DEFAULT_FN_ATTRS + +/* Aliases for compatibility. 
*/
+#define _m_empty _mm_empty
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_from_int64 _mm_cvtsi64_m64
+#define _m_to_int _mm_cvtsi64_si32
+#define _m_to_int64 _mm_cvtm64_si64
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+
+#endif /* __MMINTRIN_H */
+
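For reference, the saturating forms in this header clamp at the type limits instead of wrapping. A minimal standalone sketch (file name and values are illustrative, not part of this patch): the top lane of the sum below clamps to 0x7FFF where _mm_add_pi16 would wrap to a negative value.

/* mmx_sat_demo.c: build with a clang like the one bundled here,
 * e.g. "clang -O2 mmx_sat_demo.c". */
#include <mmintrin.h>
#include <stdio.h>

int main(void) {
    __m64 a = _mm_set_pi16(0x7000, 1, 2, 3);   /* 0x7000 lands in bits [63:48] */
    __m64 b = _mm_set_pi16(0x2000, 1, 2, 3);
    __m64 r = _mm_adds_pi16(a, b);             /* top lane saturates to 0x7FFF */
    long long bits = _mm_cvtm64_si64(r);
    _mm_empty();  /* clear MMX state before any x87 floating-point code runs */
    printf("%016llx\n", (unsigned long long)bits);  /* prints 7fff000200040006 */
    return 0;
}

The _m_* aliases above map one-to-one onto the _mm_* names, so legacy code written against the old Intel spellings compiles unchanged.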
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/module.modulemap b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/module.modulemap
new file mode 100644
index 0000000..6894672
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/module.modulemap
@@ -0,0 +1,164 @@
+/*===---- module.modulemap - intrinsics module map -------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+module _Builtin_intrinsics [system] [extern_c] {
+  explicit module altivec {
+    requires altivec
+    header "altivec.h"
+  }
+
+  explicit module arm {
+    requires arm
+
+    explicit module acle {
+      header "arm_acle.h"
+      export *
+    }
+
+    explicit module neon {
+      requires neon
+      header "arm_neon.h"
+      header "arm_fp16.h"
+      export *
+    }
+
+    explicit module sve {
+      requires sve
+      header "arm_sve.h"
+      export *
+    }
+  }
+
+  explicit module intel {
+    requires x86
+    export *
+
+    header "immintrin.h"
+    textual header "f16cintrin.h"
+    textual header "avxintrin.h"
+    textual header "avx2intrin.h"
+    textual header "avx512fintrin.h"
+    textual header "avx512erintrin.h"
+    textual header "fmaintrin.h"
+
+    header "x86intrin.h"
+    textual header "bmiintrin.h"
+    textual header "bmi2intrin.h"
+    textual header "lzcntintrin.h"
+    textual header "xopintrin.h"
+    textual header "fma4intrin.h"
+    textual header "mwaitxintrin.h"
+    textual header "clzerointrin.h"
+    textual header "wbnoinvdintrin.h"
+    textual header "cldemoteintrin.h"
+    textual header "waitpkgintrin.h"
+    textual header "movdirintrin.h"
+    textual header "pconfigintrin.h"
+    textual header "sgxintrin.h"
+    textual header "ptwriteintrin.h"
+    textual header "invpcidintrin.h"
+
+    textual header "__wmmintrin_aes.h"
+    textual header "__wmmintrin_pclmul.h"
+
+    explicit module mm_malloc {
+      requires !freestanding
+      header "mm_malloc.h"
+      export * // note: for <stdlib.h> dependency
+    }
+
+    explicit module cpuid {
+      requires gnuinlineasm
+      header "cpuid.h"
+    }
+
+    explicit module mmx {
+      header "mmintrin.h"
+    }
+
+    explicit module sse {
+      export mm_malloc
+      export mmx
+      export sse2 // note: for hackish <emmintrin.h> dependency
+      header "xmmintrin.h"
+    }
+
+    explicit module sse2 {
+      export sse
+      header "emmintrin.h"
+    }
+
+    explicit module sse3 {
+      export sse2
+      header "pmmintrin.h"
+    }
+
+    explicit module ssse3 {
+      export sse3
+      header "tmmintrin.h"
+    }
+
+    explicit module sse4_1 {
+      export ssse3
+      header "smmintrin.h"
+    }
+
+    explicit module sse4_2 {
+      export sse4_1
+      header "nmmintrin.h"
+    }
+
+    explicit module sse4a {
+      export sse3
+      header "ammintrin.h"
+    }
+
+    explicit module popcnt {
+      header "popcntintrin.h"
+    }
+
+    explicit module mm3dnow {
+      header "mm3dnow.h"
+    }
+
+    explicit module aes_pclmul {
+      header "wmmintrin.h"
+      export aes
+      export pclmul
+    }
+  }
+
+  explicit module systemz {
+    requires systemz
+    export *
+
+    header "s390intrin.h"
+
+    explicit module htm {
+      requires htm
+      header "htmintrin.h"
+      header "htmxlintrin.h"
+    }
+
+    explicit module zvector {
+      requires zvector, vx
+      header "vecintrin.h"
+    }
+  }
+}
+
+module _Builtin_stddef_max_align_t [system] [extern_c] {
+  header "__stddef_max_align_t.h"
+}
+
+module opencl_c {
+  requires opencl
+  header "opencl-c.h"
+  header "opencl-c-base.h"
+}
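This map is what lets a modules-enabled build treat the bundled intrinsic headers as the _Builtin_intrinsics module tree instead of textually preprocessing them on every inclusion; the "textual header" entries are still parsed in the including file's context, which is why the per-subtarget headers stay textual under immintrin.h and x86intrin.h. A smoke-test sketch (file name is mine, assuming the bundled clang):

/* modules_smoke.c: "clang -fmodules -c modules_smoke.c" should satisfy this
 * include from the _Builtin_intrinsics.intel module rather than by plain
 * textual inclusion. */
#include <immintrin.h>

int main(void) { return 0; }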
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/movdirintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/movdirintrin.h
new file mode 100644
index 0000000..30c4d02
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/movdirintrin.h
@@ -0,0 +1,49 @@
+/*===------------------------- movdirintrin.h ------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <movdirintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef _MOVDIRINTRIN_H
+#define _MOVDIRINTRIN_H
+
+/* Move doubleword as direct store */
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("movdiri")))
+_directstoreu_u32 (void *__dst, unsigned int __value)
+{
+  __builtin_ia32_directstore_u32((unsigned int *)__dst, (unsigned int)__value);
+}
+
+#ifdef __x86_64__
+
+/* Move quadword as direct store */
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("movdiri")))
+_directstoreu_u64 (void *__dst, unsigned long __value)
+{
+  __builtin_ia32_directstore_u64((unsigned long *)__dst, __value);
+}
+
+#endif /* __x86_64__ */
+
+/*
+ * movdir64b - Move 64 bytes as direct store.
+ * The destination must be 64 byte aligned, and the store is atomic.
+ * The source address has no alignment requirement, and the load from
+ * the source address is not atomic.
+ */
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("movdir64b")))
+_movdir64b (void *__dst __attribute__((align_value(64))), const void *__src)
+{
+  __builtin_ia32_movdir64b(__dst, __src);
+}
+
+#endif /* _MOVDIRINTRIN_H */
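The contract stated in the movdir64b comment above (aligned, atomic 64-byte store; unaligned, non-atomic source load) is the intended use for posting device descriptors. A hedged caller sketch (function and parameter names are mine, not from this header):

/* Posts one 64-byte descriptor to a 64-byte-aligned doorbell region with a
 * single atomic write. Compile the translation unit with -mmovdir64b, or
 * keep the per-function target attribute used below. */
#include <x86intrin.h>

__attribute__((target("movdir64b")))
void post_descriptor(void *dst64 /* must be 64-byte aligned */,
                     const void *src /* no alignment requirement */)
{
    _movdir64b(dst64, src);
}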
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef _MSA_H +#define _MSA_H 1 + +#if defined(__mips_msa) +typedef signed char v16i8 __attribute__((vector_size(16), aligned(16))); +typedef signed char v16i8_b __attribute__((vector_size(16), aligned(1))); +typedef unsigned char v16u8 __attribute__((vector_size(16), aligned(16))); +typedef unsigned char v16u8_b __attribute__((vector_size(16), aligned(1))); +typedef short v8i16 __attribute__((vector_size(16), aligned(16))); +typedef short v8i16_h __attribute__((vector_size(16), aligned(2))); +typedef unsigned short v8u16 __attribute__((vector_size(16), aligned(16))); +typedef unsigned short v8u16_h __attribute__((vector_size(16), aligned(2))); +typedef int v4i32 __attribute__((vector_size(16), aligned(16))); +typedef int v4i32_w __attribute__((vector_size(16), aligned(4))); +typedef unsigned int v4u32 __attribute__((vector_size(16), aligned(16))); +typedef unsigned int v4u32_w __attribute__((vector_size(16), aligned(4))); +typedef long long v2i64 __attribute__((vector_size(16), aligned(16))); +typedef long long v2i64_d __attribute__((vector_size(16), aligned(8))); +typedef unsigned long long v2u64 __attribute__((vector_size(16), aligned(16))); +typedef unsigned long long v2u64_d __attribute__((vector_size(16), aligned(8))); +typedef float v4f32 __attribute__((vector_size(16), aligned(16))); +typedef float v4f32_w __attribute__((vector_size(16), aligned(4))); +typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); +typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); + +#define __msa_sll_b __builtin_msa_sll_b +#define __msa_sll_h __builtin_msa_sll_h +#define __msa_sll_w __builtin_msa_sll_w +#define __msa_sll_d __builtin_msa_sll_d +#define __msa_slli_b __builtin_msa_slli_b +#define __msa_slli_h __builtin_msa_slli_h +#define __msa_slli_w __builtin_msa_slli_w +#define __msa_slli_d __builtin_msa_slli_d +#define __msa_sra_b __builtin_msa_sra_b +#define __msa_sra_h __builtin_msa_sra_h +#define __msa_sra_w __builtin_msa_sra_w +#define __msa_sra_d __builtin_msa_sra_d +#define __msa_srai_b __builtin_msa_srai_b +#define __msa_srai_h __builtin_msa_srai_h +#define __msa_srai_w __builtin_msa_srai_w +#define __msa_srai_d __builtin_msa_srai_d +#define __msa_srar_b __builtin_msa_srar_b +#define __msa_srar_h __builtin_msa_srar_h +#define __msa_srar_w __builtin_msa_srar_w +#define __msa_srar_d __builtin_msa_srar_d +#define __msa_srari_b __builtin_msa_srari_b +#define __msa_srari_h __builtin_msa_srari_h +#define __msa_srari_w __builtin_msa_srari_w +#define __msa_srari_d __builtin_msa_srari_d +#define __msa_srl_b __builtin_msa_srl_b +#define __msa_srl_h __builtin_msa_srl_h +#define __msa_srl_w __builtin_msa_srl_w +#define __msa_srl_d __builtin_msa_srl_d +#define __msa_srli_b __builtin_msa_srli_b +#define __msa_srli_h __builtin_msa_srli_h +#define __msa_srli_w __builtin_msa_srli_w +#define __msa_srli_d __builtin_msa_srli_d +#define __msa_srlr_b __builtin_msa_srlr_b +#define __msa_srlr_h __builtin_msa_srlr_h +#define __msa_srlr_w __builtin_msa_srlr_w +#define __msa_srlr_d __builtin_msa_srlr_d +#define __msa_srlri_b __builtin_msa_srlri_b +#define __msa_srlri_h __builtin_msa_srlri_h +#define __msa_srlri_w __builtin_msa_srlri_w +#define __msa_srlri_d __builtin_msa_srlri_d +#define __msa_bclr_b __builtin_msa_bclr_b +#define __msa_bclr_h __builtin_msa_bclr_h +#define __msa_bclr_w __builtin_msa_bclr_w +#define __msa_bclr_d 
__builtin_msa_bclr_d +#define __msa_bclri_b __builtin_msa_bclri_b +#define __msa_bclri_h __builtin_msa_bclri_h +#define __msa_bclri_w __builtin_msa_bclri_w +#define __msa_bclri_d __builtin_msa_bclri_d +#define __msa_bset_b __builtin_msa_bset_b +#define __msa_bset_h __builtin_msa_bset_h +#define __msa_bset_w __builtin_msa_bset_w +#define __msa_bset_d __builtin_msa_bset_d +#define __msa_bseti_b __builtin_msa_bseti_b +#define __msa_bseti_h __builtin_msa_bseti_h +#define __msa_bseti_w __builtin_msa_bseti_w +#define __msa_bseti_d __builtin_msa_bseti_d +#define __msa_bneg_b __builtin_msa_bneg_b +#define __msa_bneg_h __builtin_msa_bneg_h +#define __msa_bneg_w __builtin_msa_bneg_w +#define __msa_bneg_d __builtin_msa_bneg_d +#define __msa_bnegi_b __builtin_msa_bnegi_b +#define __msa_bnegi_h __builtin_msa_bnegi_h +#define __msa_bnegi_w __builtin_msa_bnegi_w +#define __msa_bnegi_d __builtin_msa_bnegi_d +#define __msa_binsl_b __builtin_msa_binsl_b +#define __msa_binsl_h __builtin_msa_binsl_h +#define __msa_binsl_w __builtin_msa_binsl_w +#define __msa_binsl_d __builtin_msa_binsl_d +#define __msa_binsli_b __builtin_msa_binsli_b +#define __msa_binsli_h __builtin_msa_binsli_h +#define __msa_binsli_w __builtin_msa_binsli_w +#define __msa_binsli_d __builtin_msa_binsli_d +#define __msa_binsr_b __builtin_msa_binsr_b +#define __msa_binsr_h __builtin_msa_binsr_h +#define __msa_binsr_w __builtin_msa_binsr_w +#define __msa_binsr_d __builtin_msa_binsr_d +#define __msa_binsri_b __builtin_msa_binsri_b +#define __msa_binsri_h __builtin_msa_binsri_h +#define __msa_binsri_w __builtin_msa_binsri_w +#define __msa_binsri_d __builtin_msa_binsri_d +#define __msa_addv_b __builtin_msa_addv_b +#define __msa_addv_h __builtin_msa_addv_h +#define __msa_addv_w __builtin_msa_addv_w +#define __msa_addv_d __builtin_msa_addv_d +#define __msa_addvi_b __builtin_msa_addvi_b +#define __msa_addvi_h __builtin_msa_addvi_h +#define __msa_addvi_w __builtin_msa_addvi_w +#define __msa_addvi_d __builtin_msa_addvi_d +#define __msa_subv_b __builtin_msa_subv_b +#define __msa_subv_h __builtin_msa_subv_h +#define __msa_subv_w __builtin_msa_subv_w +#define __msa_subv_d __builtin_msa_subv_d +#define __msa_subvi_b __builtin_msa_subvi_b +#define __msa_subvi_h __builtin_msa_subvi_h +#define __msa_subvi_w __builtin_msa_subvi_w +#define __msa_subvi_d __builtin_msa_subvi_d +#define __msa_max_s_b __builtin_msa_max_s_b +#define __msa_max_s_h __builtin_msa_max_s_h +#define __msa_max_s_w __builtin_msa_max_s_w +#define __msa_max_s_d __builtin_msa_max_s_d +#define __msa_maxi_s_b __builtin_msa_maxi_s_b +#define __msa_maxi_s_h __builtin_msa_maxi_s_h +#define __msa_maxi_s_w __builtin_msa_maxi_s_w +#define __msa_maxi_s_d __builtin_msa_maxi_s_d +#define __msa_max_u_b __builtin_msa_max_u_b +#define __msa_max_u_h __builtin_msa_max_u_h +#define __msa_max_u_w __builtin_msa_max_u_w +#define __msa_max_u_d __builtin_msa_max_u_d +#define __msa_maxi_u_b __builtin_msa_maxi_u_b +#define __msa_maxi_u_h __builtin_msa_maxi_u_h +#define __msa_maxi_u_w __builtin_msa_maxi_u_w +#define __msa_maxi_u_d __builtin_msa_maxi_u_d +#define __msa_min_s_b __builtin_msa_min_s_b +#define __msa_min_s_h __builtin_msa_min_s_h +#define __msa_min_s_w __builtin_msa_min_s_w +#define __msa_min_s_d __builtin_msa_min_s_d +#define __msa_mini_s_b __builtin_msa_mini_s_b +#define __msa_mini_s_h __builtin_msa_mini_s_h +#define __msa_mini_s_w __builtin_msa_mini_s_w +#define __msa_mini_s_d __builtin_msa_mini_s_d +#define __msa_min_u_b __builtin_msa_min_u_b +#define __msa_min_u_h __builtin_msa_min_u_h +#define 
__msa_min_u_w __builtin_msa_min_u_w +#define __msa_min_u_d __builtin_msa_min_u_d +#define __msa_mini_u_b __builtin_msa_mini_u_b +#define __msa_mini_u_h __builtin_msa_mini_u_h +#define __msa_mini_u_w __builtin_msa_mini_u_w +#define __msa_mini_u_d __builtin_msa_mini_u_d +#define __msa_max_a_b __builtin_msa_max_a_b +#define __msa_max_a_h __builtin_msa_max_a_h +#define __msa_max_a_w __builtin_msa_max_a_w +#define __msa_max_a_d __builtin_msa_max_a_d +#define __msa_min_a_b __builtin_msa_min_a_b +#define __msa_min_a_h __builtin_msa_min_a_h +#define __msa_min_a_w __builtin_msa_min_a_w +#define __msa_min_a_d __builtin_msa_min_a_d +#define __msa_ceq_b __builtin_msa_ceq_b +#define __msa_ceq_h __builtin_msa_ceq_h +#define __msa_ceq_w __builtin_msa_ceq_w +#define __msa_ceq_d __builtin_msa_ceq_d +#define __msa_ceqi_b __builtin_msa_ceqi_b +#define __msa_ceqi_h __builtin_msa_ceqi_h +#define __msa_ceqi_w __builtin_msa_ceqi_w +#define __msa_ceqi_d __builtin_msa_ceqi_d +#define __msa_clt_s_b __builtin_msa_clt_s_b +#define __msa_clt_s_h __builtin_msa_clt_s_h +#define __msa_clt_s_w __builtin_msa_clt_s_w +#define __msa_clt_s_d __builtin_msa_clt_s_d +#define __msa_clti_s_b __builtin_msa_clti_s_b +#define __msa_clti_s_h __builtin_msa_clti_s_h +#define __msa_clti_s_w __builtin_msa_clti_s_w +#define __msa_clti_s_d __builtin_msa_clti_s_d +#define __msa_clt_u_b __builtin_msa_clt_u_b +#define __msa_clt_u_h __builtin_msa_clt_u_h +#define __msa_clt_u_w __builtin_msa_clt_u_w +#define __msa_clt_u_d __builtin_msa_clt_u_d +#define __msa_clti_u_b __builtin_msa_clti_u_b +#define __msa_clti_u_h __builtin_msa_clti_u_h +#define __msa_clti_u_w __builtin_msa_clti_u_w +#define __msa_clti_u_d __builtin_msa_clti_u_d +#define __msa_cle_s_b __builtin_msa_cle_s_b +#define __msa_cle_s_h __builtin_msa_cle_s_h +#define __msa_cle_s_w __builtin_msa_cle_s_w +#define __msa_cle_s_d __builtin_msa_cle_s_d +#define __msa_clei_s_b __builtin_msa_clei_s_b +#define __msa_clei_s_h __builtin_msa_clei_s_h +#define __msa_clei_s_w __builtin_msa_clei_s_w +#define __msa_clei_s_d __builtin_msa_clei_s_d +#define __msa_cle_u_b __builtin_msa_cle_u_b +#define __msa_cle_u_h __builtin_msa_cle_u_h +#define __msa_cle_u_w __builtin_msa_cle_u_w +#define __msa_cle_u_d __builtin_msa_cle_u_d +#define __msa_clei_u_b __builtin_msa_clei_u_b +#define __msa_clei_u_h __builtin_msa_clei_u_h +#define __msa_clei_u_w __builtin_msa_clei_u_w +#define __msa_clei_u_d __builtin_msa_clei_u_d +#define __msa_ld_b __builtin_msa_ld_b +#define __msa_ld_h __builtin_msa_ld_h +#define __msa_ld_w __builtin_msa_ld_w +#define __msa_ld_d __builtin_msa_ld_d +#define __msa_ldr_d __builtin_msa_ldr_d +#define __msa_ldr_w __builtin_msa_ldrq_w +#define __msa_st_b __builtin_msa_st_b +#define __msa_st_h __builtin_msa_st_h +#define __msa_st_w __builtin_msa_st_w +#define __msa_st_d __builtin_msa_st_d +#define __msa_str_d __builtin_msa_str_d +#define __msa_str_w __builtin_msa_strq_w +#define __msa_sat_s_b __builtin_msa_sat_s_b +#define __msa_sat_s_h __builtin_msa_sat_s_h +#define __msa_sat_s_w __builtin_msa_sat_s_w +#define __msa_sat_s_d __builtin_msa_sat_s_d +#define __msa_sat_u_b __builtin_msa_sat_u_b +#define __msa_sat_u_h __builtin_msa_sat_u_h +#define __msa_sat_u_w __builtin_msa_sat_u_w +#define __msa_sat_u_d __builtin_msa_sat_u_d +#define __msa_add_a_b __builtin_msa_add_a_b +#define __msa_add_a_h __builtin_msa_add_a_h +#define __msa_add_a_w __builtin_msa_add_a_w +#define __msa_add_a_d __builtin_msa_add_a_d +#define __msa_adds_a_b __builtin_msa_adds_a_b +#define __msa_adds_a_h __builtin_msa_adds_a_h 
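/*
 * [Editor's illustrative sketch, not part of the patch] The macros in this
 * header alias the documented __msa_* spellings to clang's __builtin_msa_*
 * builtins, so MSA code written against GCC's msa.h compiles unchanged.
 * Assuming a MIPS target built with MSA enabled (e.g. -mmsa), a lane-wise
 * byte add over two 16-byte buffers using the __msa_ld_b/__msa_addv_b/
 * __msa_st_b macros above would look like this; the function and variable
 * names are hypothetical:
 */
void add_16_bytes(const signed char *a, const signed char *b,
                  signed char *out) {
  v16i8 va = __msa_ld_b(a, 0);       /* 16-byte vector load at offset 0 */
  v16i8 vb = __msa_ld_b(b, 0);
  v16i8 vr = __msa_addv_b(va, vb);   /* per-lane (wrapping) byte add */
  __msa_st_b(vr, out, 0);            /* 16-byte vector store */
}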
+#define __msa_adds_a_w __builtin_msa_adds_a_w +#define __msa_adds_a_d __builtin_msa_adds_a_d +#define __msa_adds_s_b __builtin_msa_adds_s_b +#define __msa_adds_s_h __builtin_msa_adds_s_h +#define __msa_adds_s_w __builtin_msa_adds_s_w +#define __msa_adds_s_d __builtin_msa_adds_s_d +#define __msa_adds_u_b __builtin_msa_adds_u_b +#define __msa_adds_u_h __builtin_msa_adds_u_h +#define __msa_adds_u_w __builtin_msa_adds_u_w +#define __msa_adds_u_d __builtin_msa_adds_u_d +#define __msa_ave_s_b __builtin_msa_ave_s_b +#define __msa_ave_s_h __builtin_msa_ave_s_h +#define __msa_ave_s_w __builtin_msa_ave_s_w +#define __msa_ave_s_d __builtin_msa_ave_s_d +#define __msa_ave_u_b __builtin_msa_ave_u_b +#define __msa_ave_u_h __builtin_msa_ave_u_h +#define __msa_ave_u_w __builtin_msa_ave_u_w +#define __msa_ave_u_d __builtin_msa_ave_u_d +#define __msa_aver_s_b __builtin_msa_aver_s_b +#define __msa_aver_s_h __builtin_msa_aver_s_h +#define __msa_aver_s_w __builtin_msa_aver_s_w +#define __msa_aver_s_d __builtin_msa_aver_s_d +#define __msa_aver_u_b __builtin_msa_aver_u_b +#define __msa_aver_u_h __builtin_msa_aver_u_h +#define __msa_aver_u_w __builtin_msa_aver_u_w +#define __msa_aver_u_d __builtin_msa_aver_u_d +#define __msa_subs_s_b __builtin_msa_subs_s_b +#define __msa_subs_s_h __builtin_msa_subs_s_h +#define __msa_subs_s_w __builtin_msa_subs_s_w +#define __msa_subs_s_d __builtin_msa_subs_s_d +#define __msa_subs_u_b __builtin_msa_subs_u_b +#define __msa_subs_u_h __builtin_msa_subs_u_h +#define __msa_subs_u_w __builtin_msa_subs_u_w +#define __msa_subs_u_d __builtin_msa_subs_u_d +#define __msa_subsuu_s_b __builtin_msa_subsuu_s_b +#define __msa_subsuu_s_h __builtin_msa_subsuu_s_h +#define __msa_subsuu_s_w __builtin_msa_subsuu_s_w +#define __msa_subsuu_s_d __builtin_msa_subsuu_s_d +#define __msa_subsus_u_b __builtin_msa_subsus_u_b +#define __msa_subsus_u_h __builtin_msa_subsus_u_h +#define __msa_subsus_u_w __builtin_msa_subsus_u_w +#define __msa_subsus_u_d __builtin_msa_subsus_u_d +#define __msa_asub_s_b __builtin_msa_asub_s_b +#define __msa_asub_s_h __builtin_msa_asub_s_h +#define __msa_asub_s_w __builtin_msa_asub_s_w +#define __msa_asub_s_d __builtin_msa_asub_s_d +#define __msa_asub_u_b __builtin_msa_asub_u_b +#define __msa_asub_u_h __builtin_msa_asub_u_h +#define __msa_asub_u_w __builtin_msa_asub_u_w +#define __msa_asub_u_d __builtin_msa_asub_u_d +#define __msa_mulv_b __builtin_msa_mulv_b +#define __msa_mulv_h __builtin_msa_mulv_h +#define __msa_mulv_w __builtin_msa_mulv_w +#define __msa_mulv_d __builtin_msa_mulv_d +#define __msa_maddv_b __builtin_msa_maddv_b +#define __msa_maddv_h __builtin_msa_maddv_h +#define __msa_maddv_w __builtin_msa_maddv_w +#define __msa_maddv_d __builtin_msa_maddv_d +#define __msa_msubv_b __builtin_msa_msubv_b +#define __msa_msubv_h __builtin_msa_msubv_h +#define __msa_msubv_w __builtin_msa_msubv_w +#define __msa_msubv_d __builtin_msa_msubv_d +#define __msa_div_s_b __builtin_msa_div_s_b +#define __msa_div_s_h __builtin_msa_div_s_h +#define __msa_div_s_w __builtin_msa_div_s_w +#define __msa_div_s_d __builtin_msa_div_s_d +#define __msa_div_u_b __builtin_msa_div_u_b +#define __msa_div_u_h __builtin_msa_div_u_h +#define __msa_div_u_w __builtin_msa_div_u_w +#define __msa_div_u_d __builtin_msa_div_u_d +#define __msa_hadd_s_h __builtin_msa_hadd_s_h +#define __msa_hadd_s_w __builtin_msa_hadd_s_w +#define __msa_hadd_s_d __builtin_msa_hadd_s_d +#define __msa_hadd_u_h __builtin_msa_hadd_u_h +#define __msa_hadd_u_w __builtin_msa_hadd_u_w +#define __msa_hadd_u_d __builtin_msa_hadd_u_d +#define 
__msa_hsub_s_h __builtin_msa_hsub_s_h +#define __msa_hsub_s_w __builtin_msa_hsub_s_w +#define __msa_hsub_s_d __builtin_msa_hsub_s_d +#define __msa_hsub_u_h __builtin_msa_hsub_u_h +#define __msa_hsub_u_w __builtin_msa_hsub_u_w +#define __msa_hsub_u_d __builtin_msa_hsub_u_d +#define __msa_mod_s_b __builtin_msa_mod_s_b +#define __msa_mod_s_h __builtin_msa_mod_s_h +#define __msa_mod_s_w __builtin_msa_mod_s_w +#define __msa_mod_s_d __builtin_msa_mod_s_d +#define __msa_mod_u_b __builtin_msa_mod_u_b +#define __msa_mod_u_h __builtin_msa_mod_u_h +#define __msa_mod_u_w __builtin_msa_mod_u_w +#define __msa_mod_u_d __builtin_msa_mod_u_d +#define __msa_dotp_s_h __builtin_msa_dotp_s_h +#define __msa_dotp_s_w __builtin_msa_dotp_s_w +#define __msa_dotp_s_d __builtin_msa_dotp_s_d +#define __msa_dotp_u_h __builtin_msa_dotp_u_h +#define __msa_dotp_u_w __builtin_msa_dotp_u_w +#define __msa_dotp_u_d __builtin_msa_dotp_u_d +#define __msa_dpadd_s_h __builtin_msa_dpadd_s_h +#define __msa_dpadd_s_w __builtin_msa_dpadd_s_w +#define __msa_dpadd_s_d __builtin_msa_dpadd_s_d +#define __msa_dpadd_u_h __builtin_msa_dpadd_u_h +#define __msa_dpadd_u_w __builtin_msa_dpadd_u_w +#define __msa_dpadd_u_d __builtin_msa_dpadd_u_d +#define __msa_dpsub_s_h __builtin_msa_dpsub_s_h +#define __msa_dpsub_s_w __builtin_msa_dpsub_s_w +#define __msa_dpsub_s_d __builtin_msa_dpsub_s_d +#define __msa_dpsub_u_h __builtin_msa_dpsub_u_h +#define __msa_dpsub_u_w __builtin_msa_dpsub_u_w +#define __msa_dpsub_u_d __builtin_msa_dpsub_u_d +#define __msa_sld_b __builtin_msa_sld_b +#define __msa_sld_h __builtin_msa_sld_h +#define __msa_sld_w __builtin_msa_sld_w +#define __msa_sld_d __builtin_msa_sld_d +#define __msa_sldi_b __builtin_msa_sldi_b +#define __msa_sldi_h __builtin_msa_sldi_h +#define __msa_sldi_w __builtin_msa_sldi_w +#define __msa_sldi_d __builtin_msa_sldi_d +#define __msa_splat_b __builtin_msa_splat_b +#define __msa_splat_h __builtin_msa_splat_h +#define __msa_splat_w __builtin_msa_splat_w +#define __msa_splat_d __builtin_msa_splat_d +#define __msa_splati_b __builtin_msa_splati_b +#define __msa_splati_h __builtin_msa_splati_h +#define __msa_splati_w __builtin_msa_splati_w +#define __msa_splati_d __builtin_msa_splati_d +#define __msa_pckev_b __builtin_msa_pckev_b +#define __msa_pckev_h __builtin_msa_pckev_h +#define __msa_pckev_w __builtin_msa_pckev_w +#define __msa_pckev_d __builtin_msa_pckev_d +#define __msa_pckod_b __builtin_msa_pckod_b +#define __msa_pckod_h __builtin_msa_pckod_h +#define __msa_pckod_w __builtin_msa_pckod_w +#define __msa_pckod_d __builtin_msa_pckod_d +#define __msa_ilvl_b __builtin_msa_ilvl_b +#define __msa_ilvl_h __builtin_msa_ilvl_h +#define __msa_ilvl_w __builtin_msa_ilvl_w +#define __msa_ilvl_d __builtin_msa_ilvl_d +#define __msa_ilvr_b __builtin_msa_ilvr_b +#define __msa_ilvr_h __builtin_msa_ilvr_h +#define __msa_ilvr_w __builtin_msa_ilvr_w +#define __msa_ilvr_d __builtin_msa_ilvr_d +#define __msa_ilvev_b __builtin_msa_ilvev_b +#define __msa_ilvev_h __builtin_msa_ilvev_h +#define __msa_ilvev_w __builtin_msa_ilvev_w +#define __msa_ilvev_d __builtin_msa_ilvev_d +#define __msa_ilvod_b __builtin_msa_ilvod_b +#define __msa_ilvod_h __builtin_msa_ilvod_h +#define __msa_ilvod_w __builtin_msa_ilvod_w +#define __msa_ilvod_d __builtin_msa_ilvod_d +#define __msa_vshf_b __builtin_msa_vshf_b +#define __msa_vshf_h __builtin_msa_vshf_h +#define __msa_vshf_w __builtin_msa_vshf_w +#define __msa_vshf_d __builtin_msa_vshf_d +#define __msa_and_v __builtin_msa_and_v +#define __msa_andi_b __builtin_msa_andi_b +#define __msa_or_v 
__builtin_msa_or_v +#define __msa_ori_b __builtin_msa_ori_b +#define __msa_nor_v __builtin_msa_nor_v +#define __msa_nori_b __builtin_msa_nori_b +#define __msa_xor_v __builtin_msa_xor_v +#define __msa_xori_b __builtin_msa_xori_b +#define __msa_bmnz_v __builtin_msa_bmnz_v +#define __msa_bmnzi_b __builtin_msa_bmnzi_b +#define __msa_bmz_v __builtin_msa_bmz_v +#define __msa_bmzi_b __builtin_msa_bmzi_b +#define __msa_bsel_v __builtin_msa_bsel_v +#define __msa_bseli_b __builtin_msa_bseli_b +#define __msa_shf_b __builtin_msa_shf_b +#define __msa_shf_h __builtin_msa_shf_h +#define __msa_shf_w __builtin_msa_shf_w +#define __msa_test_bnz_v __builtin_msa_bnz_v +#define __msa_test_bz_v __builtin_msa_bz_v +#define __msa_fill_b __builtin_msa_fill_b +#define __msa_fill_h __builtin_msa_fill_h +#define __msa_fill_w __builtin_msa_fill_w +#define __msa_fill_d __builtin_msa_fill_d +#define __msa_pcnt_b __builtin_msa_pcnt_b +#define __msa_pcnt_h __builtin_msa_pcnt_h +#define __msa_pcnt_w __builtin_msa_pcnt_w +#define __msa_pcnt_d __builtin_msa_pcnt_d +#define __msa_nloc_b __builtin_msa_nloc_b +#define __msa_nloc_h __builtin_msa_nloc_h +#define __msa_nloc_w __builtin_msa_nloc_w +#define __msa_nloc_d __builtin_msa_nloc_d +#define __msa_nlzc_b __builtin_msa_nlzc_b +#define __msa_nlzc_h __builtin_msa_nlzc_h +#define __msa_nlzc_w __builtin_msa_nlzc_w +#define __msa_nlzc_d __builtin_msa_nlzc_d +#define __msa_copy_s_b __builtin_msa_copy_s_b +#define __msa_copy_s_h __builtin_msa_copy_s_h +#define __msa_copy_s_w __builtin_msa_copy_s_w +#define __msa_copy_s_d __builtin_msa_copy_s_d +#define __msa_copy_u_b __builtin_msa_copy_u_b +#define __msa_copy_u_h __builtin_msa_copy_u_h +#define __msa_copy_u_w __builtin_msa_copy_u_w +#define __msa_copy_u_d __builtin_msa_copy_u_d +#define __msa_insert_b __builtin_msa_insert_b +#define __msa_insert_h __builtin_msa_insert_h +#define __msa_insert_w __builtin_msa_insert_w +#define __msa_insert_d __builtin_msa_insert_d +#define __msa_insve_b __builtin_msa_insve_b +#define __msa_insve_h __builtin_msa_insve_h +#define __msa_insve_w __builtin_msa_insve_w +#define __msa_insve_d __builtin_msa_insve_d +#define __msa_test_bnz_b __builtin_msa_bnz_b +#define __msa_test_bnz_h __builtin_msa_bnz_h +#define __msa_test_bnz_w __builtin_msa_bnz_w +#define __msa_test_bnz_d __builtin_msa_bnz_d +#define __msa_test_bz_b __builtin_msa_bz_b +#define __msa_test_bz_h __builtin_msa_bz_h +#define __msa_test_bz_w __builtin_msa_bz_w +#define __msa_test_bz_d __builtin_msa_bz_d +#define __msa_ldi_b __builtin_msa_ldi_b +#define __msa_ldi_h __builtin_msa_ldi_h +#define __msa_ldi_w __builtin_msa_ldi_w +#define __msa_ldi_d __builtin_msa_ldi_d +#define __msa_fcaf_w __builtin_msa_fcaf_w +#define __msa_fcaf_d __builtin_msa_fcaf_d +#define __msa_fcor_w __builtin_msa_fcor_w +#define __msa_fcor_d __builtin_msa_fcor_d +#define __msa_fcun_w __builtin_msa_fcun_w +#define __msa_fcun_d __builtin_msa_fcun_d +#define __msa_fcune_w __builtin_msa_fcune_w +#define __msa_fcune_d __builtin_msa_fcune_d +#define __msa_fcueq_w __builtin_msa_fcueq_w +#define __msa_fcueq_d __builtin_msa_fcueq_d +#define __msa_fceq_w __builtin_msa_fceq_w +#define __msa_fceq_d __builtin_msa_fceq_d +#define __msa_fcne_w __builtin_msa_fcne_w +#define __msa_fcne_d __builtin_msa_fcne_d +#define __msa_fclt_w __builtin_msa_fclt_w +#define __msa_fclt_d __builtin_msa_fclt_d +#define __msa_fcult_w __builtin_msa_fcult_w +#define __msa_fcult_d __builtin_msa_fcult_d +#define __msa_fcle_w __builtin_msa_fcle_w +#define __msa_fcle_d __builtin_msa_fcle_d +#define __msa_fcule_w 
__builtin_msa_fcule_w +#define __msa_fcule_d __builtin_msa_fcule_d +#define __msa_fsaf_w __builtin_msa_fsaf_w +#define __msa_fsaf_d __builtin_msa_fsaf_d +#define __msa_fsor_w __builtin_msa_fsor_w +#define __msa_fsor_d __builtin_msa_fsor_d +#define __msa_fsun_w __builtin_msa_fsun_w +#define __msa_fsun_d __builtin_msa_fsun_d +#define __msa_fsune_w __builtin_msa_fsune_w +#define __msa_fsune_d __builtin_msa_fsune_d +#define __msa_fsueq_w __builtin_msa_fsueq_w +#define __msa_fsueq_d __builtin_msa_fsueq_d +#define __msa_fseq_w __builtin_msa_fseq_w +#define __msa_fseq_d __builtin_msa_fseq_d +#define __msa_fsne_w __builtin_msa_fsne_w +#define __msa_fsne_d __builtin_msa_fsne_d +#define __msa_fslt_w __builtin_msa_fslt_w +#define __msa_fslt_d __builtin_msa_fslt_d +#define __msa_fsult_w __builtin_msa_fsult_w +#define __msa_fsult_d __builtin_msa_fsult_d +#define __msa_fsle_w __builtin_msa_fsle_w +#define __msa_fsle_d __builtin_msa_fsle_d +#define __msa_fsule_w __builtin_msa_fsule_w +#define __msa_fsule_d __builtin_msa_fsule_d +#define __msa_fadd_w __builtin_msa_fadd_w +#define __msa_fadd_d __builtin_msa_fadd_d +#define __msa_fsub_w __builtin_msa_fsub_w +#define __msa_fsub_d __builtin_msa_fsub_d +#define __msa_fmul_w __builtin_msa_fmul_w +#define __msa_fmul_d __builtin_msa_fmul_d +#define __msa_fdiv_w __builtin_msa_fdiv_w +#define __msa_fdiv_d __builtin_msa_fdiv_d +#define __msa_fmadd_w __builtin_msa_fmadd_w +#define __msa_fmadd_d __builtin_msa_fmadd_d +#define __msa_fmsub_w __builtin_msa_fmsub_w +#define __msa_fmsub_d __builtin_msa_fmsub_d +#define __msa_fexp2_w __builtin_msa_fexp2_w +#define __msa_fexp2_d __builtin_msa_fexp2_d +#define __msa_fexdo_h __builtin_msa_fexdo_h +#define __msa_fexdo_w __builtin_msa_fexdo_w +#define __msa_ftq_h __builtin_msa_ftq_h +#define __msa_ftq_w __builtin_msa_ftq_w +#define __msa_fmin_w __builtin_msa_fmin_w +#define __msa_fmin_d __builtin_msa_fmin_d +#define __msa_fmin_a_w __builtin_msa_fmin_a_w +#define __msa_fmin_a_d __builtin_msa_fmin_a_d +#define __msa_fmax_w __builtin_msa_fmax_w +#define __msa_fmax_d __builtin_msa_fmax_d +#define __msa_fmax_a_w __builtin_msa_fmax_a_w +#define __msa_fmax_a_d __builtin_msa_fmax_a_d +#define __msa_mul_q_h __builtin_msa_mul_q_h +#define __msa_mul_q_w __builtin_msa_mul_q_w +#define __msa_mulr_q_h __builtin_msa_mulr_q_h +#define __msa_mulr_q_w __builtin_msa_mulr_q_w +#define __msa_madd_q_h __builtin_msa_madd_q_h +#define __msa_madd_q_w __builtin_msa_madd_q_w +#define __msa_maddr_q_h __builtin_msa_maddr_q_h +#define __msa_maddr_q_w __builtin_msa_maddr_q_w +#define __msa_msub_q_h __builtin_msa_msub_q_h +#define __msa_msub_q_w __builtin_msa_msub_q_w +#define __msa_msubr_q_h __builtin_msa_msubr_q_h +#define __msa_msubr_q_w __builtin_msa_msubr_q_w +#define __msa_fclass_w __builtin_msa_fclass_w +#define __msa_fclass_d __builtin_msa_fclass_d +#define __msa_fsqrt_w __builtin_msa_fsqrt_w +#define __msa_fsqrt_d __builtin_msa_fsqrt_d +#define __msa_frcp_w __builtin_msa_frcp_w +#define __msa_frcp_d __builtin_msa_frcp_d +#define __msa_frint_w __builtin_msa_frint_w +#define __msa_frint_d __builtin_msa_frint_d +#define __msa_frsqrt_w __builtin_msa_frsqrt_w +#define __msa_frsqrt_d __builtin_msa_frsqrt_d +#define __msa_flog2_w __builtin_msa_flog2_w +#define __msa_flog2_d __builtin_msa_flog2_d +#define __msa_fexupl_w __builtin_msa_fexupl_w +#define __msa_fexupl_d __builtin_msa_fexupl_d +#define __msa_fexupr_w __builtin_msa_fexupr_w +#define __msa_fexupr_d __builtin_msa_fexupr_d +#define __msa_ffql_w __builtin_msa_ffql_w +#define __msa_ffql_d 
__builtin_msa_ffql_d
+#define __msa_ffqr_w __builtin_msa_ffqr_w
+#define __msa_ffqr_d __builtin_msa_ffqr_d
+#define __msa_ftint_s_w __builtin_msa_ftint_s_w
+#define __msa_ftint_s_d __builtin_msa_ftint_s_d
+#define __msa_ftint_u_w __builtin_msa_ftint_u_w
+#define __msa_ftint_u_d __builtin_msa_ftint_u_d
+#define __msa_ftrunc_s_w __builtin_msa_ftrunc_s_w
+#define __msa_ftrunc_s_d __builtin_msa_ftrunc_s_d
+#define __msa_ftrunc_u_w __builtin_msa_ftrunc_u_w
+#define __msa_ftrunc_u_d __builtin_msa_ftrunc_u_d
+#define __msa_ffint_s_w __builtin_msa_ffint_s_w
+#define __msa_ffint_s_d __builtin_msa_ffint_s_d
+#define __msa_ffint_u_w __builtin_msa_ffint_u_w
+#define __msa_ffint_u_d __builtin_msa_ffint_u_d
+#define __msa_cfcmsa __builtin_msa_cfcmsa
+#define __msa_move_v __builtin_msa_move_v
+#define __msa_cast_to_vector_float __builtin_msa_cast_to_vector_float
+#define __msa_cast_to_vector_double __builtin_msa_cast_to_vector_double
+#define __msa_cast_to_scalar_float __builtin_msa_cast_to_scalar_float
+#define __msa_cast_to_scalar_double __builtin_msa_cast_to_scalar_double
+#endif /* defined(__mips_msa) */
+#endif /* _MSA_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mwaitxintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mwaitxintrin.h
new file mode 100644
index 0000000..ed48538
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/mwaitxintrin.h
@@ -0,0 +1,33 @@
+/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <mwaitxintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __MWAITXINTRIN_H
+#define __MWAITXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints)
+{
+  __builtin_ia32_monitorx(__p, __extensions, __hints);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
+{
+  __builtin_ia32_mwaitx(__extensions, __hints, __clock);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __MWAITXINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/nmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/nmmintrin.h
new file mode 100644
index 0000000..672aea4
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/nmmintrin.h
@@ -0,0 +1,16 @@
+/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __NMMINTRIN_H
+#define __NMMINTRIN_H
+
+/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h,
+   just include it now then.  */
+#include <smmintrin.h>
+#endif /* __NMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/opencl-c-base.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/opencl-c-base.h
new file mode 100644
index 0000000..ff8b776
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/opencl-c-base.h
@@ -0,0 +1,756 @@
+//===----- opencl-c-base.h - OpenCL C language base definitions -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENCL_BASE_H_
+#define _OPENCL_BASE_H_
+
+// Define extension macros
+
+#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
+// For SPIR all extensions are supported.
+#if defined(__SPIR__)
+#define cl_khr_subgroup_extended_types 1
+#define cl_khr_subgroup_non_uniform_vote 1
+#define cl_khr_subgroup_ballot 1
+#define cl_khr_subgroup_non_uniform_arithmetic 1
+#define cl_khr_subgroup_shuffle 1
+#define cl_khr_subgroup_shuffle_relative 1
+#define cl_khr_subgroup_clustered_reduce 1
+#define cl_khr_extended_bit_ops 1
+#define cl_khr_integer_dot_product 1
+#define __opencl_c_integer_dot_product_input_4x8bit 1
+#define __opencl_c_integer_dot_product_input_4x8bit_packed 1
+
+#endif // defined(__SPIR__)
+#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
+
+// Define feature macros for OpenCL C 2.0
+#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ == 200)
+#define __opencl_c_pipes 1
+#define __opencl_c_generic_address_space 1
+#define __opencl_c_work_group_collective_functions 1
+#define __opencl_c_atomic_order_acq_rel 1
+#define __opencl_c_atomic_order_seq_cst 1
+#define __opencl_c_atomic_scope_device 1
+#define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_device_enqueue 1
+#define __opencl_c_read_write_images 1
+#define __opencl_c_program_scope_global_variables 1
+#define __opencl_c_images 1
+#endif
+
+// Define header-only feature macros for OpenCL C 3.0.
+#if (__OPENCL_C_VERSION__ == 300)
+// For the SPIR target all features are supported.
+#if defined(__SPIR__)
+#define __opencl_c_atomic_scope_all_devices 1
+#endif // defined(__SPIR__)
+#endif // (__OPENCL_C_VERSION__ == 300)
+
+// built-in scalar data types:
+
+/**
+ * An unsigned 8-bit integer.
+ */
+typedef unsigned char uchar;
+
+/**
+ * An unsigned 16-bit integer.
+ */
+typedef unsigned short ushort;
+
+/**
+ * An unsigned 32-bit integer.
+ */
+typedef unsigned int uint;
+
+/**
+ * An unsigned 64-bit integer.
+ */
+typedef unsigned long ulong;
+
+/**
+ * The unsigned integer type of the result of the sizeof operator. This
+ * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS
+ * defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if
+ * CL_DEVICE_ADDRESS_BITS is 64-bits.
+ */
+typedef __SIZE_TYPE__ size_t;
+
+/**
+ * A signed integer type that is the result of subtracting two pointers.
+ * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS
+ * defined in table 4.3 is 32-bits and is a 64-bit signed integer if
+ * CL_DEVICE_ADDRESS_BITS is 64-bits.
+ */ +typedef __PTRDIFF_TYPE__ ptrdiff_t; + +/** + * A signed integer type with the property that any valid pointer to + * void can be converted to this type, then converted back to pointer + * to void, and the result will compare equal to the original pointer. + */ +typedef __INTPTR_TYPE__ intptr_t; + +/** + * An unsigned integer type with the property that any valid pointer to + * void can be converted to this type, then converted back to pointer + * to void, and the result will compare equal to the original pointer. + */ +typedef __UINTPTR_TYPE__ uintptr_t; + +// built-in vector data types: +typedef char char2 __attribute__((ext_vector_type(2))); +typedef char char3 __attribute__((ext_vector_type(3))); +typedef char char4 __attribute__((ext_vector_type(4))); +typedef char char8 __attribute__((ext_vector_type(8))); +typedef char char16 __attribute__((ext_vector_type(16))); +typedef uchar uchar2 __attribute__((ext_vector_type(2))); +typedef uchar uchar3 __attribute__((ext_vector_type(3))); +typedef uchar uchar4 __attribute__((ext_vector_type(4))); +typedef uchar uchar8 __attribute__((ext_vector_type(8))); +typedef uchar uchar16 __attribute__((ext_vector_type(16))); +typedef short short2 __attribute__((ext_vector_type(2))); +typedef short short3 __attribute__((ext_vector_type(3))); +typedef short short4 __attribute__((ext_vector_type(4))); +typedef short short8 __attribute__((ext_vector_type(8))); +typedef short short16 __attribute__((ext_vector_type(16))); +typedef ushort ushort2 __attribute__((ext_vector_type(2))); +typedef ushort ushort3 __attribute__((ext_vector_type(3))); +typedef ushort ushort4 __attribute__((ext_vector_type(4))); +typedef ushort ushort8 __attribute__((ext_vector_type(8))); +typedef ushort ushort16 __attribute__((ext_vector_type(16))); +typedef int int2 __attribute__((ext_vector_type(2))); +typedef int int3 __attribute__((ext_vector_type(3))); +typedef int int4 __attribute__((ext_vector_type(4))); +typedef int int8 __attribute__((ext_vector_type(8))); +typedef int int16 __attribute__((ext_vector_type(16))); +typedef uint uint2 __attribute__((ext_vector_type(2))); +typedef uint uint3 __attribute__((ext_vector_type(3))); +typedef uint uint4 __attribute__((ext_vector_type(4))); +typedef uint uint8 __attribute__((ext_vector_type(8))); +typedef uint uint16 __attribute__((ext_vector_type(16))); +typedef long long2 __attribute__((ext_vector_type(2))); +typedef long long3 __attribute__((ext_vector_type(3))); +typedef long long4 __attribute__((ext_vector_type(4))); +typedef long long8 __attribute__((ext_vector_type(8))); +typedef long long16 __attribute__((ext_vector_type(16))); +typedef ulong ulong2 __attribute__((ext_vector_type(2))); +typedef ulong ulong3 __attribute__((ext_vector_type(3))); +typedef ulong ulong4 __attribute__((ext_vector_type(4))); +typedef ulong ulong8 __attribute__((ext_vector_type(8))); +typedef ulong ulong16 __attribute__((ext_vector_type(16))); +typedef float float2 __attribute__((ext_vector_type(2))); +typedef float float3 __attribute__((ext_vector_type(3))); +typedef float float4 __attribute__((ext_vector_type(4))); +typedef float float8 __attribute__((ext_vector_type(8))); +typedef float float16 __attribute__((ext_vector_type(16))); +#ifdef cl_khr_fp16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +typedef half half2 __attribute__((ext_vector_type(2))); +typedef half half3 __attribute__((ext_vector_type(3))); +typedef half half4 __attribute__((ext_vector_type(4))); +typedef half half8 __attribute__((ext_vector_type(8))); +typedef half half16 
__attribute__((ext_vector_type(16))); +#endif +#ifdef cl_khr_fp64 +#if __OPENCL_C_VERSION__ < CL_VERSION_1_2 +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#endif +typedef double double2 __attribute__((ext_vector_type(2))); +typedef double double3 __attribute__((ext_vector_type(3))); +typedef double double4 __attribute__((ext_vector_type(4))); +typedef double double8 __attribute__((ext_vector_type(8))); +typedef double double16 __attribute__((ext_vector_type(16))); +#endif + +#if defined(__OPENCL_CPP_VERSION__) +#define NULL nullptr +#elif defined(__OPENCL_C_VERSION__) +#define NULL ((void*)0) +#endif + +/** + * Value of maximum non-infinite single-precision floating-point + * number. + */ +#define MAXFLOAT 0x1.fffffep127f + +/** + * A positive float constant expression. HUGE_VALF evaluates + * to +infinity. Used as an error value returned by the built-in + * math functions. + */ +#define HUGE_VALF (__builtin_huge_valf()) + +/** + * A positive double constant expression. HUGE_VAL evaluates + * to +infinity. Used as an error value returned by the built-in + * math functions. + */ +#define HUGE_VAL (__builtin_huge_val()) + +/** + * A constant expression of type float representing positive or + * unsigned infinity. + */ +#define INFINITY (__builtin_inff()) + +/** + * A constant expression of type float representing a quiet NaN. + */ +#define NAN as_float(INT_MAX) + +#define FP_ILOGB0 INT_MIN +#define FP_ILOGBNAN INT_MAX + +#define FLT_DIG 6 +#define FLT_MANT_DIG 24 +#define FLT_MAX_10_EXP +38 +#define FLT_MAX_EXP +128 +#define FLT_MIN_10_EXP -37 +#define FLT_MIN_EXP -125 +#define FLT_RADIX 2 +#define FLT_MAX 0x1.fffffep127f +#define FLT_MIN 0x1.0p-126f +#define FLT_EPSILON 0x1.0p-23f + +#define M_E_F 2.71828182845904523536028747135266250f +#define M_LOG2E_F 1.44269504088896340735992468100189214f +#define M_LOG10E_F 0.434294481903251827651128918916605082f +#define M_LN2_F 0.693147180559945309417232121458176568f +#define M_LN10_F 2.30258509299404568401799145468436421f +#define M_PI_F 3.14159265358979323846264338327950288f +#define M_PI_2_F 1.57079632679489661923132169163975144f +#define M_PI_4_F 0.785398163397448309615660845819875721f +#define M_1_PI_F 0.318309886183790671537767526745028724f +#define M_2_PI_F 0.636619772367581343075535053490057448f +#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f +#define M_SQRT2_F 1.41421356237309504880168872420969808f +#define M_SQRT1_2_F 0.707106781186547524400844362104849039f + +#define DBL_DIG 15 +#define DBL_MANT_DIG 53 +#define DBL_MAX_10_EXP +308 +#define DBL_MAX_EXP +1024 +#define DBL_MIN_10_EXP -307 +#define DBL_MIN_EXP -1021 +#define DBL_RADIX 2 +#define DBL_MAX 0x1.fffffffffffffp1023 +#define DBL_MIN 0x1.0p-1022 +#define DBL_EPSILON 0x1.0p-52 + +#define M_E 0x1.5bf0a8b145769p+1 +#define M_LOG2E 0x1.71547652b82fep+0 +#define M_LOG10E 0x1.bcb7b1526e50ep-2 +#define M_LN2 0x1.62e42fefa39efp-1 +#define M_LN10 0x1.26bb1bbb55516p+1 +#define M_PI 0x1.921fb54442d18p+1 +#define M_PI_2 0x1.921fb54442d18p+0 +#define M_PI_4 0x1.921fb54442d18p-1 +#define M_1_PI 0x1.45f306dc9c883p-2 +#define M_2_PI 0x1.45f306dc9c883p-1 +#define M_2_SQRTPI 0x1.20dd750429b6dp+0 +#define M_SQRT2 0x1.6a09e667f3bcdp+0 +#define M_SQRT1_2 0x1.6a09e667f3bcdp-1 + +#ifdef cl_khr_fp16 + +#define HALF_DIG 3 +#define HALF_MANT_DIG 11 +#define HALF_MAX_10_EXP +4 +#define HALF_MAX_EXP +16 +#define HALF_MIN_10_EXP -4 +#define HALF_MIN_EXP -13 +#define HALF_RADIX 2 +#define HALF_MAX ((0x1.ffcp15h)) +#define HALF_MIN ((0x1.0p-14h)) +#define HALF_EPSILON ((0x1.0p-10h)) + +#define M_E_H 
2.71828182845904523536028747135266250h +#define M_LOG2E_H 1.44269504088896340735992468100189214h +#define M_LOG10E_H 0.434294481903251827651128918916605082h +#define M_LN2_H 0.693147180559945309417232121458176568h +#define M_LN10_H 2.30258509299404568401799145468436421h +#define M_PI_H 3.14159265358979323846264338327950288h +#define M_PI_2_H 1.57079632679489661923132169163975144h +#define M_PI_4_H 0.785398163397448309615660845819875721h +#define M_1_PI_H 0.318309886183790671537767526745028724h +#define M_2_PI_H 0.636619772367581343075535053490057448h +#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h +#define M_SQRT2_H 1.41421356237309504880168872420969808h +#define M_SQRT1_2_H 0.707106781186547524400844362104849039h + +#endif //cl_khr_fp16 + +#define CHAR_BIT 8 +#define SCHAR_MAX 127 +#define SCHAR_MIN (-128) +#define UCHAR_MAX 255 +#define CHAR_MAX SCHAR_MAX +#define CHAR_MIN SCHAR_MIN +#define USHRT_MAX 65535 +#define SHRT_MAX 32767 +#define SHRT_MIN (-32768) +#define UINT_MAX 0xffffffff +#define INT_MAX 2147483647 +#define INT_MIN (-2147483647-1) +#define ULONG_MAX 0xffffffffffffffffUL +#define LONG_MAX 0x7fffffffffffffffL +#define LONG_MIN (-0x7fffffffffffffffL-1) + +// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions + +// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence +typedef uint cl_mem_fence_flags; + +/** + * Queue a memory fence to ensure correct + * ordering of memory operations to local memory + */ +#define CLK_LOCAL_MEM_FENCE 0x01 + +/** + * Queue a memory fence to ensure correct + * ordering of memory operations to global memory + */ +#define CLK_GLOBAL_MEM_FENCE 0x02 + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +typedef enum memory_scope { + memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM, + memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP, + memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE, +#if defined(__opencl_c_atomic_scope_all_devices) + memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES, +#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0) + memory_scope_all_devices = memory_scope_all_svm_devices, +#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif // defined(__opencl_c_atomic_scope_all_devices) +/** + * Subgroups have different requirements on forward progress, so just test + * all the relevant macros. + * CL 3.0 sub-groups "they are not guaranteed to make independent forward progress" + * KHR subgroups "Subgroups within a workgroup are independent, make forward progress with respect to each other" + */ +#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups) + memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP +#endif +} memory_scope; + +/** + * Queue a memory fence to ensure correct ordering of memory + * operations between work-items of a work-group to + * image memory. 
+ */ +#define CLK_IMAGE_MEM_FENCE 0x04 + +#ifndef ATOMIC_VAR_INIT +#define ATOMIC_VAR_INIT(x) (x) +#endif //ATOMIC_VAR_INIT +#define ATOMIC_FLAG_INIT 0 + +// enum values aligned with what clang uses in EmitAtomicExpr() +typedef enum memory_order +{ + memory_order_relaxed = __ATOMIC_RELAXED, + memory_order_acquire = __ATOMIC_ACQUIRE, + memory_order_release = __ATOMIC_RELEASE, + memory_order_acq_rel = __ATOMIC_ACQ_REL, +#if defined(__opencl_c_atomic_order_seq_cst) + memory_order_seq_cst = __ATOMIC_SEQ_CST +#endif +} memory_order; + +#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions + +// These values need to match the runtime equivalent +// +// Addressing Mode. +// +#define CLK_ADDRESS_NONE 0 +#define CLK_ADDRESS_CLAMP_TO_EDGE 2 +#define CLK_ADDRESS_CLAMP 4 +#define CLK_ADDRESS_REPEAT 6 +#define CLK_ADDRESS_MIRRORED_REPEAT 8 + +// +// Coordination Normalization +// +#define CLK_NORMALIZED_COORDS_FALSE 0 +#define CLK_NORMALIZED_COORDS_TRUE 1 + +// +// Filtering Mode. +// +#define CLK_FILTER_NEAREST 0x10 +#define CLK_FILTER_LINEAR 0x20 + +#ifdef cl_khr_gl_msaa_sharing +#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable +#endif //cl_khr_gl_msaa_sharing + +// +// Channel Datatype. +// +#define CLK_SNORM_INT8 0x10D0 +#define CLK_SNORM_INT16 0x10D1 +#define CLK_UNORM_INT8 0x10D2 +#define CLK_UNORM_INT16 0x10D3 +#define CLK_UNORM_SHORT_565 0x10D4 +#define CLK_UNORM_SHORT_555 0x10D5 +#define CLK_UNORM_INT_101010 0x10D6 +#define CLK_SIGNED_INT8 0x10D7 +#define CLK_SIGNED_INT16 0x10D8 +#define CLK_SIGNED_INT32 0x10D9 +#define CLK_UNSIGNED_INT8 0x10DA +#define CLK_UNSIGNED_INT16 0x10DB +#define CLK_UNSIGNED_INT32 0x10DC +#define CLK_HALF_FLOAT 0x10DD +#define CLK_FLOAT 0x10DE +#define CLK_UNORM_INT24 0x10DF + +// Channel order, numbering must be aligned with cl_channel_order in cl.h +// +#define CLK_R 0x10B0 +#define CLK_A 0x10B1 +#define CLK_RG 0x10B2 +#define CLK_RA 0x10B3 +#define CLK_RGB 0x10B4 +#define CLK_RGBA 0x10B5 +#define CLK_BGRA 0x10B6 +#define CLK_ARGB 0x10B7 +#define CLK_INTENSITY 0x10B8 +#define CLK_LUMINANCE 0x10B9 +#define CLK_Rx 0x10BA +#define CLK_RGx 0x10BB +#define CLK_RGBx 0x10BC +#define CLK_DEPTH 0x10BD +#define CLK_DEPTH_STENCIL 0x10BE +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +#define CLK_sRGB 0x10BF +#define CLK_sRGBx 0x10C0 +#define CLK_sRGBA 0x10C1 +#define CLK_sBGRA 0x10C2 +#define CLK_ABGR 0x10C3 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v2.0 s6.13.16 - Pipe Functions +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t)) + +// OpenCL v2.0 s6.13.17 - Enqueue Kernels +#define CL_COMPLETE 0x0 +#define CL_RUNNING 0x1 +#define CL_SUBMITTED 0x2 +#define CL_QUEUED 0x3 + +#define CLK_SUCCESS 0 +#define CLK_ENQUEUE_FAILURE -101 +#define CLK_INVALID_QUEUE -102 +#define CLK_INVALID_NDRANGE -160 +#define CLK_INVALID_EVENT_WAIT_LIST -57 +#define CLK_DEVICE_QUEUE_FULL -161 +#define CLK_INVALID_ARG_SIZE -51 +#define CLK_EVENT_ALLOCATION_FAILURE -100 +#define CLK_OUT_OF_RESOURCES -5 + +#define CLK_NULL_QUEUE 0 +#define CLK_NULL_EVENT (__builtin_astype(((__SIZE_MAX__)), clk_event_t)) + +// execution model related definitions +#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0 +#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1 +#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2 + +typedef int kernel_enqueue_flags_t; +typedef int clk_profiling_info; + 
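/*
 * [Editor's illustrative sketch, not part of the patch] The CLK_*_MEM_FENCE
 * flags and the memory_order/memory_scope enums defined above are the values
 * kernels pass to barrier() and to the atomic builtins declared in
 * opencl-c.h. A minimal work-group reduction using the local-memory fence;
 * the kernel and argument names are hypothetical:
 */
__kernel void partial_sum(__global const float *in, __local float *tmp,
                          __global float *out) {
  size_t lid = get_local_id(0);
  tmp[lid] = in[get_global_id(0)];
  barrier(CLK_LOCAL_MEM_FENCE); /* local writes now visible work-group wide */
  if (lid == 0) {
    float s = 0.0f;
    for (size_t i = 0; i < get_local_size(0); ++i)
      s += tmp[i];
    out[get_group_id(0)] = s;
  }
}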
+// Profiling info name (see capture_event_profiling_info) +#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1 + +#define MAX_WORK_DIM 3 + +typedef struct { + unsigned int workDimension; + size_t globalWorkOffset[MAX_WORK_DIM]; + size_t globalWorkSize[MAX_WORK_DIM]; + size_t localWorkSize[MAX_WORK_DIM]; +} ndrange_t; + +#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators + * Reinterprets a data type as another data type of the same size + */ +#define as_char(x) __builtin_astype((x), char) +#define as_char2(x) __builtin_astype((x), char2) +#define as_char3(x) __builtin_astype((x), char3) +#define as_char4(x) __builtin_astype((x), char4) +#define as_char8(x) __builtin_astype((x), char8) +#define as_char16(x) __builtin_astype((x), char16) + +#define as_uchar(x) __builtin_astype((x), uchar) +#define as_uchar2(x) __builtin_astype((x), uchar2) +#define as_uchar3(x) __builtin_astype((x), uchar3) +#define as_uchar4(x) __builtin_astype((x), uchar4) +#define as_uchar8(x) __builtin_astype((x), uchar8) +#define as_uchar16(x) __builtin_astype((x), uchar16) + +#define as_short(x) __builtin_astype((x), short) +#define as_short2(x) __builtin_astype((x), short2) +#define as_short3(x) __builtin_astype((x), short3) +#define as_short4(x) __builtin_astype((x), short4) +#define as_short8(x) __builtin_astype((x), short8) +#define as_short16(x) __builtin_astype((x), short16) + +#define as_ushort(x) __builtin_astype((x), ushort) +#define as_ushort2(x) __builtin_astype((x), ushort2) +#define as_ushort3(x) __builtin_astype((x), ushort3) +#define as_ushort4(x) __builtin_astype((x), ushort4) +#define as_ushort8(x) __builtin_astype((x), ushort8) +#define as_ushort16(x) __builtin_astype((x), ushort16) + +#define as_int(x) __builtin_astype((x), int) +#define as_int2(x) __builtin_astype((x), int2) +#define as_int3(x) __builtin_astype((x), int3) +#define as_int4(x) __builtin_astype((x), int4) +#define as_int8(x) __builtin_astype((x), int8) +#define as_int16(x) __builtin_astype((x), int16) + +#define as_uint(x) __builtin_astype((x), uint) +#define as_uint2(x) __builtin_astype((x), uint2) +#define as_uint3(x) __builtin_astype((x), uint3) +#define as_uint4(x) __builtin_astype((x), uint4) +#define as_uint8(x) __builtin_astype((x), uint8) +#define as_uint16(x) __builtin_astype((x), uint16) + +#define as_long(x) __builtin_astype((x), long) +#define as_long2(x) __builtin_astype((x), long2) +#define as_long3(x) __builtin_astype((x), long3) +#define as_long4(x) __builtin_astype((x), long4) +#define as_long8(x) __builtin_astype((x), long8) +#define as_long16(x) __builtin_astype((x), long16) + +#define as_ulong(x) __builtin_astype((x), ulong) +#define as_ulong2(x) __builtin_astype((x), ulong2) +#define as_ulong3(x) __builtin_astype((x), ulong3) +#define as_ulong4(x) __builtin_astype((x), ulong4) +#define as_ulong8(x) __builtin_astype((x), ulong8) +#define as_ulong16(x) __builtin_astype((x), ulong16) + +#define as_float(x) __builtin_astype((x), float) +#define as_float2(x) __builtin_astype((x), float2) +#define as_float3(x) __builtin_astype((x), float3) +#define as_float4(x) __builtin_astype((x), float4) +#define as_float8(x) __builtin_astype((x), float8) +#define as_float16(x) __builtin_astype((x), float16) + +#ifdef cl_khr_fp64 +#define as_double(x) __builtin_astype((x), double) +#define as_double2(x) __builtin_astype((x), double2) +#define as_double3(x) __builtin_astype((x), double3) +#define as_double4(x) __builtin_astype((x), double4) +#define 
as_double8(x) __builtin_astype((x), double8)
+#define as_double16(x) __builtin_astype((x), double16)
+#endif // cl_khr_fp64
+
+#ifdef cl_khr_fp16
+#define as_half(x) __builtin_astype((x), half)
+#define as_half2(x) __builtin_astype((x), half2)
+#define as_half3(x) __builtin_astype((x), half3)
+#define as_half4(x) __builtin_astype((x), half4)
+#define as_half8(x) __builtin_astype((x), half8)
+#define as_half16(x) __builtin_astype((x), half16)
+#endif // cl_khr_fp16
+
+#define as_size_t(x) __builtin_astype((x), size_t)
+#define as_ptrdiff_t(x) __builtin_astype((x), ptrdiff_t)
+#define as_intptr_t(x) __builtin_astype((x), intptr_t)
+#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)
+
+// C++ for OpenCL - __remove_address_space
+#if defined(__OPENCL_CPP_VERSION__)
+template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+template <typename _Tp> struct __remove_address_space<__generic _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__global _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__private _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__local _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__constant _Tp> {
+  using type = _Tp;
+};
+#endif
+
+// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
+
+#define __kernel_exec(X, typen) __kernel \
+	__attribute__((work_group_size_hint(X, 1, 1))) \
+	__attribute__((vec_type_hint(typen)))
+
+#define kernel_exec(X, typen) __kernel \
+	__attribute__((work_group_size_hint(X, 1, 1))) \
+	__attribute__((vec_type_hint(typen)))
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
+
+int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
+#endif
+
+#ifdef cl_intel_device_side_avc_motion_estimation
+
+#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
+#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
+
+#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
+#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
+#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
+#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
+
+#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
+#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
+#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
+#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
+#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
+#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
+#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
+
+#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
+#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
+#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
+
+#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
+#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
+#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
+#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
+#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
+#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
+#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
+
+#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
+#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
+
+#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
+#define
CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1 +#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3 + +#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0 +#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1 +#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2 +#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3 + +#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10 +#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15 +#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20 +#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B +#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30 + +#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0 +#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2 +#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4 +#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8 + +#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0 +#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1 +#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2 + +#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0 +#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000 + +#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24) +#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24) +#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30) +#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30) + +#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00 +#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80 + +#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0 +#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6 +#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5 +#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3 + +#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60 +#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10 +#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8 +#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4 + +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7 +#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8 +#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0 +#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1 +#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2 +#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3 + +#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1 +#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2 
+#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3 + +#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0 +#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1 + +#define CLK_AVC_ME_INITIALIZE_INTEL 0x0 + +#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0 +#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0 +#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0 + +#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0 +#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0 +#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0 + +#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0 +#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0 +#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0 +#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0 + +#endif // cl_intel_device_side_avc_motion_estimation + +// Disable any extensions we may have enabled previously. +#pragma OPENCL EXTENSION all : disable + +#endif //_OPENCL_BASE_H_ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/opencl-c.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/opencl-c.h new file mode 100644 index 0000000..501126b --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/opencl-c.h @@ -0,0 +1,18168 @@ +//===--- opencl-c.h - OpenCL C language builtin function header -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _OPENCL_H_ +#define _OPENCL_H_ + +#include "opencl-c-base.h" + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#ifndef cl_khr_depth_images +#define cl_khr_depth_images +#endif //cl_khr_depth_images +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +#if __OPENCL_C_VERSION__ < CL_VERSION_2_0 +#ifdef cl_khr_3d_image_writes +#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable +#endif //cl_khr_3d_image_writes +#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0 + + +#if (defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && defined(__SPIR__) +#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin +#pragma OPENCL EXTENSION cl_intel_planar_yuv : end +#endif // (defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && defined(__SPIR__) + +#define __ovld __attribute__((overloadable)) +#define __conv __attribute__((convergent)) + +// Optimizations +#define __purefn __attribute__((pure)) +#define __cnfn __attribute__((const)) + + +// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions + +char __ovld __cnfn convert_char_rte(char); +char __ovld __cnfn convert_char_sat_rte(char); +char __ovld __cnfn convert_char_rtz(char); +char __ovld __cnfn convert_char_sat_rtz(char); +char __ovld __cnfn convert_char_rtp(char); +char __ovld __cnfn convert_char_sat_rtp(char); +char __ovld __cnfn convert_char_rtn(char); +char __ovld __cnfn convert_char_sat_rtn(char); +char __ovld __cnfn convert_char(char); +char __ovld __cnfn convert_char_sat(char); +char __ovld __cnfn convert_char_rte(uchar); +char __ovld __cnfn convert_char_sat_rte(uchar); +char __ovld __cnfn convert_char_rtz(uchar); +char __ovld __cnfn convert_char_sat_rtz(uchar); +char __ovld __cnfn convert_char_rtp(uchar); +char __ovld __cnfn convert_char_sat_rtp(uchar); +char __ovld 
__cnfn convert_char_rtn(uchar); +char __ovld __cnfn convert_char_sat_rtn(uchar); +char __ovld __cnfn convert_char(uchar); +char __ovld __cnfn convert_char_sat(uchar); +char __ovld __cnfn convert_char_rte(short); +char __ovld __cnfn convert_char_sat_rte(short); +char __ovld __cnfn convert_char_rtz(short); +char __ovld __cnfn convert_char_sat_rtz(short); +char __ovld __cnfn convert_char_rtp(short); +char __ovld __cnfn convert_char_sat_rtp(short); +char __ovld __cnfn convert_char_rtn(short); +char __ovld __cnfn convert_char_sat_rtn(short); +char __ovld __cnfn convert_char(short); +char __ovld __cnfn convert_char_sat(short); +char __ovld __cnfn convert_char_rte(ushort); +char __ovld __cnfn convert_char_sat_rte(ushort); +char __ovld __cnfn convert_char_rtz(ushort); +char __ovld __cnfn convert_char_sat_rtz(ushort); +char __ovld __cnfn convert_char_rtp(ushort); +char __ovld __cnfn convert_char_sat_rtp(ushort); +char __ovld __cnfn convert_char_rtn(ushort); +char __ovld __cnfn convert_char_sat_rtn(ushort); +char __ovld __cnfn convert_char(ushort); +char __ovld __cnfn convert_char_sat(ushort); +char __ovld __cnfn convert_char_rte(int); +char __ovld __cnfn convert_char_sat_rte(int); +char __ovld __cnfn convert_char_rtz(int); +char __ovld __cnfn convert_char_sat_rtz(int); +char __ovld __cnfn convert_char_rtp(int); +char __ovld __cnfn convert_char_sat_rtp(int); +char __ovld __cnfn convert_char_rtn(int); +char __ovld __cnfn convert_char_sat_rtn(int); +char __ovld __cnfn convert_char(int); +char __ovld __cnfn convert_char_sat(int); +char __ovld __cnfn convert_char_rte(uint); +char __ovld __cnfn convert_char_sat_rte(uint); +char __ovld __cnfn convert_char_rtz(uint); +char __ovld __cnfn convert_char_sat_rtz(uint); +char __ovld __cnfn convert_char_rtp(uint); +char __ovld __cnfn convert_char_sat_rtp(uint); +char __ovld __cnfn convert_char_rtn(uint); +char __ovld __cnfn convert_char_sat_rtn(uint); +char __ovld __cnfn convert_char(uint); +char __ovld __cnfn convert_char_sat(uint); +char __ovld __cnfn convert_char_rte(long); +char __ovld __cnfn convert_char_sat_rte(long); +char __ovld __cnfn convert_char_rtz(long); +char __ovld __cnfn convert_char_sat_rtz(long); +char __ovld __cnfn convert_char_rtp(long); +char __ovld __cnfn convert_char_sat_rtp(long); +char __ovld __cnfn convert_char_rtn(long); +char __ovld __cnfn convert_char_sat_rtn(long); +char __ovld __cnfn convert_char(long); +char __ovld __cnfn convert_char_sat(long); +char __ovld __cnfn convert_char_rte(ulong); +char __ovld __cnfn convert_char_sat_rte(ulong); +char __ovld __cnfn convert_char_rtz(ulong); +char __ovld __cnfn convert_char_sat_rtz(ulong); +char __ovld __cnfn convert_char_rtp(ulong); +char __ovld __cnfn convert_char_sat_rtp(ulong); +char __ovld __cnfn convert_char_rtn(ulong); +char __ovld __cnfn convert_char_sat_rtn(ulong); +char __ovld __cnfn convert_char(ulong); +char __ovld __cnfn convert_char_sat(ulong); +char __ovld __cnfn convert_char_rte(float); +char __ovld __cnfn convert_char_sat_rte(float); +char __ovld __cnfn convert_char_rtz(float); +char __ovld __cnfn convert_char_sat_rtz(float); +char __ovld __cnfn convert_char_rtp(float); +char __ovld __cnfn convert_char_sat_rtp(float); +char __ovld __cnfn convert_char_rtn(float); +char __ovld __cnfn convert_char_sat_rtn(float); +char __ovld __cnfn convert_char(float); +char __ovld __cnfn convert_char_sat(float); +uchar __ovld __cnfn convert_uchar_rte(char); +uchar __ovld __cnfn convert_uchar_sat_rte(char); +uchar __ovld __cnfn convert_uchar_rtz(char); +uchar __ovld __cnfn 
convert_uchar_sat_rtz(char); +uchar __ovld __cnfn convert_uchar_rtp(char); +uchar __ovld __cnfn convert_uchar_sat_rtp(char); +uchar __ovld __cnfn convert_uchar_rtn(char); +uchar __ovld __cnfn convert_uchar_sat_rtn(char); +uchar __ovld __cnfn convert_uchar(char); +uchar __ovld __cnfn convert_uchar_sat(char); +uchar __ovld __cnfn convert_uchar_rte(uchar); +uchar __ovld __cnfn convert_uchar_sat_rte(uchar); +uchar __ovld __cnfn convert_uchar_rtz(uchar); +uchar __ovld __cnfn convert_uchar_sat_rtz(uchar); +uchar __ovld __cnfn convert_uchar_rtp(uchar); +uchar __ovld __cnfn convert_uchar_sat_rtp(uchar); +uchar __ovld __cnfn convert_uchar_rtn(uchar); +uchar __ovld __cnfn convert_uchar_sat_rtn(uchar); +uchar __ovld __cnfn convert_uchar(uchar); +uchar __ovld __cnfn convert_uchar_sat(uchar); +uchar __ovld __cnfn convert_uchar_rte(short); +uchar __ovld __cnfn convert_uchar_sat_rte(short); +uchar __ovld __cnfn convert_uchar_rtz(short); +uchar __ovld __cnfn convert_uchar_sat_rtz(short); +uchar __ovld __cnfn convert_uchar_rtp(short); +uchar __ovld __cnfn convert_uchar_sat_rtp(short); +uchar __ovld __cnfn convert_uchar_rtn(short); +uchar __ovld __cnfn convert_uchar_sat_rtn(short); +uchar __ovld __cnfn convert_uchar(short); +uchar __ovld __cnfn convert_uchar_sat(short); +uchar __ovld __cnfn convert_uchar_rte(ushort); +uchar __ovld __cnfn convert_uchar_sat_rte(ushort); +uchar __ovld __cnfn convert_uchar_rtz(ushort); +uchar __ovld __cnfn convert_uchar_sat_rtz(ushort); +uchar __ovld __cnfn convert_uchar_rtp(ushort); +uchar __ovld __cnfn convert_uchar_sat_rtp(ushort); +uchar __ovld __cnfn convert_uchar_rtn(ushort); +uchar __ovld __cnfn convert_uchar_sat_rtn(ushort); +uchar __ovld __cnfn convert_uchar(ushort); +uchar __ovld __cnfn convert_uchar_sat(ushort); +uchar __ovld __cnfn convert_uchar_rte(int); +uchar __ovld __cnfn convert_uchar_sat_rte(int); +uchar __ovld __cnfn convert_uchar_rtz(int); +uchar __ovld __cnfn convert_uchar_sat_rtz(int); +uchar __ovld __cnfn convert_uchar_rtp(int); +uchar __ovld __cnfn convert_uchar_sat_rtp(int); +uchar __ovld __cnfn convert_uchar_rtn(int); +uchar __ovld __cnfn convert_uchar_sat_rtn(int); +uchar __ovld __cnfn convert_uchar(int); +uchar __ovld __cnfn convert_uchar_sat(int); +uchar __ovld __cnfn convert_uchar_rte(uint); +uchar __ovld __cnfn convert_uchar_sat_rte(uint); +uchar __ovld __cnfn convert_uchar_rtz(uint); +uchar __ovld __cnfn convert_uchar_sat_rtz(uint); +uchar __ovld __cnfn convert_uchar_rtp(uint); +uchar __ovld __cnfn convert_uchar_sat_rtp(uint); +uchar __ovld __cnfn convert_uchar_rtn(uint); +uchar __ovld __cnfn convert_uchar_sat_rtn(uint); +uchar __ovld __cnfn convert_uchar(uint); +uchar __ovld __cnfn convert_uchar_sat(uint); +uchar __ovld __cnfn convert_uchar_rte(long); +uchar __ovld __cnfn convert_uchar_sat_rte(long); +uchar __ovld __cnfn convert_uchar_rtz(long); +uchar __ovld __cnfn convert_uchar_sat_rtz(long); +uchar __ovld __cnfn convert_uchar_rtp(long); +uchar __ovld __cnfn convert_uchar_sat_rtp(long); +uchar __ovld __cnfn convert_uchar_rtn(long); +uchar __ovld __cnfn convert_uchar_sat_rtn(long); +uchar __ovld __cnfn convert_uchar(long); +uchar __ovld __cnfn convert_uchar_sat(long); +uchar __ovld __cnfn convert_uchar_rte(ulong); +uchar __ovld __cnfn convert_uchar_sat_rte(ulong); +uchar __ovld __cnfn convert_uchar_rtz(ulong); +uchar __ovld __cnfn convert_uchar_sat_rtz(ulong); +uchar __ovld __cnfn convert_uchar_rtp(ulong); +uchar __ovld __cnfn convert_uchar_sat_rtp(ulong); +uchar __ovld __cnfn convert_uchar_rtn(ulong); +uchar __ovld __cnfn 
convert_uchar_sat_rtn(ulong); +uchar __ovld __cnfn convert_uchar(ulong); +uchar __ovld __cnfn convert_uchar_sat(ulong); +uchar __ovld __cnfn convert_uchar_rte(float); +uchar __ovld __cnfn convert_uchar_sat_rte(float); +uchar __ovld __cnfn convert_uchar_rtz(float); +uchar __ovld __cnfn convert_uchar_sat_rtz(float); +uchar __ovld __cnfn convert_uchar_rtp(float); +uchar __ovld __cnfn convert_uchar_sat_rtp(float); +uchar __ovld __cnfn convert_uchar_rtn(float); +uchar __ovld __cnfn convert_uchar_sat_rtn(float); +uchar __ovld __cnfn convert_uchar(float); +uchar __ovld __cnfn convert_uchar_sat(float); + +short __ovld __cnfn convert_short_rte(char); +short __ovld __cnfn convert_short_sat_rte(char); +short __ovld __cnfn convert_short_rtz(char); +short __ovld __cnfn convert_short_sat_rtz(char); +short __ovld __cnfn convert_short_rtp(char); +short __ovld __cnfn convert_short_sat_rtp(char); +short __ovld __cnfn convert_short_rtn(char); +short __ovld __cnfn convert_short_sat_rtn(char); +short __ovld __cnfn convert_short(char); +short __ovld __cnfn convert_short_sat(char); +short __ovld __cnfn convert_short_rte(uchar); +short __ovld __cnfn convert_short_sat_rte(uchar); +short __ovld __cnfn convert_short_rtz(uchar); +short __ovld __cnfn convert_short_sat_rtz(uchar); +short __ovld __cnfn convert_short_rtp(uchar); +short __ovld __cnfn convert_short_sat_rtp(uchar); +short __ovld __cnfn convert_short_rtn(uchar); +short __ovld __cnfn convert_short_sat_rtn(uchar); +short __ovld __cnfn convert_short(uchar); +short __ovld __cnfn convert_short_sat(uchar); +short __ovld __cnfn convert_short_rte(short); +short __ovld __cnfn convert_short_sat_rte(short); +short __ovld __cnfn convert_short_rtz(short); +short __ovld __cnfn convert_short_sat_rtz(short); +short __ovld __cnfn convert_short_rtp(short); +short __ovld __cnfn convert_short_sat_rtp(short); +short __ovld __cnfn convert_short_rtn(short); +short __ovld __cnfn convert_short_sat_rtn(short); +short __ovld __cnfn convert_short(short); +short __ovld __cnfn convert_short_sat(short); +short __ovld __cnfn convert_short_rte(ushort); +short __ovld __cnfn convert_short_sat_rte(ushort); +short __ovld __cnfn convert_short_rtz(ushort); +short __ovld __cnfn convert_short_sat_rtz(ushort); +short __ovld __cnfn convert_short_rtp(ushort); +short __ovld __cnfn convert_short_sat_rtp(ushort); +short __ovld __cnfn convert_short_rtn(ushort); +short __ovld __cnfn convert_short_sat_rtn(ushort); +short __ovld __cnfn convert_short(ushort); +short __ovld __cnfn convert_short_sat(ushort); +short __ovld __cnfn convert_short_rte(int); +short __ovld __cnfn convert_short_sat_rte(int); +short __ovld __cnfn convert_short_rtz(int); +short __ovld __cnfn convert_short_sat_rtz(int); +short __ovld __cnfn convert_short_rtp(int); +short __ovld __cnfn convert_short_sat_rtp(int); +short __ovld __cnfn convert_short_rtn(int); +short __ovld __cnfn convert_short_sat_rtn(int); +short __ovld __cnfn convert_short(int); +short __ovld __cnfn convert_short_sat(int); +short __ovld __cnfn convert_short_rte(uint); +short __ovld __cnfn convert_short_sat_rte(uint); +short __ovld __cnfn convert_short_rtz(uint); +short __ovld __cnfn convert_short_sat_rtz(uint); +short __ovld __cnfn convert_short_rtp(uint); +short __ovld __cnfn convert_short_sat_rtp(uint); +short __ovld __cnfn convert_short_rtn(uint); +short __ovld __cnfn convert_short_sat_rtn(uint); +short __ovld __cnfn convert_short(uint); +short __ovld __cnfn convert_short_sat(uint); +short __ovld __cnfn convert_short_rte(long); +short __ovld __cnfn 
convert_short_sat_rte(long); +short __ovld __cnfn convert_short_rtz(long); +short __ovld __cnfn convert_short_sat_rtz(long); +short __ovld __cnfn convert_short_rtp(long); +short __ovld __cnfn convert_short_sat_rtp(long); +short __ovld __cnfn convert_short_rtn(long); +short __ovld __cnfn convert_short_sat_rtn(long); +short __ovld __cnfn convert_short(long); +short __ovld __cnfn convert_short_sat(long); +short __ovld __cnfn convert_short_rte(ulong); +short __ovld __cnfn convert_short_sat_rte(ulong); +short __ovld __cnfn convert_short_rtz(ulong); +short __ovld __cnfn convert_short_sat_rtz(ulong); +short __ovld __cnfn convert_short_rtp(ulong); +short __ovld __cnfn convert_short_sat_rtp(ulong); +short __ovld __cnfn convert_short_rtn(ulong); +short __ovld __cnfn convert_short_sat_rtn(ulong); +short __ovld __cnfn convert_short(ulong); +short __ovld __cnfn convert_short_sat(ulong); +short __ovld __cnfn convert_short_rte(float); +short __ovld __cnfn convert_short_sat_rte(float); +short __ovld __cnfn convert_short_rtz(float); +short __ovld __cnfn convert_short_sat_rtz(float); +short __ovld __cnfn convert_short_rtp(float); +short __ovld __cnfn convert_short_sat_rtp(float); +short __ovld __cnfn convert_short_rtn(float); +short __ovld __cnfn convert_short_sat_rtn(float); +short __ovld __cnfn convert_short(float); +short __ovld __cnfn convert_short_sat(float); +ushort __ovld __cnfn convert_ushort_rte(char); +ushort __ovld __cnfn convert_ushort_sat_rte(char); +ushort __ovld __cnfn convert_ushort_rtz(char); +ushort __ovld __cnfn convert_ushort_sat_rtz(char); +ushort __ovld __cnfn convert_ushort_rtp(char); +ushort __ovld __cnfn convert_ushort_sat_rtp(char); +ushort __ovld __cnfn convert_ushort_rtn(char); +ushort __ovld __cnfn convert_ushort_sat_rtn(char); +ushort __ovld __cnfn convert_ushort(char); +ushort __ovld __cnfn convert_ushort_sat(char); +ushort __ovld __cnfn convert_ushort_rte(uchar); +ushort __ovld __cnfn convert_ushort_sat_rte(uchar); +ushort __ovld __cnfn convert_ushort_rtz(uchar); +ushort __ovld __cnfn convert_ushort_sat_rtz(uchar); +ushort __ovld __cnfn convert_ushort_rtp(uchar); +ushort __ovld __cnfn convert_ushort_sat_rtp(uchar); +ushort __ovld __cnfn convert_ushort_rtn(uchar); +ushort __ovld __cnfn convert_ushort_sat_rtn(uchar); +ushort __ovld __cnfn convert_ushort(uchar); +ushort __ovld __cnfn convert_ushort_sat(uchar); +ushort __ovld __cnfn convert_ushort_rte(short); +ushort __ovld __cnfn convert_ushort_sat_rte(short); +ushort __ovld __cnfn convert_ushort_rtz(short); +ushort __ovld __cnfn convert_ushort_sat_rtz(short); +ushort __ovld __cnfn convert_ushort_rtp(short); +ushort __ovld __cnfn convert_ushort_sat_rtp(short); +ushort __ovld __cnfn convert_ushort_rtn(short); +ushort __ovld __cnfn convert_ushort_sat_rtn(short); +ushort __ovld __cnfn convert_ushort(short); +ushort __ovld __cnfn convert_ushort_sat(short); +ushort __ovld __cnfn convert_ushort_rte(ushort); +ushort __ovld __cnfn convert_ushort_sat_rte(ushort); +ushort __ovld __cnfn convert_ushort_rtz(ushort); +ushort __ovld __cnfn convert_ushort_sat_rtz(ushort); +ushort __ovld __cnfn convert_ushort_rtp(ushort); +ushort __ovld __cnfn convert_ushort_sat_rtp(ushort); +ushort __ovld __cnfn convert_ushort_rtn(ushort); +ushort __ovld __cnfn convert_ushort_sat_rtn(ushort); +ushort __ovld __cnfn convert_ushort(ushort); +ushort __ovld __cnfn convert_ushort_sat(ushort); +ushort __ovld __cnfn convert_ushort_rte(int); +ushort __ovld __cnfn convert_ushort_sat_rte(int); +ushort __ovld __cnfn convert_ushort_rtz(int); +ushort __ovld __cnfn 
convert_ushort_sat_rtz(int); +ushort __ovld __cnfn convert_ushort_rtp(int); +ushort __ovld __cnfn convert_ushort_sat_rtp(int); +ushort __ovld __cnfn convert_ushort_rtn(int); +ushort __ovld __cnfn convert_ushort_sat_rtn(int); +ushort __ovld __cnfn convert_ushort(int); +ushort __ovld __cnfn convert_ushort_sat(int); +ushort __ovld __cnfn convert_ushort_rte(uint); +ushort __ovld __cnfn convert_ushort_sat_rte(uint); +ushort __ovld __cnfn convert_ushort_rtz(uint); +ushort __ovld __cnfn convert_ushort_sat_rtz(uint); +ushort __ovld __cnfn convert_ushort_rtp(uint); +ushort __ovld __cnfn convert_ushort_sat_rtp(uint); +ushort __ovld __cnfn convert_ushort_rtn(uint); +ushort __ovld __cnfn convert_ushort_sat_rtn(uint); +ushort __ovld __cnfn convert_ushort(uint); +ushort __ovld __cnfn convert_ushort_sat(uint); +ushort __ovld __cnfn convert_ushort_rte(long); +ushort __ovld __cnfn convert_ushort_sat_rte(long); +ushort __ovld __cnfn convert_ushort_rtz(long); +ushort __ovld __cnfn convert_ushort_sat_rtz(long); +ushort __ovld __cnfn convert_ushort_rtp(long); +ushort __ovld __cnfn convert_ushort_sat_rtp(long); +ushort __ovld __cnfn convert_ushort_rtn(long); +ushort __ovld __cnfn convert_ushort_sat_rtn(long); +ushort __ovld __cnfn convert_ushort(long); +ushort __ovld __cnfn convert_ushort_sat(long); +ushort __ovld __cnfn convert_ushort_rte(ulong); +ushort __ovld __cnfn convert_ushort_sat_rte(ulong); +ushort __ovld __cnfn convert_ushort_rtz(ulong); +ushort __ovld __cnfn convert_ushort_sat_rtz(ulong); +ushort __ovld __cnfn convert_ushort_rtp(ulong); +ushort __ovld __cnfn convert_ushort_sat_rtp(ulong); +ushort __ovld __cnfn convert_ushort_rtn(ulong); +ushort __ovld __cnfn convert_ushort_sat_rtn(ulong); +ushort __ovld __cnfn convert_ushort(ulong); +ushort __ovld __cnfn convert_ushort_sat(ulong); +ushort __ovld __cnfn convert_ushort_rte(float); +ushort __ovld __cnfn convert_ushort_sat_rte(float); +ushort __ovld __cnfn convert_ushort_rtz(float); +ushort __ovld __cnfn convert_ushort_sat_rtz(float); +ushort __ovld __cnfn convert_ushort_rtp(float); +ushort __ovld __cnfn convert_ushort_sat_rtp(float); +ushort __ovld __cnfn convert_ushort_rtn(float); +ushort __ovld __cnfn convert_ushort_sat_rtn(float); +ushort __ovld __cnfn convert_ushort(float); +ushort __ovld __cnfn convert_ushort_sat(float); +int __ovld __cnfn convert_int_rte(char); +int __ovld __cnfn convert_int_sat_rte(char); +int __ovld __cnfn convert_int_rtz(char); +int __ovld __cnfn convert_int_sat_rtz(char); +int __ovld __cnfn convert_int_rtp(char); +int __ovld __cnfn convert_int_sat_rtp(char); +int __ovld __cnfn convert_int_rtn(char); +int __ovld __cnfn convert_int_sat_rtn(char); +int __ovld __cnfn convert_int(char); +int __ovld __cnfn convert_int_sat(char); +int __ovld __cnfn convert_int_rte(uchar); +int __ovld __cnfn convert_int_sat_rte(uchar); +int __ovld __cnfn convert_int_rtz(uchar); +int __ovld __cnfn convert_int_sat_rtz(uchar); +int __ovld __cnfn convert_int_rtp(uchar); +int __ovld __cnfn convert_int_sat_rtp(uchar); +int __ovld __cnfn convert_int_rtn(uchar); +int __ovld __cnfn convert_int_sat_rtn(uchar); +int __ovld __cnfn convert_int(uchar); +int __ovld __cnfn convert_int_sat(uchar); +int __ovld __cnfn convert_int_rte(short); +int __ovld __cnfn convert_int_sat_rte(short); +int __ovld __cnfn convert_int_rtz(short); +int __ovld __cnfn convert_int_sat_rtz(short); +int __ovld __cnfn convert_int_rtp(short); +int __ovld __cnfn convert_int_sat_rtp(short); +int __ovld __cnfn convert_int_rtn(short); +int __ovld __cnfn convert_int_sat_rtn(short); +int __ovld 
__cnfn convert_int(short); +int __ovld __cnfn convert_int_sat(short); +int __ovld __cnfn convert_int_rte(ushort); +int __ovld __cnfn convert_int_sat_rte(ushort); +int __ovld __cnfn convert_int_rtz(ushort); +int __ovld __cnfn convert_int_sat_rtz(ushort); +int __ovld __cnfn convert_int_rtp(ushort); +int __ovld __cnfn convert_int_sat_rtp(ushort); +int __ovld __cnfn convert_int_rtn(ushort); +int __ovld __cnfn convert_int_sat_rtn(ushort); +int __ovld __cnfn convert_int(ushort); +int __ovld __cnfn convert_int_sat(ushort); +int __ovld __cnfn convert_int_rte(int); +int __ovld __cnfn convert_int_sat_rte(int); +int __ovld __cnfn convert_int_rtz(int); +int __ovld __cnfn convert_int_sat_rtz(int); +int __ovld __cnfn convert_int_rtp(int); +int __ovld __cnfn convert_int_sat_rtp(int); +int __ovld __cnfn convert_int_rtn(int); +int __ovld __cnfn convert_int_sat_rtn(int); +int __ovld __cnfn convert_int(int); +int __ovld __cnfn convert_int_sat(int); +int __ovld __cnfn convert_int_rte(uint); +int __ovld __cnfn convert_int_sat_rte(uint); +int __ovld __cnfn convert_int_rtz(uint); +int __ovld __cnfn convert_int_sat_rtz(uint); +int __ovld __cnfn convert_int_rtp(uint); +int __ovld __cnfn convert_int_sat_rtp(uint); +int __ovld __cnfn convert_int_rtn(uint); +int __ovld __cnfn convert_int_sat_rtn(uint); +int __ovld __cnfn convert_int(uint); +int __ovld __cnfn convert_int_sat(uint); +int __ovld __cnfn convert_int_rte(long); +int __ovld __cnfn convert_int_sat_rte(long); +int __ovld __cnfn convert_int_rtz(long); +int __ovld __cnfn convert_int_sat_rtz(long); +int __ovld __cnfn convert_int_rtp(long); +int __ovld __cnfn convert_int_sat_rtp(long); +int __ovld __cnfn convert_int_rtn(long); +int __ovld __cnfn convert_int_sat_rtn(long); +int __ovld __cnfn convert_int(long); +int __ovld __cnfn convert_int_sat(long); +int __ovld __cnfn convert_int_rte(ulong); +int __ovld __cnfn convert_int_sat_rte(ulong); +int __ovld __cnfn convert_int_rtz(ulong); +int __ovld __cnfn convert_int_sat_rtz(ulong); +int __ovld __cnfn convert_int_rtp(ulong); +int __ovld __cnfn convert_int_sat_rtp(ulong); +int __ovld __cnfn convert_int_rtn(ulong); +int __ovld __cnfn convert_int_sat_rtn(ulong); +int __ovld __cnfn convert_int(ulong); +int __ovld __cnfn convert_int_sat(ulong); +int __ovld __cnfn convert_int_rte(float); +int __ovld __cnfn convert_int_sat_rte(float); +int __ovld __cnfn convert_int_rtz(float); +int __ovld __cnfn convert_int_sat_rtz(float); +int __ovld __cnfn convert_int_rtp(float); +int __ovld __cnfn convert_int_sat_rtp(float); +int __ovld __cnfn convert_int_rtn(float); +int __ovld __cnfn convert_int_sat_rtn(float); +int __ovld __cnfn convert_int(float); +int __ovld __cnfn convert_int_sat(float); +uint __ovld __cnfn convert_uint_rte(char); +uint __ovld __cnfn convert_uint_sat_rte(char); +uint __ovld __cnfn convert_uint_rtz(char); +uint __ovld __cnfn convert_uint_sat_rtz(char); +uint __ovld __cnfn convert_uint_rtp(char); +uint __ovld __cnfn convert_uint_sat_rtp(char); +uint __ovld __cnfn convert_uint_rtn(char); +uint __ovld __cnfn convert_uint_sat_rtn(char); +uint __ovld __cnfn convert_uint(char); +uint __ovld __cnfn convert_uint_sat(char); +uint __ovld __cnfn convert_uint_rte(uchar); +uint __ovld __cnfn convert_uint_sat_rte(uchar); +uint __ovld __cnfn convert_uint_rtz(uchar); +uint __ovld __cnfn convert_uint_sat_rtz(uchar); +uint __ovld __cnfn convert_uint_rtp(uchar); +uint __ovld __cnfn convert_uint_sat_rtp(uchar); +uint __ovld __cnfn convert_uint_rtn(uchar); +uint __ovld __cnfn convert_uint_sat_rtn(uchar); +uint __ovld __cnfn 
convert_uint(uchar); +uint __ovld __cnfn convert_uint_sat(uchar); +uint __ovld __cnfn convert_uint_rte(short); +uint __ovld __cnfn convert_uint_sat_rte(short); +uint __ovld __cnfn convert_uint_rtz(short); +uint __ovld __cnfn convert_uint_sat_rtz(short); +uint __ovld __cnfn convert_uint_rtp(short); +uint __ovld __cnfn convert_uint_sat_rtp(short); +uint __ovld __cnfn convert_uint_rtn(short); +uint __ovld __cnfn convert_uint_sat_rtn(short); +uint __ovld __cnfn convert_uint(short); +uint __ovld __cnfn convert_uint_sat(short); +uint __ovld __cnfn convert_uint_rte(ushort); +uint __ovld __cnfn convert_uint_sat_rte(ushort); +uint __ovld __cnfn convert_uint_rtz(ushort); +uint __ovld __cnfn convert_uint_sat_rtz(ushort); +uint __ovld __cnfn convert_uint_rtp(ushort); +uint __ovld __cnfn convert_uint_sat_rtp(ushort); +uint __ovld __cnfn convert_uint_rtn(ushort); +uint __ovld __cnfn convert_uint_sat_rtn(ushort); +uint __ovld __cnfn convert_uint(ushort); +uint __ovld __cnfn convert_uint_sat(ushort); +uint __ovld __cnfn convert_uint_rte(int); +uint __ovld __cnfn convert_uint_sat_rte(int); +uint __ovld __cnfn convert_uint_rtz(int); +uint __ovld __cnfn convert_uint_sat_rtz(int); +uint __ovld __cnfn convert_uint_rtp(int); +uint __ovld __cnfn convert_uint_sat_rtp(int); +uint __ovld __cnfn convert_uint_rtn(int); +uint __ovld __cnfn convert_uint_sat_rtn(int); +uint __ovld __cnfn convert_uint(int); +uint __ovld __cnfn convert_uint_sat(int); +uint __ovld __cnfn convert_uint_rte(uint); +uint __ovld __cnfn convert_uint_sat_rte(uint); +uint __ovld __cnfn convert_uint_rtz(uint); +uint __ovld __cnfn convert_uint_sat_rtz(uint); +uint __ovld __cnfn convert_uint_rtp(uint); +uint __ovld __cnfn convert_uint_sat_rtp(uint); +uint __ovld __cnfn convert_uint_rtn(uint); +uint __ovld __cnfn convert_uint_sat_rtn(uint); +uint __ovld __cnfn convert_uint(uint); +uint __ovld __cnfn convert_uint_sat(uint); +uint __ovld __cnfn convert_uint_rte(long); +uint __ovld __cnfn convert_uint_sat_rte(long); +uint __ovld __cnfn convert_uint_rtz(long); +uint __ovld __cnfn convert_uint_sat_rtz(long); +uint __ovld __cnfn convert_uint_rtp(long); +uint __ovld __cnfn convert_uint_sat_rtp(long); +uint __ovld __cnfn convert_uint_rtn(long); +uint __ovld __cnfn convert_uint_sat_rtn(long); +uint __ovld __cnfn convert_uint(long); +uint __ovld __cnfn convert_uint_sat(long); +uint __ovld __cnfn convert_uint_rte(ulong); +uint __ovld __cnfn convert_uint_sat_rte(ulong); +uint __ovld __cnfn convert_uint_rtz(ulong); +uint __ovld __cnfn convert_uint_sat_rtz(ulong); +uint __ovld __cnfn convert_uint_rtp(ulong); +uint __ovld __cnfn convert_uint_sat_rtp(ulong); +uint __ovld __cnfn convert_uint_rtn(ulong); +uint __ovld __cnfn convert_uint_sat_rtn(ulong); +uint __ovld __cnfn convert_uint(ulong); +uint __ovld __cnfn convert_uint_sat(ulong); +uint __ovld __cnfn convert_uint_rte(float); +uint __ovld __cnfn convert_uint_sat_rte(float); +uint __ovld __cnfn convert_uint_rtz(float); +uint __ovld __cnfn convert_uint_sat_rtz(float); +uint __ovld __cnfn convert_uint_rtp(float); +uint __ovld __cnfn convert_uint_sat_rtp(float); +uint __ovld __cnfn convert_uint_rtn(float); +uint __ovld __cnfn convert_uint_sat_rtn(float); +uint __ovld __cnfn convert_uint(float); +uint __ovld __cnfn convert_uint_sat(float); +long __ovld __cnfn convert_long_rte(char); +long __ovld __cnfn convert_long_sat_rte(char); +long __ovld __cnfn convert_long_rtz(char); +long __ovld __cnfn convert_long_sat_rtz(char); +long __ovld __cnfn convert_long_rtp(char); +long __ovld __cnfn convert_long_sat_rtp(char); +long 
__ovld __cnfn convert_long_rtn(char); +long __ovld __cnfn convert_long_sat_rtn(char); +long __ovld __cnfn convert_long(char); +long __ovld __cnfn convert_long_sat(char); +long __ovld __cnfn convert_long_rte(uchar); +long __ovld __cnfn convert_long_sat_rte(uchar); +long __ovld __cnfn convert_long_rtz(uchar); +long __ovld __cnfn convert_long_sat_rtz(uchar); +long __ovld __cnfn convert_long_rtp(uchar); +long __ovld __cnfn convert_long_sat_rtp(uchar); +long __ovld __cnfn convert_long_rtn(uchar); +long __ovld __cnfn convert_long_sat_rtn(uchar); +long __ovld __cnfn convert_long(uchar); +long __ovld __cnfn convert_long_sat(uchar); +long __ovld __cnfn convert_long_rte(short); +long __ovld __cnfn convert_long_sat_rte(short); +long __ovld __cnfn convert_long_rtz(short); +long __ovld __cnfn convert_long_sat_rtz(short); +long __ovld __cnfn convert_long_rtp(short); +long __ovld __cnfn convert_long_sat_rtp(short); +long __ovld __cnfn convert_long_rtn(short); +long __ovld __cnfn convert_long_sat_rtn(short); +long __ovld __cnfn convert_long(short); +long __ovld __cnfn convert_long_sat(short); +long __ovld __cnfn convert_long_rte(ushort); +long __ovld __cnfn convert_long_sat_rte(ushort); +long __ovld __cnfn convert_long_rtz(ushort); +long __ovld __cnfn convert_long_sat_rtz(ushort); +long __ovld __cnfn convert_long_rtp(ushort); +long __ovld __cnfn convert_long_sat_rtp(ushort); +long __ovld __cnfn convert_long_rtn(ushort); +long __ovld __cnfn convert_long_sat_rtn(ushort); +long __ovld __cnfn convert_long(ushort); +long __ovld __cnfn convert_long_sat(ushort); +long __ovld __cnfn convert_long_rte(int); +long __ovld __cnfn convert_long_sat_rte(int); +long __ovld __cnfn convert_long_rtz(int); +long __ovld __cnfn convert_long_sat_rtz(int); +long __ovld __cnfn convert_long_rtp(int); +long __ovld __cnfn convert_long_sat_rtp(int); +long __ovld __cnfn convert_long_rtn(int); +long __ovld __cnfn convert_long_sat_rtn(int); +long __ovld __cnfn convert_long(int); +long __ovld __cnfn convert_long_sat(int); +long __ovld __cnfn convert_long_rte(uint); +long __ovld __cnfn convert_long_sat_rte(uint); +long __ovld __cnfn convert_long_rtz(uint); +long __ovld __cnfn convert_long_sat_rtz(uint); +long __ovld __cnfn convert_long_rtp(uint); +long __ovld __cnfn convert_long_sat_rtp(uint); +long __ovld __cnfn convert_long_rtn(uint); +long __ovld __cnfn convert_long_sat_rtn(uint); +long __ovld __cnfn convert_long(uint); +long __ovld __cnfn convert_long_sat(uint); +long __ovld __cnfn convert_long_rte(long); +long __ovld __cnfn convert_long_sat_rte(long); +long __ovld __cnfn convert_long_rtz(long); +long __ovld __cnfn convert_long_sat_rtz(long); +long __ovld __cnfn convert_long_rtp(long); +long __ovld __cnfn convert_long_sat_rtp(long); +long __ovld __cnfn convert_long_rtn(long); +long __ovld __cnfn convert_long_sat_rtn(long); +long __ovld __cnfn convert_long(long); +long __ovld __cnfn convert_long_sat(long); +long __ovld __cnfn convert_long_rte(ulong); +long __ovld __cnfn convert_long_sat_rte(ulong); +long __ovld __cnfn convert_long_rtz(ulong); +long __ovld __cnfn convert_long_sat_rtz(ulong); +long __ovld __cnfn convert_long_rtp(ulong); +long __ovld __cnfn convert_long_sat_rtp(ulong); +long __ovld __cnfn convert_long_rtn(ulong); +long __ovld __cnfn convert_long_sat_rtn(ulong); +long __ovld __cnfn convert_long(ulong); +long __ovld __cnfn convert_long_sat(ulong); +long __ovld __cnfn convert_long_rte(float); +long __ovld __cnfn convert_long_sat_rte(float); +long __ovld __cnfn convert_long_rtz(float); +long __ovld __cnfn 
convert_long_sat_rtz(float); +long __ovld __cnfn convert_long_rtp(float); +long __ovld __cnfn convert_long_sat_rtp(float); +long __ovld __cnfn convert_long_rtn(float); +long __ovld __cnfn convert_long_sat_rtn(float); +long __ovld __cnfn convert_long(float); +long __ovld __cnfn convert_long_sat(float); +ulong __ovld __cnfn convert_ulong_rte(char); +ulong __ovld __cnfn convert_ulong_sat_rte(char); +ulong __ovld __cnfn convert_ulong_rtz(char); +ulong __ovld __cnfn convert_ulong_sat_rtz(char); +ulong __ovld __cnfn convert_ulong_rtp(char); +ulong __ovld __cnfn convert_ulong_sat_rtp(char); +ulong __ovld __cnfn convert_ulong_rtn(char); +ulong __ovld __cnfn convert_ulong_sat_rtn(char); +ulong __ovld __cnfn convert_ulong(char); +ulong __ovld __cnfn convert_ulong_sat(char); +ulong __ovld __cnfn convert_ulong_rte(uchar); +ulong __ovld __cnfn convert_ulong_sat_rte(uchar); +ulong __ovld __cnfn convert_ulong_rtz(uchar); +ulong __ovld __cnfn convert_ulong_sat_rtz(uchar); +ulong __ovld __cnfn convert_ulong_rtp(uchar); +ulong __ovld __cnfn convert_ulong_sat_rtp(uchar); +ulong __ovld __cnfn convert_ulong_rtn(uchar); +ulong __ovld __cnfn convert_ulong_sat_rtn(uchar); +ulong __ovld __cnfn convert_ulong(uchar); +ulong __ovld __cnfn convert_ulong_sat(uchar); +ulong __ovld __cnfn convert_ulong_rte(short); +ulong __ovld __cnfn convert_ulong_sat_rte(short); +ulong __ovld __cnfn convert_ulong_rtz(short); +ulong __ovld __cnfn convert_ulong_sat_rtz(short); +ulong __ovld __cnfn convert_ulong_rtp(short); +ulong __ovld __cnfn convert_ulong_sat_rtp(short); +ulong __ovld __cnfn convert_ulong_rtn(short); +ulong __ovld __cnfn convert_ulong_sat_rtn(short); +ulong __ovld __cnfn convert_ulong(short); +ulong __ovld __cnfn convert_ulong_sat(short); +ulong __ovld __cnfn convert_ulong_rte(ushort); +ulong __ovld __cnfn convert_ulong_sat_rte(ushort); +ulong __ovld __cnfn convert_ulong_rtz(ushort); +ulong __ovld __cnfn convert_ulong_sat_rtz(ushort); +ulong __ovld __cnfn convert_ulong_rtp(ushort); +ulong __ovld __cnfn convert_ulong_sat_rtp(ushort); +ulong __ovld __cnfn convert_ulong_rtn(ushort); +ulong __ovld __cnfn convert_ulong_sat_rtn(ushort); +ulong __ovld __cnfn convert_ulong(ushort); +ulong __ovld __cnfn convert_ulong_sat(ushort); +ulong __ovld __cnfn convert_ulong_rte(int); +ulong __ovld __cnfn convert_ulong_sat_rte(int); +ulong __ovld __cnfn convert_ulong_rtz(int); +ulong __ovld __cnfn convert_ulong_sat_rtz(int); +ulong __ovld __cnfn convert_ulong_rtp(int); +ulong __ovld __cnfn convert_ulong_sat_rtp(int); +ulong __ovld __cnfn convert_ulong_rtn(int); +ulong __ovld __cnfn convert_ulong_sat_rtn(int); +ulong __ovld __cnfn convert_ulong(int); +ulong __ovld __cnfn convert_ulong_sat(int); +ulong __ovld __cnfn convert_ulong_rte(uint); +ulong __ovld __cnfn convert_ulong_sat_rte(uint); +ulong __ovld __cnfn convert_ulong_rtz(uint); +ulong __ovld __cnfn convert_ulong_sat_rtz(uint); +ulong __ovld __cnfn convert_ulong_rtp(uint); +ulong __ovld __cnfn convert_ulong_sat_rtp(uint); +ulong __ovld __cnfn convert_ulong_rtn(uint); +ulong __ovld __cnfn convert_ulong_sat_rtn(uint); +ulong __ovld __cnfn convert_ulong(uint); +ulong __ovld __cnfn convert_ulong_sat(uint); +ulong __ovld __cnfn convert_ulong_rte(long); +ulong __ovld __cnfn convert_ulong_sat_rte(long); +ulong __ovld __cnfn convert_ulong_rtz(long); +ulong __ovld __cnfn convert_ulong_sat_rtz(long); +ulong __ovld __cnfn convert_ulong_rtp(long); +ulong __ovld __cnfn convert_ulong_sat_rtp(long); +ulong __ovld __cnfn convert_ulong_rtn(long); +ulong __ovld __cnfn convert_ulong_sat_rtn(long); 
+ulong __ovld __cnfn convert_ulong(long); +ulong __ovld __cnfn convert_ulong_sat(long); +ulong __ovld __cnfn convert_ulong_rte(ulong); +ulong __ovld __cnfn convert_ulong_sat_rte(ulong); +ulong __ovld __cnfn convert_ulong_rtz(ulong); +ulong __ovld __cnfn convert_ulong_sat_rtz(ulong); +ulong __ovld __cnfn convert_ulong_rtp(ulong); +ulong __ovld __cnfn convert_ulong_sat_rtp(ulong); +ulong __ovld __cnfn convert_ulong_rtn(ulong); +ulong __ovld __cnfn convert_ulong_sat_rtn(ulong); +ulong __ovld __cnfn convert_ulong(ulong); +ulong __ovld __cnfn convert_ulong_sat(ulong); +ulong __ovld __cnfn convert_ulong_rte(float); +ulong __ovld __cnfn convert_ulong_sat_rte(float); +ulong __ovld __cnfn convert_ulong_rtz(float); +ulong __ovld __cnfn convert_ulong_sat_rtz(float); +ulong __ovld __cnfn convert_ulong_rtp(float); +ulong __ovld __cnfn convert_ulong_sat_rtp(float); +ulong __ovld __cnfn convert_ulong_rtn(float); +ulong __ovld __cnfn convert_ulong_sat_rtn(float); +ulong __ovld __cnfn convert_ulong(float); +ulong __ovld __cnfn convert_ulong_sat(float); +float __ovld __cnfn convert_float_rte(char); +float __ovld __cnfn convert_float_rtz(char); +float __ovld __cnfn convert_float_rtp(char); +float __ovld __cnfn convert_float_rtn(char); +float __ovld __cnfn convert_float(char); +float __ovld __cnfn convert_float_rte(uchar); +float __ovld __cnfn convert_float_rtz(uchar); +float __ovld __cnfn convert_float_rtp(uchar); +float __ovld __cnfn convert_float_rtn(uchar); +float __ovld __cnfn convert_float(uchar); +float __ovld __cnfn convert_float_rte(short); +float __ovld __cnfn convert_float_rtz(short); +float __ovld __cnfn convert_float_rtp(short); +float __ovld __cnfn convert_float_rtn(short); +float __ovld __cnfn convert_float(short); +float __ovld __cnfn convert_float_rte(ushort); +float __ovld __cnfn convert_float_rtz(ushort); +float __ovld __cnfn convert_float_rtp(ushort); +float __ovld __cnfn convert_float_rtn(ushort); +float __ovld __cnfn convert_float(ushort); +float __ovld __cnfn convert_float_rte(int); +float __ovld __cnfn convert_float_rtz(int); +float __ovld __cnfn convert_float_rtp(int); +float __ovld __cnfn convert_float_rtn(int); +float __ovld __cnfn convert_float(int); +float __ovld __cnfn convert_float_rte(uint); +float __ovld __cnfn convert_float_rtz(uint); +float __ovld __cnfn convert_float_rtp(uint); +float __ovld __cnfn convert_float_rtn(uint); +float __ovld __cnfn convert_float(uint); +float __ovld __cnfn convert_float_rte(long); +float __ovld __cnfn convert_float_rtz(long); +float __ovld __cnfn convert_float_rtp(long); +float __ovld __cnfn convert_float_rtn(long); +float __ovld __cnfn convert_float(long); +float __ovld __cnfn convert_float_rte(ulong); +float __ovld __cnfn convert_float_rtz(ulong); +float __ovld __cnfn convert_float_rtp(ulong); +float __ovld __cnfn convert_float_rtn(ulong); +float __ovld __cnfn convert_float(ulong); +float __ovld __cnfn convert_float_rte(float); +float __ovld __cnfn convert_float_rtz(float); +float __ovld __cnfn convert_float_rtp(float); +float __ovld __cnfn convert_float_rtn(float); +float __ovld __cnfn convert_float(float); +char2 __ovld __cnfn convert_char2_rte(char2); +char2 __ovld __cnfn convert_char2_sat_rte(char2); +char2 __ovld __cnfn convert_char2_rtz(char2); +char2 __ovld __cnfn convert_char2_sat_rtz(char2); +char2 __ovld __cnfn convert_char2_rtp(char2); +char2 __ovld __cnfn convert_char2_sat_rtp(char2); +char2 __ovld __cnfn convert_char2_rtn(char2); +char2 __ovld __cnfn convert_char2_sat_rtn(char2); +char2 __ovld __cnfn convert_char2(char2); +char2 
__ovld __cnfn convert_char2_sat(char2); +char2 __ovld __cnfn convert_char2_rte(uchar2); +char2 __ovld __cnfn convert_char2_sat_rte(uchar2); +char2 __ovld __cnfn convert_char2_rtz(uchar2); +char2 __ovld __cnfn convert_char2_sat_rtz(uchar2); +char2 __ovld __cnfn convert_char2_rtp(uchar2); +char2 __ovld __cnfn convert_char2_sat_rtp(uchar2); +char2 __ovld __cnfn convert_char2_rtn(uchar2); +char2 __ovld __cnfn convert_char2_sat_rtn(uchar2); +char2 __ovld __cnfn convert_char2(uchar2); +char2 __ovld __cnfn convert_char2_sat(uchar2); +char2 __ovld __cnfn convert_char2_rte(short2); +char2 __ovld __cnfn convert_char2_sat_rte(short2); +char2 __ovld __cnfn convert_char2_rtz(short2); +char2 __ovld __cnfn convert_char2_sat_rtz(short2); +char2 __ovld __cnfn convert_char2_rtp(short2); +char2 __ovld __cnfn convert_char2_sat_rtp(short2); +char2 __ovld __cnfn convert_char2_rtn(short2); +char2 __ovld __cnfn convert_char2_sat_rtn(short2); +char2 __ovld __cnfn convert_char2(short2); +char2 __ovld __cnfn convert_char2_sat(short2); +char2 __ovld __cnfn convert_char2_rte(ushort2); +char2 __ovld __cnfn convert_char2_sat_rte(ushort2); +char2 __ovld __cnfn convert_char2_rtz(ushort2); +char2 __ovld __cnfn convert_char2_sat_rtz(ushort2); +char2 __ovld __cnfn convert_char2_rtp(ushort2); +char2 __ovld __cnfn convert_char2_sat_rtp(ushort2); +char2 __ovld __cnfn convert_char2_rtn(ushort2); +char2 __ovld __cnfn convert_char2_sat_rtn(ushort2); +char2 __ovld __cnfn convert_char2(ushort2); +char2 __ovld __cnfn convert_char2_sat(ushort2); +char2 __ovld __cnfn convert_char2_rte(int2); +char2 __ovld __cnfn convert_char2_sat_rte(int2); +char2 __ovld __cnfn convert_char2_rtz(int2); +char2 __ovld __cnfn convert_char2_sat_rtz(int2); +char2 __ovld __cnfn convert_char2_rtp(int2); +char2 __ovld __cnfn convert_char2_sat_rtp(int2); +char2 __ovld __cnfn convert_char2_rtn(int2); +char2 __ovld __cnfn convert_char2_sat_rtn(int2); +char2 __ovld __cnfn convert_char2(int2); +char2 __ovld __cnfn convert_char2_sat(int2); +char2 __ovld __cnfn convert_char2_rte(uint2); +char2 __ovld __cnfn convert_char2_sat_rte(uint2); +char2 __ovld __cnfn convert_char2_rtz(uint2); +char2 __ovld __cnfn convert_char2_sat_rtz(uint2); +char2 __ovld __cnfn convert_char2_rtp(uint2); +char2 __ovld __cnfn convert_char2_sat_rtp(uint2); +char2 __ovld __cnfn convert_char2_rtn(uint2); +char2 __ovld __cnfn convert_char2_sat_rtn(uint2); +char2 __ovld __cnfn convert_char2(uint2); +char2 __ovld __cnfn convert_char2_sat(uint2); +char2 __ovld __cnfn convert_char2_rte(long2); +char2 __ovld __cnfn convert_char2_sat_rte(long2); +char2 __ovld __cnfn convert_char2_rtz(long2); +char2 __ovld __cnfn convert_char2_sat_rtz(long2); +char2 __ovld __cnfn convert_char2_rtp(long2); +char2 __ovld __cnfn convert_char2_sat_rtp(long2); +char2 __ovld __cnfn convert_char2_rtn(long2); +char2 __ovld __cnfn convert_char2_sat_rtn(long2); +char2 __ovld __cnfn convert_char2(long2); +char2 __ovld __cnfn convert_char2_sat(long2); +char2 __ovld __cnfn convert_char2_rte(ulong2); +char2 __ovld __cnfn convert_char2_sat_rte(ulong2); +char2 __ovld __cnfn convert_char2_rtz(ulong2); +char2 __ovld __cnfn convert_char2_sat_rtz(ulong2); +char2 __ovld __cnfn convert_char2_rtp(ulong2); +char2 __ovld __cnfn convert_char2_sat_rtp(ulong2); +char2 __ovld __cnfn convert_char2_rtn(ulong2); +char2 __ovld __cnfn convert_char2_sat_rtn(ulong2); +char2 __ovld __cnfn convert_char2(ulong2); +char2 __ovld __cnfn convert_char2_sat(ulong2); +char2 __ovld __cnfn convert_char2_rte(float2); +char2 __ovld __cnfn convert_char2_sat_rte(float2); 
+char2 __ovld __cnfn convert_char2_rtz(float2); +char2 __ovld __cnfn convert_char2_sat_rtz(float2); +char2 __ovld __cnfn convert_char2_rtp(float2); +char2 __ovld __cnfn convert_char2_sat_rtp(float2); +char2 __ovld __cnfn convert_char2_rtn(float2); +char2 __ovld __cnfn convert_char2_sat_rtn(float2); +char2 __ovld __cnfn convert_char2(float2); +char2 __ovld __cnfn convert_char2_sat(float2); +uchar2 __ovld __cnfn convert_uchar2_rte(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2); +uchar2 __ovld __cnfn convert_uchar2_rtz(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2); +uchar2 __ovld __cnfn convert_uchar2_rtp(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2); +uchar2 __ovld __cnfn convert_uchar2_rtn(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2); +uchar2 __ovld __cnfn convert_uchar2(char2); +uchar2 __ovld __cnfn convert_uchar2_sat(char2); +uchar2 __ovld __cnfn convert_uchar2_rte(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2); +uchar2 __ovld __cnfn convert_uchar2(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rte(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2); +uchar2 __ovld __cnfn convert_uchar2_rtz(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2); +uchar2 __ovld __cnfn convert_uchar2_rtp(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2); +uchar2 __ovld __cnfn convert_uchar2_rtn(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2); +uchar2 __ovld __cnfn convert_uchar2(short2); +uchar2 __ovld __cnfn convert_uchar2_sat(short2); +uchar2 __ovld __cnfn convert_uchar2_rte(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2); +uchar2 __ovld __cnfn convert_uchar2(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rte(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2); +uchar2 __ovld __cnfn convert_uchar2_rtz(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2); +uchar2 __ovld __cnfn convert_uchar2_rtp(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2); +uchar2 __ovld __cnfn convert_uchar2_rtn(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2); +uchar2 __ovld __cnfn convert_uchar2(int2); +uchar2 __ovld __cnfn convert_uchar2_sat(int2); +uchar2 __ovld __cnfn convert_uchar2_rte(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2); +uchar2 __ovld __cnfn convert_uchar2_rtz(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2); +uchar2 __ovld __cnfn convert_uchar2_rtp(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2); +uchar2 __ovld __cnfn convert_uchar2_rtn(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2); +uchar2 __ovld __cnfn convert_uchar2(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat(uint2); +uchar2 __ovld __cnfn convert_uchar2_rte(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2); +uchar2 __ovld 
__cnfn convert_uchar2_rtz(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2); +uchar2 __ovld __cnfn convert_uchar2_rtp(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2); +uchar2 __ovld __cnfn convert_uchar2_rtn(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2); +uchar2 __ovld __cnfn convert_uchar2(long2); +uchar2 __ovld __cnfn convert_uchar2_sat(long2); +uchar2 __ovld __cnfn convert_uchar2_rte(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2); +uchar2 __ovld __cnfn convert_uchar2(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rte(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2); +uchar2 __ovld __cnfn convert_uchar2_rtz(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2); +uchar2 __ovld __cnfn convert_uchar2_rtp(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2); +uchar2 __ovld __cnfn convert_uchar2_rtn(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2); +uchar2 __ovld __cnfn convert_uchar2(float2); +uchar2 __ovld __cnfn convert_uchar2_sat(float2); +short2 __ovld __cnfn convert_short2_rte(char2); +short2 __ovld __cnfn convert_short2_sat_rte(char2); +short2 __ovld __cnfn convert_short2_rtz(char2); +short2 __ovld __cnfn convert_short2_sat_rtz(char2); +short2 __ovld __cnfn convert_short2_rtp(char2); +short2 __ovld __cnfn convert_short2_sat_rtp(char2); +short2 __ovld __cnfn convert_short2_rtn(char2); +short2 __ovld __cnfn convert_short2_sat_rtn(char2); +short2 __ovld __cnfn convert_short2(char2); +short2 __ovld __cnfn convert_short2_sat(char2); +short2 __ovld __cnfn convert_short2_rte(uchar2); +short2 __ovld __cnfn convert_short2_sat_rte(uchar2); +short2 __ovld __cnfn convert_short2_rtz(uchar2); +short2 __ovld __cnfn convert_short2_sat_rtz(uchar2); +short2 __ovld __cnfn convert_short2_rtp(uchar2); +short2 __ovld __cnfn convert_short2_sat_rtp(uchar2); +short2 __ovld __cnfn convert_short2_rtn(uchar2); +short2 __ovld __cnfn convert_short2_sat_rtn(uchar2); +short2 __ovld __cnfn convert_short2(uchar2); +short2 __ovld __cnfn convert_short2_sat(uchar2); +short2 __ovld __cnfn convert_short2_rte(short2); +short2 __ovld __cnfn convert_short2_sat_rte(short2); +short2 __ovld __cnfn convert_short2_rtz(short2); +short2 __ovld __cnfn convert_short2_sat_rtz(short2); +short2 __ovld __cnfn convert_short2_rtp(short2); +short2 __ovld __cnfn convert_short2_sat_rtp(short2); +short2 __ovld __cnfn convert_short2_rtn(short2); +short2 __ovld __cnfn convert_short2_sat_rtn(short2); +short2 __ovld __cnfn convert_short2(short2); +short2 __ovld __cnfn convert_short2_sat(short2); +short2 __ovld __cnfn convert_short2_rte(ushort2); +short2 __ovld __cnfn convert_short2_sat_rte(ushort2); +short2 __ovld __cnfn convert_short2_rtz(ushort2); +short2 __ovld __cnfn convert_short2_sat_rtz(ushort2); +short2 __ovld __cnfn convert_short2_rtp(ushort2); +short2 __ovld __cnfn convert_short2_sat_rtp(ushort2); +short2 __ovld __cnfn convert_short2_rtn(ushort2); +short2 __ovld __cnfn convert_short2_sat_rtn(ushort2); +short2 __ovld __cnfn convert_short2(ushort2); +short2 __ovld __cnfn convert_short2_sat(ushort2); +short2 __ovld __cnfn convert_short2_rte(int2); +short2 __ovld __cnfn 
convert_short2_sat_rte(int2); +short2 __ovld __cnfn convert_short2_rtz(int2); +short2 __ovld __cnfn convert_short2_sat_rtz(int2); +short2 __ovld __cnfn convert_short2_rtp(int2); +short2 __ovld __cnfn convert_short2_sat_rtp(int2); +short2 __ovld __cnfn convert_short2_rtn(int2); +short2 __ovld __cnfn convert_short2_sat_rtn(int2); +short2 __ovld __cnfn convert_short2(int2); +short2 __ovld __cnfn convert_short2_sat(int2); +short2 __ovld __cnfn convert_short2_rte(uint2); +short2 __ovld __cnfn convert_short2_sat_rte(uint2); +short2 __ovld __cnfn convert_short2_rtz(uint2); +short2 __ovld __cnfn convert_short2_sat_rtz(uint2); +short2 __ovld __cnfn convert_short2_rtp(uint2); +short2 __ovld __cnfn convert_short2_sat_rtp(uint2); +short2 __ovld __cnfn convert_short2_rtn(uint2); +short2 __ovld __cnfn convert_short2_sat_rtn(uint2); +short2 __ovld __cnfn convert_short2(uint2); +short2 __ovld __cnfn convert_short2_sat(uint2); +short2 __ovld __cnfn convert_short2_rte(long2); +short2 __ovld __cnfn convert_short2_sat_rte(long2); +short2 __ovld __cnfn convert_short2_rtz(long2); +short2 __ovld __cnfn convert_short2_sat_rtz(long2); +short2 __ovld __cnfn convert_short2_rtp(long2); +short2 __ovld __cnfn convert_short2_sat_rtp(long2); +short2 __ovld __cnfn convert_short2_rtn(long2); +short2 __ovld __cnfn convert_short2_sat_rtn(long2); +short2 __ovld __cnfn convert_short2(long2); +short2 __ovld __cnfn convert_short2_sat(long2); +short2 __ovld __cnfn convert_short2_rte(ulong2); +short2 __ovld __cnfn convert_short2_sat_rte(ulong2); +short2 __ovld __cnfn convert_short2_rtz(ulong2); +short2 __ovld __cnfn convert_short2_sat_rtz(ulong2); +short2 __ovld __cnfn convert_short2_rtp(ulong2); +short2 __ovld __cnfn convert_short2_sat_rtp(ulong2); +short2 __ovld __cnfn convert_short2_rtn(ulong2); +short2 __ovld __cnfn convert_short2_sat_rtn(ulong2); +short2 __ovld __cnfn convert_short2(ulong2); +short2 __ovld __cnfn convert_short2_sat(ulong2); +short2 __ovld __cnfn convert_short2_rte(float2); +short2 __ovld __cnfn convert_short2_sat_rte(float2); +short2 __ovld __cnfn convert_short2_rtz(float2); +short2 __ovld __cnfn convert_short2_sat_rtz(float2); +short2 __ovld __cnfn convert_short2_rtp(float2); +short2 __ovld __cnfn convert_short2_sat_rtp(float2); +short2 __ovld __cnfn convert_short2_rtn(float2); +short2 __ovld __cnfn convert_short2_sat_rtn(float2); +short2 __ovld __cnfn convert_short2(float2); +short2 __ovld __cnfn convert_short2_sat(float2); +ushort2 __ovld __cnfn convert_ushort2_rte(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2); +ushort2 __ovld __cnfn convert_ushort2_rtz(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2); +ushort2 __ovld __cnfn convert_ushort2_rtp(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2); +ushort2 __ovld __cnfn convert_ushort2_rtn(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2); +ushort2 __ovld __cnfn convert_ushort2(char2); +ushort2 __ovld __cnfn convert_ushort2_sat(char2); +ushort2 __ovld __cnfn convert_ushort2_rte(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2); +ushort2 __ovld __cnfn convert_ushort2(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat(uchar2); +ushort2 __ovld __cnfn 
convert_ushort2_rte(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2); +ushort2 __ovld __cnfn convert_ushort2_rtz(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2); +ushort2 __ovld __cnfn convert_ushort2_rtp(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2); +ushort2 __ovld __cnfn convert_ushort2_rtn(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2); +ushort2 __ovld __cnfn convert_ushort2(short2); +ushort2 __ovld __cnfn convert_ushort2_sat(short2); +ushort2 __ovld __cnfn convert_ushort2_rte(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2); +ushort2 __ovld __cnfn convert_ushort2(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rte(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2); +ushort2 __ovld __cnfn convert_ushort2_rtz(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2); +ushort2 __ovld __cnfn convert_ushort2_rtp(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2); +ushort2 __ovld __cnfn convert_ushort2_rtn(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2); +ushort2 __ovld __cnfn convert_ushort2(int2); +ushort2 __ovld __cnfn convert_ushort2_sat(int2); +ushort2 __ovld __cnfn convert_ushort2_rte(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2); +ushort2 __ovld __cnfn convert_ushort2_rtz(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2); +ushort2 __ovld __cnfn convert_ushort2_rtp(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2); +ushort2 __ovld __cnfn convert_ushort2_rtn(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2); +ushort2 __ovld __cnfn convert_ushort2(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat(uint2); +ushort2 __ovld __cnfn convert_ushort2_rte(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2); +ushort2 __ovld __cnfn convert_ushort2_rtz(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2); +ushort2 __ovld __cnfn convert_ushort2_rtp(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2); +ushort2 __ovld __cnfn convert_ushort2_rtn(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2); +ushort2 __ovld __cnfn convert_ushort2(long2); +ushort2 __ovld __cnfn convert_ushort2_sat(long2); +ushort2 __ovld __cnfn convert_ushort2_rte(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2); +ushort2 __ovld __cnfn convert_ushort2(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rte(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2); +ushort2 __ovld __cnfn convert_ushort2_rtz(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2); +ushort2 __ovld __cnfn convert_ushort2_rtp(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2); +ushort2 __ovld __cnfn convert_ushort2_rtn(float2); +ushort2 __ovld __cnfn 
convert_ushort2_sat_rtn(float2); +ushort2 __ovld __cnfn convert_ushort2(float2); +ushort2 __ovld __cnfn convert_ushort2_sat(float2); +int2 __ovld __cnfn convert_int2_rte(char2); +int2 __ovld __cnfn convert_int2_sat_rte(char2); +int2 __ovld __cnfn convert_int2_rtz(char2); +int2 __ovld __cnfn convert_int2_sat_rtz(char2); +int2 __ovld __cnfn convert_int2_rtp(char2); +int2 __ovld __cnfn convert_int2_sat_rtp(char2); +int2 __ovld __cnfn convert_int2_rtn(char2); +int2 __ovld __cnfn convert_int2_sat_rtn(char2); +int2 __ovld __cnfn convert_int2(char2); +int2 __ovld __cnfn convert_int2_sat(char2); +int2 __ovld __cnfn convert_int2_rte(uchar2); +int2 __ovld __cnfn convert_int2_sat_rte(uchar2); +int2 __ovld __cnfn convert_int2_rtz(uchar2); +int2 __ovld __cnfn convert_int2_sat_rtz(uchar2); +int2 __ovld __cnfn convert_int2_rtp(uchar2); +int2 __ovld __cnfn convert_int2_sat_rtp(uchar2); +int2 __ovld __cnfn convert_int2_rtn(uchar2); +int2 __ovld __cnfn convert_int2_sat_rtn(uchar2); +int2 __ovld __cnfn convert_int2(uchar2); +int2 __ovld __cnfn convert_int2_sat(uchar2); +int2 __ovld __cnfn convert_int2_rte(short2); +int2 __ovld __cnfn convert_int2_sat_rte(short2); +int2 __ovld __cnfn convert_int2_rtz(short2); +int2 __ovld __cnfn convert_int2_sat_rtz(short2); +int2 __ovld __cnfn convert_int2_rtp(short2); +int2 __ovld __cnfn convert_int2_sat_rtp(short2); +int2 __ovld __cnfn convert_int2_rtn(short2); +int2 __ovld __cnfn convert_int2_sat_rtn(short2); +int2 __ovld __cnfn convert_int2(short2); +int2 __ovld __cnfn convert_int2_sat(short2); +int2 __ovld __cnfn convert_int2_rte(ushort2); +int2 __ovld __cnfn convert_int2_sat_rte(ushort2); +int2 __ovld __cnfn convert_int2_rtz(ushort2); +int2 __ovld __cnfn convert_int2_sat_rtz(ushort2); +int2 __ovld __cnfn convert_int2_rtp(ushort2); +int2 __ovld __cnfn convert_int2_sat_rtp(ushort2); +int2 __ovld __cnfn convert_int2_rtn(ushort2); +int2 __ovld __cnfn convert_int2_sat_rtn(ushort2); +int2 __ovld __cnfn convert_int2(ushort2); +int2 __ovld __cnfn convert_int2_sat(ushort2); +int2 __ovld __cnfn convert_int2_rte(int2); +int2 __ovld __cnfn convert_int2_sat_rte(int2); +int2 __ovld __cnfn convert_int2_rtz(int2); +int2 __ovld __cnfn convert_int2_sat_rtz(int2); +int2 __ovld __cnfn convert_int2_rtp(int2); +int2 __ovld __cnfn convert_int2_sat_rtp(int2); +int2 __ovld __cnfn convert_int2_rtn(int2); +int2 __ovld __cnfn convert_int2_sat_rtn(int2); +int2 __ovld __cnfn convert_int2(int2); +int2 __ovld __cnfn convert_int2_sat(int2); +int2 __ovld __cnfn convert_int2_rte(uint2); +int2 __ovld __cnfn convert_int2_sat_rte(uint2); +int2 __ovld __cnfn convert_int2_rtz(uint2); +int2 __ovld __cnfn convert_int2_sat_rtz(uint2); +int2 __ovld __cnfn convert_int2_rtp(uint2); +int2 __ovld __cnfn convert_int2_sat_rtp(uint2); +int2 __ovld __cnfn convert_int2_rtn(uint2); +int2 __ovld __cnfn convert_int2_sat_rtn(uint2); +int2 __ovld __cnfn convert_int2(uint2); +int2 __ovld __cnfn convert_int2_sat(uint2); +int2 __ovld __cnfn convert_int2_rte(long2); +int2 __ovld __cnfn convert_int2_sat_rte(long2); +int2 __ovld __cnfn convert_int2_rtz(long2); +int2 __ovld __cnfn convert_int2_sat_rtz(long2); +int2 __ovld __cnfn convert_int2_rtp(long2); +int2 __ovld __cnfn convert_int2_sat_rtp(long2); +int2 __ovld __cnfn convert_int2_rtn(long2); +int2 __ovld __cnfn convert_int2_sat_rtn(long2); +int2 __ovld __cnfn convert_int2(long2); +int2 __ovld __cnfn convert_int2_sat(long2); +int2 __ovld __cnfn convert_int2_rte(ulong2); +int2 __ovld __cnfn convert_int2_sat_rte(ulong2); +int2 __ovld __cnfn convert_int2_rtz(ulong2); +int2 __ovld 
__cnfn convert_int2_sat_rtz(ulong2);
+int2 __ovld __cnfn convert_int2_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
+int2 __ovld __cnfn convert_int2_rtn(ulong2);
+int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
+int2 __ovld __cnfn convert_int2(ulong2);
+int2 __ovld __cnfn convert_int2_sat(ulong2);
+int2 __ovld __cnfn convert_int2_rte(float2);
+int2 __ovld __cnfn convert_int2_sat_rte(float2);
+int2 __ovld __cnfn convert_int2_rtz(float2);
+int2 __ovld __cnfn convert_int2_sat_rtz(float2);
+int2 __ovld __cnfn convert_int2_rtp(float2);
+int2 __ovld __cnfn convert_int2_sat_rtp(float2);
+int2 __ovld __cnfn convert_int2_rtn(float2);
+int2 __ovld __cnfn convert_int2_sat_rtn(float2);
+int2 __ovld __cnfn convert_int2(float2);
+int2 __ovld __cnfn convert_int2_sat(float2);
+uint2 __ovld __cnfn convert_uint2_rte(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
+uint2 __ovld __cnfn convert_uint2_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
+uint2 __ovld __cnfn convert_uint2_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
+uint2 __ovld __cnfn convert_uint2_rtn(char2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
+uint2 __ovld __cnfn convert_uint2(char2);
+uint2 __ovld __cnfn convert_uint2_sat(char2);
+uint2 __ovld __cnfn convert_uint2_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
+uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
+uint2 __ovld __cnfn convert_uint2(uchar2);
+uint2 __ovld __cnfn convert_uint2_sat(uchar2);
+uint2 __ovld __cnfn convert_uint2_rte(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
+uint2 __ovld __cnfn convert_uint2_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
+uint2 __ovld __cnfn convert_uint2_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
+uint2 __ovld __cnfn convert_uint2_rtn(short2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
+uint2 __ovld __cnfn convert_uint2(short2);
+uint2 __ovld __cnfn convert_uint2_sat(short2);
+uint2 __ovld __cnfn convert_uint2_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
+uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
+uint2 __ovld __cnfn convert_uint2(ushort2);
+uint2 __ovld __cnfn convert_uint2_sat(ushort2);
+uint2 __ovld __cnfn convert_uint2_rte(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
+uint2 __ovld __cnfn convert_uint2_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
+uint2 __ovld __cnfn convert_uint2_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
+uint2 __ovld __cnfn convert_uint2_rtn(int2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
+uint2 __ovld __cnfn convert_uint2(int2);
+uint2 __ovld __cnfn convert_uint2_sat(int2);
+uint2 __ovld __cnfn convert_uint2_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
+uint2 __ovld __cnfn convert_uint2_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
+uint2 __ovld __cnfn convert_uint2_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
+uint2 __ovld __cnfn convert_uint2_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
+uint2 __ovld __cnfn convert_uint2(uint2);
+uint2 __ovld __cnfn convert_uint2_sat(uint2);
+uint2 __ovld __cnfn convert_uint2_rte(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
+uint2 __ovld __cnfn convert_uint2_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
+uint2 __ovld __cnfn convert_uint2_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
+uint2 __ovld __cnfn convert_uint2_rtn(long2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
+uint2 __ovld __cnfn convert_uint2(long2);
+uint2 __ovld __cnfn convert_uint2_sat(long2);
+uint2 __ovld __cnfn convert_uint2_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
+uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
+uint2 __ovld __cnfn convert_uint2(ulong2);
+uint2 __ovld __cnfn convert_uint2_sat(ulong2);
+uint2 __ovld __cnfn convert_uint2_rte(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
+uint2 __ovld __cnfn convert_uint2_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
+uint2 __ovld __cnfn convert_uint2_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
+uint2 __ovld __cnfn convert_uint2_rtn(float2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
+uint2 __ovld __cnfn convert_uint2(float2);
+uint2 __ovld __cnfn convert_uint2_sat(float2);
+long2 __ovld __cnfn convert_long2_rte(char2);
+long2 __ovld __cnfn convert_long2_sat_rte(char2);
+long2 __ovld __cnfn convert_long2_rtz(char2);
+long2 __ovld __cnfn convert_long2_sat_rtz(char2);
+long2 __ovld __cnfn convert_long2_rtp(char2);
+long2 __ovld __cnfn convert_long2_sat_rtp(char2);
+long2 __ovld __cnfn convert_long2_rtn(char2);
+long2 __ovld __cnfn convert_long2_sat_rtn(char2);
+long2 __ovld __cnfn convert_long2(char2);
+long2 __ovld __cnfn convert_long2_sat(char2);
+long2 __ovld __cnfn convert_long2_rte(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
+long2 __ovld __cnfn convert_long2_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
+long2 __ovld __cnfn convert_long2_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
+long2 __ovld __cnfn convert_long2_rtn(uchar2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
+long2 __ovld __cnfn convert_long2(uchar2);
+long2 __ovld __cnfn convert_long2_sat(uchar2);
+long2 __ovld __cnfn convert_long2_rte(short2);
+long2 __ovld __cnfn convert_long2_sat_rte(short2);
+long2 __ovld __cnfn convert_long2_rtz(short2);
+long2 __ovld __cnfn convert_long2_sat_rtz(short2);
+long2 __ovld __cnfn convert_long2_rtp(short2);
+long2 __ovld __cnfn convert_long2_sat_rtp(short2);
+long2 __ovld __cnfn convert_long2_rtn(short2);
+long2 __ovld __cnfn convert_long2_sat_rtn(short2);
+long2 __ovld __cnfn convert_long2(short2);
+long2 __ovld __cnfn convert_long2_sat(short2);
+long2 __ovld __cnfn convert_long2_rte(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
+long2 __ovld __cnfn convert_long2_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
+long2 __ovld __cnfn convert_long2_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
+long2 __ovld __cnfn convert_long2_rtn(ushort2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
+long2 __ovld __cnfn convert_long2(ushort2);
+long2 __ovld __cnfn convert_long2_sat(ushort2);
+long2 __ovld __cnfn convert_long2_rte(int2);
+long2 __ovld __cnfn convert_long2_sat_rte(int2);
+long2 __ovld __cnfn convert_long2_rtz(int2);
+long2 __ovld __cnfn convert_long2_sat_rtz(int2);
+long2 __ovld __cnfn convert_long2_rtp(int2);
+long2 __ovld __cnfn convert_long2_sat_rtp(int2);
+long2 __ovld __cnfn convert_long2_rtn(int2);
+long2 __ovld __cnfn convert_long2_sat_rtn(int2);
+long2 __ovld __cnfn convert_long2(int2);
+long2 __ovld __cnfn convert_long2_sat(int2);
+long2 __ovld __cnfn convert_long2_rte(uint2);
+long2 __ovld __cnfn convert_long2_sat_rte(uint2);
+long2 __ovld __cnfn convert_long2_rtz(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
+long2 __ovld __cnfn convert_long2_rtp(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
+long2 __ovld __cnfn convert_long2_rtn(uint2);
+long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
+long2 __ovld __cnfn convert_long2(uint2);
+long2 __ovld __cnfn convert_long2_sat(uint2);
+long2 __ovld __cnfn convert_long2_rte(long2);
+long2 __ovld __cnfn convert_long2_sat_rte(long2);
+long2 __ovld __cnfn convert_long2_rtz(long2);
+long2 __ovld __cnfn convert_long2_sat_rtz(long2);
+long2 __ovld __cnfn convert_long2_rtp(long2);
+long2 __ovld __cnfn convert_long2_sat_rtp(long2);
+long2 __ovld __cnfn convert_long2_rtn(long2);
+long2 __ovld __cnfn convert_long2_sat_rtn(long2);
+long2 __ovld __cnfn convert_long2(long2);
+long2 __ovld __cnfn convert_long2_sat(long2);
+long2 __ovld __cnfn convert_long2_rte(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
+long2 __ovld __cnfn convert_long2_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
+long2 __ovld __cnfn convert_long2_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
+long2 __ovld __cnfn convert_long2_rtn(ulong2);
+long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
+long2 __ovld __cnfn convert_long2(ulong2);
+long2 __ovld __cnfn convert_long2_sat(ulong2);
+long2 __ovld __cnfn convert_long2_rte(float2);
+long2 __ovld __cnfn convert_long2_sat_rte(float2);
+long2 __ovld __cnfn convert_long2_rtz(float2);
+long2 __ovld __cnfn convert_long2_sat_rtz(float2);
+long2 __ovld __cnfn convert_long2_rtp(float2);
+long2 __ovld __cnfn convert_long2_sat_rtp(float2);
+long2 __ovld __cnfn convert_long2_rtn(float2);
+long2 __ovld __cnfn convert_long2_sat_rtn(float2);
+long2 __ovld __cnfn convert_long2(float2);
+long2 __ovld __cnfn convert_long2_sat(float2);
+ulong2 __ovld __cnfn convert_ulong2_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
+ulong2 __ovld __cnfn convert_ulong2(char2);
+ulong2 __ovld __cnfn convert_ulong2_sat(char2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
+ulong2 __ovld __cnfn convert_ulong2(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
+ulong2 __ovld __cnfn convert_ulong2_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
+ulong2 __ovld __cnfn convert_ulong2(short2);
+ulong2 __ovld __cnfn convert_ulong2_sat(short2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
+ulong2 __ovld __cnfn convert_ulong2(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
+ulong2 __ovld __cnfn convert_ulong2_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
+ulong2 __ovld __cnfn convert_ulong2(int2);
+ulong2 __ovld __cnfn convert_ulong2_sat(int2);
+ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
+ulong2 __ovld __cnfn convert_ulong2(uint2);
+ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
+ulong2 __ovld __cnfn convert_ulong2_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
+ulong2 __ovld __cnfn convert_ulong2(long2);
+ulong2 __ovld __cnfn convert_ulong2_sat(long2);
+ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
+ulong2 __ovld __cnfn convert_ulong2(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
+ulong2 __ovld __cnfn convert_ulong2_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
+ulong2 __ovld __cnfn convert_ulong2(float2);
+ulong2 __ovld __cnfn convert_ulong2_sat(float2);
+float2 __ovld __cnfn convert_float2_rte(char2);
+float2 __ovld __cnfn convert_float2_rtz(char2);
+float2 __ovld __cnfn convert_float2_rtp(char2);
+float2 __ovld __cnfn convert_float2_rtn(char2);
+float2 __ovld __cnfn convert_float2(char2);
+float2 __ovld __cnfn convert_float2_rte(uchar2);
+float2 __ovld __cnfn convert_float2_rtz(uchar2);
+float2 __ovld __cnfn convert_float2_rtp(uchar2);
+float2 __ovld __cnfn convert_float2_rtn(uchar2);
+float2 __ovld __cnfn convert_float2(uchar2);
+float2 __ovld __cnfn convert_float2_rte(short2);
+float2 __ovld __cnfn convert_float2_rtz(short2);
+float2 __ovld __cnfn convert_float2_rtp(short2);
+float2 __ovld __cnfn convert_float2_rtn(short2);
+float2 __ovld __cnfn convert_float2(short2);
+float2 __ovld __cnfn convert_float2_rte(ushort2);
+float2 __ovld __cnfn convert_float2_rtz(ushort2);
+float2 __ovld __cnfn convert_float2_rtp(ushort2);
+float2 __ovld __cnfn convert_float2_rtn(ushort2);
+float2 __ovld __cnfn convert_float2(ushort2);
+float2 __ovld __cnfn convert_float2_rte(int2);
+float2 __ovld __cnfn convert_float2_rtz(int2);
+float2 __ovld __cnfn convert_float2_rtp(int2);
+float2 __ovld __cnfn convert_float2_rtn(int2);
+float2 __ovld __cnfn convert_float2(int2);
+float2 __ovld __cnfn convert_float2_rte(uint2);
+float2 __ovld __cnfn convert_float2_rtz(uint2);
+float2 __ovld __cnfn convert_float2_rtp(uint2);
+float2 __ovld __cnfn convert_float2_rtn(uint2);
+float2 __ovld __cnfn convert_float2(uint2);
+float2 __ovld __cnfn convert_float2_rte(long2);
+float2 __ovld __cnfn convert_float2_rtz(long2);
+float2 __ovld __cnfn convert_float2_rtp(long2);
+float2 __ovld __cnfn convert_float2_rtn(long2);
+float2 __ovld __cnfn convert_float2(long2);
+float2 __ovld __cnfn convert_float2_rte(ulong2);
+float2 __ovld __cnfn convert_float2_rtz(ulong2);
+float2 __ovld __cnfn convert_float2_rtp(ulong2);
+float2 __ovld __cnfn convert_float2_rtn(ulong2);
+float2 __ovld __cnfn convert_float2(ulong2);
+float2 __ovld __cnfn convert_float2_rte(float2);
+float2 __ovld __cnfn convert_float2_rtz(float2);
+float2 __ovld __cnfn convert_float2_rtp(float2);
+float2 __ovld __cnfn convert_float2_rtn(float2);
+float2 __ovld __cnfn convert_float2(float2);
+char3 __ovld __cnfn convert_char3_rte(char3);
+char3 __ovld __cnfn convert_char3_sat_rte(char3);
+char3 __ovld __cnfn convert_char3_rtz(char3);
+char3 __ovld __cnfn convert_char3_sat_rtz(char3);
+char3 __ovld __cnfn convert_char3_rtp(char3);
+char3 __ovld __cnfn convert_char3_sat_rtp(char3);
+char3 __ovld __cnfn convert_char3_rtn(char3);
+char3 __ovld __cnfn convert_char3_sat_rtn(char3);
+char3 __ovld __cnfn convert_char3(char3);
+char3 __ovld __cnfn convert_char3_sat(char3);
+char3 __ovld __cnfn convert_char3_rte(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
+char3 __ovld __cnfn convert_char3_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
+char3 __ovld __cnfn convert_char3_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
+char3 __ovld __cnfn convert_char3_rtn(uchar3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
+char3 __ovld __cnfn convert_char3(uchar3);
+char3 __ovld __cnfn convert_char3_sat(uchar3);
+char3 __ovld __cnfn convert_char3_rte(short3);
+char3 __ovld __cnfn convert_char3_sat_rte(short3);
+char3 __ovld __cnfn convert_char3_rtz(short3);
+char3 __ovld __cnfn convert_char3_sat_rtz(short3);
+char3 __ovld __cnfn convert_char3_rtp(short3);
+char3 __ovld __cnfn convert_char3_sat_rtp(short3);
+char3 __ovld __cnfn convert_char3_rtn(short3);
+char3 __ovld __cnfn convert_char3_sat_rtn(short3);
+char3 __ovld __cnfn convert_char3(short3);
+char3 __ovld __cnfn convert_char3_sat(short3);
+char3 __ovld __cnfn convert_char3_rte(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
+char3 __ovld __cnfn convert_char3_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
+char3 __ovld __cnfn convert_char3_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
+char3 __ovld __cnfn convert_char3_rtn(ushort3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
+char3 __ovld __cnfn convert_char3(ushort3);
+char3 __ovld __cnfn convert_char3_sat(ushort3);
+char3 __ovld __cnfn convert_char3_rte(int3);
+char3 __ovld __cnfn convert_char3_sat_rte(int3);
+char3 __ovld __cnfn convert_char3_rtz(int3);
+char3 __ovld __cnfn convert_char3_sat_rtz(int3);
+char3 __ovld __cnfn convert_char3_rtp(int3);
+char3 __ovld __cnfn convert_char3_sat_rtp(int3);
+char3 __ovld __cnfn convert_char3_rtn(int3);
+char3 __ovld __cnfn convert_char3_sat_rtn(int3);
+char3 __ovld __cnfn convert_char3(int3);
+char3 __ovld __cnfn convert_char3_sat(int3);
+char3 __ovld __cnfn convert_char3_rte(uint3);
+char3 __ovld __cnfn convert_char3_sat_rte(uint3);
+char3 __ovld __cnfn convert_char3_rtz(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
+char3 __ovld __cnfn convert_char3_rtp(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
+char3 __ovld __cnfn convert_char3_rtn(uint3);
+char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
+char3 __ovld __cnfn convert_char3(uint3);
+char3 __ovld __cnfn convert_char3_sat(uint3);
+char3 __ovld __cnfn convert_char3_rte(long3);
+char3 __ovld __cnfn convert_char3_sat_rte(long3);
+char3 __ovld __cnfn convert_char3_rtz(long3);
+char3 __ovld __cnfn convert_char3_sat_rtz(long3);
+char3 __ovld __cnfn convert_char3_rtp(long3);
+char3 __ovld __cnfn convert_char3_sat_rtp(long3);
+char3 __ovld __cnfn convert_char3_rtn(long3);
+char3 __ovld __cnfn convert_char3_sat_rtn(long3);
+char3 __ovld __cnfn convert_char3(long3);
+char3 __ovld __cnfn convert_char3_sat(long3);
+char3 __ovld __cnfn convert_char3_rte(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
+char3 __ovld __cnfn convert_char3_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
+char3 __ovld __cnfn convert_char3_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
+char3 __ovld __cnfn convert_char3_rtn(ulong3);
+char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
+char3 __ovld __cnfn convert_char3(ulong3);
+char3 __ovld __cnfn convert_char3_sat(ulong3);
+char3 __ovld __cnfn convert_char3_rte(float3);
+char3 __ovld __cnfn convert_char3_sat_rte(float3);
+char3 __ovld __cnfn convert_char3_rtz(float3);
+char3 __ovld __cnfn convert_char3_sat_rtz(float3);
+char3 __ovld __cnfn convert_char3_rtp(float3);
+char3 __ovld __cnfn convert_char3_sat_rtp(float3);
+char3 __ovld __cnfn convert_char3_rtn(float3);
+char3 __ovld __cnfn convert_char3_sat_rtn(float3);
+char3 __ovld __cnfn convert_char3(float3);
+char3 __ovld __cnfn convert_char3_sat(float3);
+uchar3 __ovld __cnfn convert_uchar3_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
+uchar3 __ovld __cnfn convert_uchar3(char3);
+uchar3 __ovld __cnfn convert_uchar3_sat(char3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
+uchar3 __ovld __cnfn convert_uchar3(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
+uchar3 __ovld __cnfn convert_uchar3_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
+uchar3 __ovld __cnfn convert_uchar3(short3);
+uchar3 __ovld __cnfn convert_uchar3_sat(short3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
+uchar3 __ovld __cnfn convert_uchar3(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
+uchar3 __ovld __cnfn convert_uchar3_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
+uchar3 __ovld __cnfn convert_uchar3(int3);
+uchar3 __ovld __cnfn convert_uchar3_sat(int3);
+uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
+uchar3 __ovld __cnfn convert_uchar3(uint3);
+uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
+uchar3 __ovld __cnfn convert_uchar3_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
+uchar3 __ovld __cnfn convert_uchar3(long3);
+uchar3 __ovld __cnfn convert_uchar3_sat(long3);
+uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
+uchar3 __ovld __cnfn convert_uchar3(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
+uchar3 __ovld __cnfn convert_uchar3_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
+uchar3 __ovld __cnfn convert_uchar3(float3);
+uchar3 __ovld __cnfn convert_uchar3_sat(float3);
+short3 __ovld __cnfn convert_short3_rte(char3);
+short3 __ovld __cnfn convert_short3_sat_rte(char3);
+short3 __ovld __cnfn convert_short3_rtz(char3);
+short3 __ovld __cnfn convert_short3_sat_rtz(char3);
+short3 __ovld __cnfn convert_short3_rtp(char3);
+short3 __ovld __cnfn convert_short3_sat_rtp(char3);
+short3 __ovld __cnfn convert_short3_rtn(char3);
+short3 __ovld __cnfn convert_short3_sat_rtn(char3);
+short3 __ovld __cnfn convert_short3(char3);
+short3 __ovld __cnfn convert_short3_sat(char3);
+short3 __ovld __cnfn convert_short3_rte(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
+short3 __ovld __cnfn convert_short3_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
+short3 __ovld __cnfn convert_short3_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
+short3 __ovld __cnfn convert_short3_rtn(uchar3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
+short3 __ovld __cnfn convert_short3(uchar3);
+short3 __ovld __cnfn convert_short3_sat(uchar3);
+short3 __ovld __cnfn convert_short3_rte(short3);
+short3 __ovld __cnfn convert_short3_sat_rte(short3);
+short3 __ovld __cnfn convert_short3_rtz(short3);
+short3 __ovld __cnfn convert_short3_sat_rtz(short3);
+short3 __ovld __cnfn convert_short3_rtp(short3);
+short3 __ovld __cnfn convert_short3_sat_rtp(short3);
+short3 __ovld __cnfn convert_short3_rtn(short3);
+short3 __ovld __cnfn convert_short3_sat_rtn(short3);
+short3 __ovld __cnfn convert_short3(short3);
+short3 __ovld __cnfn convert_short3_sat(short3);
+short3 __ovld __cnfn convert_short3_rte(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
+short3 __ovld __cnfn convert_short3_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
+short3 __ovld __cnfn convert_short3_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
+short3 __ovld __cnfn convert_short3_rtn(ushort3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
+short3 __ovld __cnfn convert_short3(ushort3);
+short3 __ovld __cnfn convert_short3_sat(ushort3);
+short3 __ovld __cnfn convert_short3_rte(int3);
+short3 __ovld __cnfn convert_short3_sat_rte(int3);
+short3 __ovld __cnfn convert_short3_rtz(int3);
+short3 __ovld __cnfn convert_short3_sat_rtz(int3);
+short3 __ovld __cnfn convert_short3_rtp(int3);
+short3 __ovld __cnfn convert_short3_sat_rtp(int3);
+short3 __ovld __cnfn convert_short3_rtn(int3);
+short3 __ovld __cnfn convert_short3_sat_rtn(int3);
+short3 __ovld __cnfn convert_short3(int3);
+short3 __ovld __cnfn convert_short3_sat(int3);
+short3 __ovld __cnfn convert_short3_rte(uint3);
+short3 __ovld __cnfn convert_short3_sat_rte(uint3);
+short3 __ovld __cnfn convert_short3_rtz(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
+short3 __ovld __cnfn convert_short3_rtp(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
+short3 __ovld __cnfn convert_short3_rtn(uint3);
+short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
+short3 __ovld __cnfn convert_short3(uint3);
+short3 __ovld __cnfn convert_short3_sat(uint3);
+short3 __ovld __cnfn convert_short3_rte(long3);
+short3 __ovld __cnfn convert_short3_sat_rte(long3);
+short3 __ovld __cnfn convert_short3_rtz(long3);
+short3 __ovld __cnfn convert_short3_sat_rtz(long3);
+short3 __ovld __cnfn convert_short3_rtp(long3);
+short3 __ovld __cnfn convert_short3_sat_rtp(long3);
+short3 __ovld __cnfn convert_short3_rtn(long3);
+short3 __ovld __cnfn convert_short3_sat_rtn(long3);
+short3 __ovld __cnfn convert_short3(long3);
+short3 __ovld __cnfn convert_short3_sat(long3);
+short3 __ovld __cnfn convert_short3_rte(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
+short3 __ovld __cnfn convert_short3_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
+short3 __ovld __cnfn convert_short3_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
+short3 __ovld __cnfn convert_short3_rtn(ulong3);
+short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
+short3 __ovld __cnfn convert_short3(ulong3);
+short3 __ovld __cnfn convert_short3_sat(ulong3);
+short3 __ovld __cnfn convert_short3_rte(float3);
+short3 __ovld __cnfn convert_short3_sat_rte(float3);
+short3 __ovld __cnfn convert_short3_rtz(float3);
+short3 __ovld __cnfn convert_short3_sat_rtz(float3);
+short3 __ovld __cnfn convert_short3_rtp(float3);
+short3 __ovld __cnfn convert_short3_sat_rtp(float3);
+short3 __ovld __cnfn convert_short3_rtn(float3);
+short3 __ovld __cnfn convert_short3_sat_rtn(float3);
+short3 __ovld __cnfn convert_short3(float3);
+short3 __ovld __cnfn convert_short3_sat(float3);
+ushort3 __ovld __cnfn convert_ushort3_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
+ushort3 __ovld __cnfn convert_ushort3(char3);
+ushort3 __ovld __cnfn convert_ushort3_sat(char3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
+ushort3 __ovld __cnfn convert_ushort3(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
+ushort3 __ovld __cnfn convert_ushort3_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
+ushort3 __ovld __cnfn convert_ushort3(short3);
+ushort3 __ovld __cnfn convert_ushort3_sat(short3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
+ushort3 __ovld __cnfn convert_ushort3(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
+ushort3 __ovld __cnfn convert_ushort3_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
+ushort3 __ovld __cnfn convert_ushort3(int3);
+ushort3 __ovld __cnfn convert_ushort3_sat(int3);
+ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
+ushort3 __ovld __cnfn convert_ushort3(uint3);
+ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
+ushort3 __ovld __cnfn convert_ushort3_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
+ushort3 __ovld __cnfn convert_ushort3(long3);
+ushort3 __ovld __cnfn convert_ushort3_sat(long3);
+ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
+ushort3 __ovld __cnfn convert_ushort3(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
+ushort3 __ovld __cnfn convert_ushort3_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
+ushort3 __ovld __cnfn convert_ushort3(float3);
+ushort3 __ovld __cnfn convert_ushort3_sat(float3);
+int3 __ovld __cnfn convert_int3_rte(char3);
+int3 __ovld __cnfn convert_int3_sat_rte(char3);
+int3 __ovld __cnfn convert_int3_rtz(char3);
+int3 __ovld __cnfn convert_int3_sat_rtz(char3);
+int3 __ovld __cnfn convert_int3_rtp(char3);
+int3 __ovld __cnfn convert_int3_sat_rtp(char3);
+int3 __ovld __cnfn convert_int3_rtn(char3);
+int3 __ovld __cnfn convert_int3_sat_rtn(char3);
+int3 __ovld __cnfn convert_int3(char3);
+int3 __ovld __cnfn convert_int3_sat(char3);
+int3 __ovld __cnfn convert_int3_rte(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
+int3 __ovld __cnfn convert_int3_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
+int3 __ovld __cnfn convert_int3_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
+int3 __ovld __cnfn convert_int3_rtn(uchar3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
+int3 __ovld __cnfn convert_int3(uchar3);
+int3 __ovld __cnfn convert_int3_sat(uchar3);
+int3 __ovld __cnfn convert_int3_rte(short3);
+int3 __ovld __cnfn convert_int3_sat_rte(short3);
+int3 __ovld __cnfn convert_int3_rtz(short3);
+int3 __ovld __cnfn convert_int3_sat_rtz(short3);
+int3 __ovld __cnfn convert_int3_rtp(short3);
+int3 __ovld __cnfn convert_int3_sat_rtp(short3);
+int3 __ovld __cnfn convert_int3_rtn(short3);
+int3 __ovld __cnfn convert_int3_sat_rtn(short3);
+int3 __ovld __cnfn convert_int3(short3);
+int3 __ovld __cnfn convert_int3_sat(short3);
+int3 __ovld __cnfn convert_int3_rte(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
+int3 __ovld __cnfn convert_int3_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
+int3 __ovld __cnfn convert_int3_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
+int3 __ovld __cnfn convert_int3_rtn(ushort3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
+int3 __ovld __cnfn convert_int3(ushort3);
+int3 __ovld __cnfn convert_int3_sat(ushort3);
+int3 __ovld __cnfn convert_int3_rte(int3);
+int3 __ovld __cnfn convert_int3_sat_rte(int3);
+int3 __ovld __cnfn convert_int3_rtz(int3);
+int3 __ovld __cnfn convert_int3_sat_rtz(int3);
+int3 __ovld __cnfn convert_int3_rtp(int3);
+int3 __ovld __cnfn convert_int3_sat_rtp(int3);
+int3 __ovld __cnfn convert_int3_rtn(int3);
+int3 __ovld __cnfn convert_int3_sat_rtn(int3);
+int3 __ovld __cnfn convert_int3(int3);
+int3 __ovld __cnfn convert_int3_sat(int3);
+int3 __ovld __cnfn convert_int3_rte(uint3);
+int3 __ovld __cnfn convert_int3_sat_rte(uint3);
+int3 __ovld __cnfn convert_int3_rtz(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
+int3 __ovld __cnfn convert_int3_rtp(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
+int3 __ovld __cnfn convert_int3_rtn(uint3);
+int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
+int3 __ovld __cnfn convert_int3(uint3);
+int3 __ovld __cnfn convert_int3_sat(uint3);
+int3 __ovld __cnfn convert_int3_rte(long3);
+int3 __ovld __cnfn convert_int3_sat_rte(long3);
+int3 __ovld __cnfn convert_int3_rtz(long3);
+int3 __ovld __cnfn convert_int3_sat_rtz(long3);
+int3 __ovld __cnfn convert_int3_rtp(long3);
+int3 __ovld __cnfn convert_int3_sat_rtp(long3);
+int3 __ovld __cnfn convert_int3_rtn(long3);
+int3 __ovld __cnfn convert_int3_sat_rtn(long3);
+int3 __ovld __cnfn convert_int3(long3);
+int3 __ovld __cnfn convert_int3_sat(long3);
+int3 __ovld __cnfn convert_int3_rte(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
+int3 __ovld __cnfn convert_int3_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
+int3 __ovld __cnfn convert_int3_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
+int3 __ovld __cnfn convert_int3_rtn(ulong3);
+int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
+int3 __ovld __cnfn convert_int3(ulong3);
+int3 __ovld __cnfn convert_int3_sat(ulong3);
+int3 __ovld __cnfn convert_int3_rte(float3);
+int3 __ovld __cnfn convert_int3_sat_rte(float3);
+int3 __ovld __cnfn convert_int3_rtz(float3);
+int3 __ovld __cnfn convert_int3_sat_rtz(float3);
+int3 __ovld __cnfn convert_int3_rtp(float3);
+int3 __ovld __cnfn convert_int3_sat_rtp(float3);
+int3 __ovld __cnfn convert_int3_rtn(float3);
+int3 __ovld __cnfn convert_int3_sat_rtn(float3);
+int3 __ovld __cnfn convert_int3(float3);
+int3 __ovld __cnfn convert_int3_sat(float3);
+uint3 __ovld __cnfn convert_uint3_rte(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
+uint3 __ovld __cnfn convert_uint3_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
+uint3 __ovld __cnfn convert_uint3_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
+uint3 __ovld __cnfn convert_uint3_rtn(char3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
+uint3 __ovld __cnfn convert_uint3(char3);
+uint3 __ovld __cnfn convert_uint3_sat(char3);
+uint3 __ovld __cnfn convert_uint3_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
+uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
+uint3 __ovld __cnfn convert_uint3(uchar3);
+uint3 __ovld __cnfn convert_uint3_sat(uchar3);
+uint3 __ovld __cnfn convert_uint3_rte(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
+uint3 __ovld __cnfn convert_uint3_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
+uint3 __ovld __cnfn convert_uint3_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
+uint3 __ovld __cnfn convert_uint3_rtn(short3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
+uint3 __ovld __cnfn convert_uint3(short3);
+uint3 __ovld __cnfn convert_uint3_sat(short3);
+uint3 __ovld __cnfn convert_uint3_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
+uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
+uint3 __ovld __cnfn convert_uint3(ushort3);
+uint3 __ovld __cnfn convert_uint3_sat(ushort3);
+uint3 __ovld __cnfn convert_uint3_rte(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
+uint3 __ovld __cnfn convert_uint3_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
+uint3 __ovld __cnfn convert_uint3_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
+uint3 __ovld __cnfn convert_uint3_rtn(int3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
+uint3 __ovld __cnfn convert_uint3(int3);
+uint3 __ovld __cnfn convert_uint3_sat(int3);
+uint3 __ovld __cnfn convert_uint3_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
+uint3 __ovld __cnfn convert_uint3_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
+uint3 __ovld __cnfn convert_uint3_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
+uint3 __ovld __cnfn convert_uint3_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
+uint3 __ovld __cnfn convert_uint3(uint3);
+uint3 __ovld __cnfn convert_uint3_sat(uint3);
+uint3 __ovld __cnfn convert_uint3_rte(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
+uint3 __ovld __cnfn convert_uint3_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
+uint3 __ovld __cnfn convert_uint3_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
+uint3 __ovld __cnfn convert_uint3_rtn(long3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
+uint3 __ovld __cnfn convert_uint3(long3);
+uint3 __ovld __cnfn convert_uint3_sat(long3);
+uint3 __ovld __cnfn convert_uint3_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
+uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
+uint3 __ovld __cnfn convert_uint3(ulong3);
+uint3 __ovld __cnfn convert_uint3_sat(ulong3);
+uint3 __ovld __cnfn convert_uint3_rte(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
+uint3 __ovld __cnfn convert_uint3_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
+uint3 __ovld __cnfn convert_uint3_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
+uint3 __ovld __cnfn convert_uint3_rtn(float3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
+uint3 __ovld __cnfn convert_uint3(float3);
+uint3 __ovld __cnfn convert_uint3_sat(float3);
+long3 __ovld __cnfn convert_long3_rte(char3);
+long3 __ovld __cnfn convert_long3_sat_rte(char3);
+long3 __ovld __cnfn convert_long3_rtz(char3);
+long3 __ovld __cnfn convert_long3_sat_rtz(char3);
+long3 __ovld __cnfn convert_long3_rtp(char3);
+long3 __ovld __cnfn convert_long3_sat_rtp(char3);
+long3 __ovld __cnfn convert_long3_rtn(char3);
+long3 __ovld __cnfn convert_long3_sat_rtn(char3);
+long3 __ovld __cnfn convert_long3(char3);
+long3 __ovld __cnfn convert_long3_sat(char3);
+long3 __ovld __cnfn convert_long3_rte(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
+long3 __ovld __cnfn convert_long3_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
+long3 __ovld __cnfn convert_long3_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
+long3 __ovld __cnfn convert_long3_rtn(uchar3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
+long3 __ovld __cnfn convert_long3(uchar3);
+long3 __ovld __cnfn convert_long3_sat(uchar3);
+long3 __ovld __cnfn convert_long3_rte(short3);
+long3 __ovld __cnfn convert_long3_sat_rte(short3);
+long3 __ovld __cnfn convert_long3_rtz(short3);
+long3 __ovld __cnfn convert_long3_sat_rtz(short3);
+long3 __ovld __cnfn convert_long3_rtp(short3);
+long3 __ovld __cnfn convert_long3_sat_rtp(short3);
+long3 __ovld __cnfn convert_long3_rtn(short3);
+long3 __ovld __cnfn convert_long3_sat_rtn(short3);
+long3 __ovld __cnfn convert_long3(short3);
+long3 __ovld __cnfn convert_long3_sat(short3);
+long3 __ovld __cnfn convert_long3_rte(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
+long3 __ovld __cnfn convert_long3_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
+long3 __ovld __cnfn convert_long3_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
+long3 __ovld __cnfn convert_long3_rtn(ushort3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
+long3 __ovld __cnfn convert_long3(ushort3);
+long3 __ovld __cnfn convert_long3_sat(ushort3);
+long3 __ovld __cnfn convert_long3_rte(int3);
+long3 __ovld __cnfn convert_long3_sat_rte(int3);
+long3 __ovld __cnfn convert_long3_rtz(int3);
+long3 __ovld __cnfn convert_long3_sat_rtz(int3);
+long3 __ovld __cnfn convert_long3_rtp(int3);
+long3 __ovld __cnfn convert_long3_sat_rtp(int3);
+long3 __ovld __cnfn convert_long3_rtn(int3);
+long3 __ovld __cnfn convert_long3_sat_rtn(int3);
+long3 __ovld __cnfn convert_long3(int3);
+long3 __ovld __cnfn convert_long3_sat(int3);
+long3 __ovld __cnfn convert_long3_rte(uint3);
+long3 __ovld __cnfn convert_long3_sat_rte(uint3);
+long3 __ovld __cnfn convert_long3_rtz(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
+long3 __ovld __cnfn convert_long3_rtp(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
+long3 __ovld __cnfn convert_long3_rtn(uint3);
+long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
+long3 __ovld __cnfn convert_long3(uint3);
+long3 __ovld __cnfn convert_long3_sat(uint3);
+long3 __ovld __cnfn convert_long3_rte(long3);
+long3 __ovld __cnfn convert_long3_sat_rte(long3);
+long3 __ovld __cnfn convert_long3_rtz(long3);
+long3 __ovld __cnfn convert_long3_sat_rtz(long3);
+long3 __ovld __cnfn convert_long3_rtp(long3);
+long3 __ovld __cnfn convert_long3_sat_rtp(long3);
+long3 __ovld __cnfn convert_long3_rtn(long3);
+long3 __ovld __cnfn convert_long3_sat_rtn(long3);
+long3 __ovld __cnfn convert_long3(long3);
+long3 __ovld __cnfn convert_long3_sat(long3);
+long3 __ovld __cnfn convert_long3_rte(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
+long3 __ovld __cnfn convert_long3_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
+long3 __ovld __cnfn convert_long3_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
+long3 __ovld __cnfn convert_long3_rtn(ulong3);
+long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
+long3 __ovld __cnfn convert_long3(ulong3);
+long3 __ovld __cnfn convert_long3_sat(ulong3);
+long3 __ovld __cnfn convert_long3_rte(float3);
+long3 __ovld __cnfn convert_long3_sat_rte(float3);
+long3 __ovld __cnfn convert_long3_rtz(float3);
+long3 __ovld __cnfn convert_long3_sat_rtz(float3);
+long3 __ovld __cnfn convert_long3_rtp(float3);
+long3 __ovld __cnfn convert_long3_sat_rtp(float3);
+long3 __ovld __cnfn convert_long3_rtn(float3);
+long3 __ovld __cnfn convert_long3_sat_rtn(float3);
+long3 __ovld __cnfn convert_long3(float3);
+long3 __ovld __cnfn convert_long3_sat(float3);
+ulong3 __ovld __cnfn convert_ulong3_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
+ulong3 __ovld __cnfn convert_ulong3(char3);
+ulong3 __ovld __cnfn convert_ulong3_sat(char3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
+ulong3 __ovld __cnfn convert_ulong3(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
+ulong3 __ovld __cnfn convert_ulong3_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
+ulong3 __ovld __cnfn convert_ulong3(short3);
+ulong3 __ovld __cnfn convert_ulong3_sat(short3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
+ulong3 __ovld __cnfn convert_ulong3(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
+ulong3 __ovld __cnfn convert_ulong3_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
+ulong3 __ovld __cnfn convert_ulong3(int3);
+ulong3 __ovld __cnfn convert_ulong3_sat(int3);
+ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
+ulong3 __ovld __cnfn convert_ulong3(uint3);
+ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
+ulong3 __ovld __cnfn convert_ulong3_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
+ulong3 __ovld __cnfn convert_ulong3(long3);
+ulong3 __ovld __cnfn convert_ulong3_sat(long3);
+ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
+ulong3 __ovld __cnfn convert_ulong3(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
+ulong3 __ovld __cnfn convert_ulong3_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
+ulong3 __ovld __cnfn convert_ulong3(float3);
+ulong3 __ovld __cnfn convert_ulong3_sat(float3);
+float3 __ovld __cnfn convert_float3_rte(char3);
+float3 __ovld __cnfn convert_float3_rtz(char3);
+float3 __ovld __cnfn convert_float3_rtp(char3);
+float3 __ovld __cnfn convert_float3_rtn(char3);
+float3 __ovld __cnfn convert_float3(char3);
+float3 __ovld __cnfn convert_float3_rte(uchar3);
+float3 __ovld __cnfn convert_float3_rtz(uchar3);
+float3 __ovld __cnfn convert_float3_rtp(uchar3);
+float3 __ovld __cnfn convert_float3_rtn(uchar3);
+float3 __ovld __cnfn convert_float3(uchar3);
+float3 __ovld __cnfn convert_float3_rte(short3);
+float3 __ovld __cnfn convert_float3_rtz(short3);
+float3 __ovld __cnfn convert_float3_rtp(short3);
+float3 __ovld __cnfn convert_float3_rtn(short3);
+float3 __ovld __cnfn convert_float3(short3);
+float3 __ovld __cnfn convert_float3_rte(ushort3);
+float3 __ovld __cnfn convert_float3_rtz(ushort3);
+float3 __ovld __cnfn convert_float3_rtp(ushort3);
+float3 __ovld __cnfn convert_float3_rtn(ushort3);
+float3 __ovld __cnfn convert_float3(ushort3);
+float3 __ovld __cnfn convert_float3_rte(int3);
+float3 __ovld __cnfn convert_float3_rtz(int3);
+float3 __ovld __cnfn convert_float3_rtp(int3);
+float3 __ovld __cnfn convert_float3_rtn(int3);
+float3 __ovld __cnfn convert_float3(int3);
+float3 __ovld __cnfn convert_float3_rte(uint3);
+float3 __ovld __cnfn convert_float3_rtz(uint3);
+float3 __ovld __cnfn convert_float3_rtp(uint3);
+float3 __ovld __cnfn convert_float3_rtn(uint3);
+float3 __ovld __cnfn convert_float3(uint3);
+float3 __ovld __cnfn convert_float3_rte(long3);
+float3 __ovld __cnfn convert_float3_rtz(long3);
+float3 __ovld __cnfn convert_float3_rtp(long3);
+float3 __ovld __cnfn convert_float3_rtn(long3);
+float3 __ovld __cnfn convert_float3(long3);
+float3 __ovld __cnfn convert_float3_rte(ulong3);
+float3 __ovld __cnfn convert_float3_rtz(ulong3);
+float3 __ovld __cnfn convert_float3_rtp(ulong3);
+float3 __ovld __cnfn convert_float3_rtn(ulong3);
+float3 __ovld __cnfn convert_float3(ulong3);
+float3 __ovld __cnfn convert_float3_rte(float3);
+float3 __ovld __cnfn convert_float3_rtz(float3);
+float3 __ovld __cnfn convert_float3_rtp(float3);
+float3 __ovld __cnfn convert_float3_rtn(float3);
+float3 __ovld __cnfn convert_float3(float3);
+char4 __ovld __cnfn convert_char4_rte(char4);
+char4 __ovld __cnfn convert_char4_sat_rte(char4);
+char4 __ovld __cnfn convert_char4_rtz(char4);
+char4 __ovld __cnfn convert_char4_sat_rtz(char4);
+char4 __ovld __cnfn convert_char4_rtp(char4);
+char4 __ovld __cnfn convert_char4_sat_rtp(char4);
+char4 __ovld __cnfn convert_char4_rtn(char4);
+char4 __ovld __cnfn convert_char4_sat_rtn(char4);
+char4 __ovld __cnfn convert_char4(char4);
+char4 __ovld __cnfn convert_char4_sat(char4);
+char4 __ovld __cnfn convert_char4_rte(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
+char4 __ovld __cnfn convert_char4_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
+char4 __ovld __cnfn convert_char4_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
+char4 __ovld __cnfn convert_char4_rtn(uchar4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
+char4 __ovld __cnfn convert_char4(uchar4);
+char4 __ovld __cnfn convert_char4_sat(uchar4);
+char4 __ovld __cnfn convert_char4_rte(short4);
+char4 __ovld __cnfn convert_char4_sat_rte(short4);
+char4 __ovld __cnfn convert_char4_rtz(short4);
+char4 __ovld __cnfn convert_char4_sat_rtz(short4);
+char4 __ovld __cnfn convert_char4_rtp(short4);
+char4 __ovld __cnfn convert_char4_sat_rtp(short4);
+char4 __ovld __cnfn convert_char4_rtn(short4);
+char4 __ovld __cnfn convert_char4_sat_rtn(short4);
+char4 __ovld __cnfn convert_char4(short4);
+char4 __ovld __cnfn convert_char4_sat(short4);
+char4 __ovld __cnfn convert_char4_rte(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
+char4 __ovld __cnfn convert_char4_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
+char4 __ovld __cnfn convert_char4_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
+char4 __ovld __cnfn convert_char4_rtn(ushort4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
+char4 __ovld __cnfn convert_char4(ushort4);
+char4 __ovld __cnfn convert_char4_sat(ushort4);
+char4 __ovld __cnfn convert_char4_rte(int4);
+char4 __ovld __cnfn convert_char4_sat_rte(int4);
+char4 __ovld __cnfn convert_char4_rtz(int4);
+char4 __ovld __cnfn convert_char4_sat_rtz(int4);
+char4 __ovld __cnfn convert_char4_rtp(int4);
+char4 __ovld __cnfn convert_char4_sat_rtp(int4);
+char4 __ovld __cnfn convert_char4_rtn(int4);
+char4 __ovld __cnfn convert_char4_sat_rtn(int4);
+char4 __ovld __cnfn convert_char4(int4);
+char4 __ovld __cnfn convert_char4_sat(int4);
+char4 __ovld __cnfn convert_char4_rte(uint4);
+char4 __ovld __cnfn convert_char4_sat_rte(uint4);
+char4 __ovld __cnfn convert_char4_rtz(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
+char4 __ovld __cnfn convert_char4_rtp(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
+char4 __ovld __cnfn convert_char4_rtn(uint4);
+char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
+char4 __ovld __cnfn convert_char4(uint4);
+char4 __ovld __cnfn convert_char4_sat(uint4);
+char4 __ovld __cnfn convert_char4_rte(long4);
+char4 __ovld __cnfn convert_char4_sat_rte(long4);
+char4 __ovld __cnfn convert_char4_rtz(long4);
+char4 __ovld __cnfn convert_char4_sat_rtz(long4);
+char4 __ovld __cnfn convert_char4_rtp(long4);
+char4 __ovld __cnfn convert_char4_sat_rtp(long4);
+char4 __ovld __cnfn convert_char4_rtn(long4);
+char4 __ovld __cnfn convert_char4_sat_rtn(long4);
+char4 __ovld __cnfn convert_char4(long4);
+char4 __ovld __cnfn convert_char4_sat(long4);
+char4 __ovld __cnfn convert_char4_rte(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
+char4 __ovld __cnfn convert_char4_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
+char4 __ovld __cnfn convert_char4_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
+char4 __ovld __cnfn convert_char4_rtn(ulong4);
+char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
+char4 __ovld __cnfn convert_char4(ulong4);
+char4 __ovld __cnfn convert_char4_sat(ulong4);
+char4 __ovld __cnfn convert_char4_rte(float4);
+char4 __ovld __cnfn convert_char4_sat_rte(float4);
+char4 __ovld __cnfn convert_char4_rtz(float4);
+char4 __ovld __cnfn convert_char4_sat_rtz(float4);
+char4 __ovld __cnfn convert_char4_rtp(float4);
+char4 __ovld __cnfn convert_char4_sat_rtp(float4);
+char4 __ovld __cnfn convert_char4_rtn(float4);
+char4 __ovld __cnfn convert_char4_sat_rtn(float4);
+char4 __ovld __cnfn convert_char4(float4);
+char4 __ovld __cnfn convert_char4_sat(float4);
+uchar4 __ovld __cnfn convert_uchar4_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
+uchar4 __ovld __cnfn convert_uchar4(char4);
+uchar4 __ovld __cnfn convert_uchar4_sat(char4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
+uchar4 __ovld __cnfn convert_uchar4(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
+uchar4 __ovld __cnfn convert_uchar4_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
+uchar4 __ovld __cnfn convert_uchar4(short4);
+uchar4 __ovld __cnfn convert_uchar4_sat(short4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
+uchar4 __ovld __cnfn convert_uchar4(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
+uchar4 __ovld __cnfn convert_uchar4_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
+uchar4 __ovld __cnfn convert_uchar4(int4);
+uchar4 __ovld __cnfn convert_uchar4_sat(int4);
+uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
+uchar4 __ovld __cnfn convert_uchar4(uint4);
+uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
+uchar4 __ovld __cnfn convert_uchar4_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
+uchar4 __ovld __cnfn convert_uchar4(long4);
+uchar4 __ovld __cnfn convert_uchar4_sat(long4);
+uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
+uchar4 __ovld __cnfn convert_uchar4(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
+uchar4 __ovld __cnfn convert_uchar4_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
+uchar4 __ovld __cnfn convert_uchar4(float4);
+uchar4 __ovld __cnfn convert_uchar4_sat(float4);
+short4 __ovld __cnfn convert_short4_rte(char4);
+short4 __ovld __cnfn convert_short4_sat_rte(char4);
+short4 __ovld __cnfn convert_short4_rtz(char4);
+short4 __ovld __cnfn convert_short4_sat_rtz(char4);
+short4 __ovld __cnfn convert_short4_rtp(char4);
+short4 __ovld __cnfn convert_short4_sat_rtp(char4);
+short4 __ovld __cnfn convert_short4_rtn(char4);
+short4 __ovld __cnfn convert_short4_sat_rtn(char4);
+short4 __ovld __cnfn convert_short4(char4);
+short4 __ovld __cnfn convert_short4_sat(char4);
+short4 __ovld __cnfn convert_short4_rte(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
+short4 __ovld __cnfn convert_short4_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
+short4 __ovld __cnfn convert_short4_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
+short4 __ovld __cnfn convert_short4_rtn(uchar4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
+short4 __ovld __cnfn convert_short4(uchar4);
+short4 __ovld __cnfn convert_short4_sat(uchar4);
+short4 __ovld __cnfn convert_short4_rte(short4);
+short4 __ovld __cnfn convert_short4_sat_rte(short4);
+short4 __ovld __cnfn convert_short4_rtz(short4);
+short4 __ovld __cnfn convert_short4_sat_rtz(short4);
+short4 __ovld __cnfn convert_short4_rtp(short4);
+short4 __ovld __cnfn convert_short4_sat_rtp(short4);
+short4 __ovld __cnfn convert_short4_rtn(short4);
+short4 __ovld __cnfn convert_short4_sat_rtn(short4);
+short4 __ovld __cnfn convert_short4(short4);
+short4 __ovld __cnfn convert_short4_sat(short4);
+short4 __ovld __cnfn convert_short4_rte(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
+short4 __ovld __cnfn convert_short4_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
+short4 __ovld __cnfn convert_short4_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
+short4 __ovld __cnfn convert_short4_rtn(ushort4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
+short4 __ovld __cnfn convert_short4(ushort4);
+short4 __ovld __cnfn convert_short4_sat(ushort4);
+short4 __ovld __cnfn convert_short4_rte(int4);
+short4 __ovld __cnfn convert_short4_sat_rte(int4);
+short4 __ovld __cnfn convert_short4_rtz(int4);
+short4 __ovld __cnfn convert_short4_sat_rtz(int4);
+short4 __ovld __cnfn convert_short4_rtp(int4);
+short4 __ovld __cnfn convert_short4_sat_rtp(int4);
+short4 __ovld __cnfn convert_short4_rtn(int4);
+short4 __ovld __cnfn convert_short4_sat_rtn(int4);
+short4 __ovld __cnfn convert_short4(int4);
+short4 __ovld __cnfn convert_short4_sat(int4);
+short4 __ovld __cnfn convert_short4_rte(uint4);
+short4 __ovld __cnfn convert_short4_sat_rte(uint4);
+short4 __ovld __cnfn convert_short4_rtz(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
+short4 __ovld __cnfn convert_short4_rtp(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
+short4 __ovld __cnfn convert_short4_rtn(uint4);
+short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
+short4 __ovld __cnfn convert_short4(uint4);
+short4 __ovld __cnfn convert_short4_sat(uint4);
+short4 __ovld __cnfn convert_short4_rte(long4);
+short4 __ovld __cnfn convert_short4_sat_rte(long4);
+short4 __ovld __cnfn convert_short4_rtz(long4);
+short4 __ovld __cnfn convert_short4_sat_rtz(long4);
+short4 __ovld __cnfn convert_short4_rtp(long4);
+short4 __ovld __cnfn convert_short4_sat_rtp(long4);
+short4 __ovld __cnfn convert_short4_rtn(long4);
+short4 __ovld __cnfn convert_short4_sat_rtn(long4);
+short4 __ovld __cnfn convert_short4(long4);
+short4 __ovld __cnfn convert_short4_sat(long4);
+short4 __ovld __cnfn convert_short4_rte(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
+short4 __ovld __cnfn convert_short4_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
+short4 __ovld __cnfn convert_short4_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
+short4 __ovld __cnfn convert_short4_rtn(ulong4);
+short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
+short4 __ovld __cnfn convert_short4(ulong4);
+short4 __ovld __cnfn convert_short4_sat(ulong4);
+short4 __ovld __cnfn convert_short4_rte(float4);
+short4 __ovld __cnfn convert_short4_sat_rte(float4);
+short4 __ovld __cnfn convert_short4_rtz(float4);
+short4 __ovld __cnfn convert_short4_sat_rtz(float4);
+short4 __ovld __cnfn convert_short4_rtp(float4);
+short4 __ovld __cnfn convert_short4_sat_rtp(float4);
+short4 __ovld __cnfn convert_short4_rtn(float4);
+short4 __ovld __cnfn convert_short4_sat_rtn(float4);
+short4 __ovld __cnfn convert_short4(float4);
+short4 __ovld __cnfn convert_short4_sat(float4);
+ushort4 __ovld __cnfn convert_ushort4_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
+ushort4 __ovld __cnfn convert_ushort4(char4);
+ushort4 __ovld __cnfn convert_ushort4_sat(char4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
+ushort4 __ovld __cnfn convert_ushort4(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
+ushort4 __ovld __cnfn convert_ushort4_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
+ushort4 __ovld __cnfn convert_ushort4(short4);
+ushort4 __ovld __cnfn convert_ushort4_sat(short4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
+ushort4 __ovld __cnfn convert_ushort4(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
+ushort4 __ovld __cnfn convert_ushort4_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
+ushort4 __ovld __cnfn convert_ushort4(int4);
+ushort4 __ovld __cnfn convert_ushort4_sat(int4);
+ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
+ushort4 __ovld __cnfn convert_ushort4(uint4);
+ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
+ushort4 __ovld __cnfn convert_ushort4_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
+ushort4 __ovld __cnfn convert_ushort4(long4);
+ushort4 __ovld __cnfn convert_ushort4_sat(long4);
+ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
+ushort4 __ovld __cnfn convert_ushort4(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
+ushort4 __ovld __cnfn convert_ushort4_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
+ushort4 __ovld __cnfn convert_ushort4(float4);
+ushort4 __ovld __cnfn convert_ushort4_sat(float4);
+int4 __ovld __cnfn convert_int4_rte(char4);
+int4 __ovld __cnfn convert_int4_sat_rte(char4);
+int4 __ovld __cnfn convert_int4_rtz(char4);
+int4 __ovld __cnfn convert_int4_sat_rtz(char4);
+int4 __ovld __cnfn convert_int4_rtp(char4);
+int4 __ovld __cnfn convert_int4_sat_rtp(char4);
+int4 __ovld __cnfn convert_int4_rtn(char4);
+int4 __ovld __cnfn convert_int4_sat_rtn(char4);
+int4 __ovld __cnfn convert_int4(char4);
+int4 __ovld __cnfn convert_int4_sat(char4);
+int4 __ovld __cnfn convert_int4_rte(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
+int4 __ovld __cnfn convert_int4_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
+int4 __ovld __cnfn convert_int4_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
+int4 __ovld __cnfn convert_int4_rtn(uchar4);
+int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
+int4 __ovld __cnfn convert_int4(uchar4);
+int4 __ovld __cnfn convert_int4_sat(uchar4);
+int4 __ovld __cnfn convert_int4_rte(short4);
+int4 __ovld __cnfn convert_int4_sat_rte(short4);
+int4 __ovld __cnfn convert_int4_rtz(short4);
+int4 __ovld __cnfn convert_int4_sat_rtz(short4);
+int4 __ovld __cnfn convert_int4_rtp(short4);
+int4 __ovld __cnfn convert_int4_sat_rtp(short4);
+int4 __ovld __cnfn convert_int4_rtn(short4);
+int4 __ovld __cnfn convert_int4_sat_rtn(short4);
+int4 __ovld __cnfn convert_int4(short4);
+int4 __ovld __cnfn convert_int4_sat(short4);
+int4 __ovld __cnfn convert_int4_rte(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
+int4 __ovld __cnfn convert_int4_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
+int4 __ovld __cnfn convert_int4_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
+int4 __ovld __cnfn convert_int4_rtn(ushort4);
+int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
+int4 __ovld __cnfn convert_int4(ushort4);
+int4 __ovld __cnfn convert_int4_sat(ushort4);
+int4 __ovld __cnfn
convert_int4_rte(int4); +int4 __ovld __cnfn convert_int4_sat_rte(int4); +int4 __ovld __cnfn convert_int4_rtz(int4); +int4 __ovld __cnfn convert_int4_sat_rtz(int4); +int4 __ovld __cnfn convert_int4_rtp(int4); +int4 __ovld __cnfn convert_int4_sat_rtp(int4); +int4 __ovld __cnfn convert_int4_rtn(int4); +int4 __ovld __cnfn convert_int4_sat_rtn(int4); +int4 __ovld __cnfn convert_int4(int4); +int4 __ovld __cnfn convert_int4_sat(int4); +int4 __ovld __cnfn convert_int4_rte(uint4); +int4 __ovld __cnfn convert_int4_sat_rte(uint4); +int4 __ovld __cnfn convert_int4_rtz(uint4); +int4 __ovld __cnfn convert_int4_sat_rtz(uint4); +int4 __ovld __cnfn convert_int4_rtp(uint4); +int4 __ovld __cnfn convert_int4_sat_rtp(uint4); +int4 __ovld __cnfn convert_int4_rtn(uint4); +int4 __ovld __cnfn convert_int4_sat_rtn(uint4); +int4 __ovld __cnfn convert_int4(uint4); +int4 __ovld __cnfn convert_int4_sat(uint4); +int4 __ovld __cnfn convert_int4_rte(long4); +int4 __ovld __cnfn convert_int4_sat_rte(long4); +int4 __ovld __cnfn convert_int4_rtz(long4); +int4 __ovld __cnfn convert_int4_sat_rtz(long4); +int4 __ovld __cnfn convert_int4_rtp(long4); +int4 __ovld __cnfn convert_int4_sat_rtp(long4); +int4 __ovld __cnfn convert_int4_rtn(long4); +int4 __ovld __cnfn convert_int4_sat_rtn(long4); +int4 __ovld __cnfn convert_int4(long4); +int4 __ovld __cnfn convert_int4_sat(long4); +int4 __ovld __cnfn convert_int4_rte(ulong4); +int4 __ovld __cnfn convert_int4_sat_rte(ulong4); +int4 __ovld __cnfn convert_int4_rtz(ulong4); +int4 __ovld __cnfn convert_int4_sat_rtz(ulong4); +int4 __ovld __cnfn convert_int4_rtp(ulong4); +int4 __ovld __cnfn convert_int4_sat_rtp(ulong4); +int4 __ovld __cnfn convert_int4_rtn(ulong4); +int4 __ovld __cnfn convert_int4_sat_rtn(ulong4); +int4 __ovld __cnfn convert_int4(ulong4); +int4 __ovld __cnfn convert_int4_sat(ulong4); +int4 __ovld __cnfn convert_int4_rte(float4); +int4 __ovld __cnfn convert_int4_sat_rte(float4); +int4 __ovld __cnfn convert_int4_rtz(float4); +int4 __ovld __cnfn convert_int4_sat_rtz(float4); +int4 __ovld __cnfn convert_int4_rtp(float4); +int4 __ovld __cnfn convert_int4_sat_rtp(float4); +int4 __ovld __cnfn convert_int4_rtn(float4); +int4 __ovld __cnfn convert_int4_sat_rtn(float4); +int4 __ovld __cnfn convert_int4(float4); +int4 __ovld __cnfn convert_int4_sat(float4); +uint4 __ovld __cnfn convert_uint4_rte(char4); +uint4 __ovld __cnfn convert_uint4_sat_rte(char4); +uint4 __ovld __cnfn convert_uint4_rtz(char4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(char4); +uint4 __ovld __cnfn convert_uint4_rtp(char4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(char4); +uint4 __ovld __cnfn convert_uint4_rtn(char4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(char4); +uint4 __ovld __cnfn convert_uint4(char4); +uint4 __ovld __cnfn convert_uint4_sat(char4); +uint4 __ovld __cnfn convert_uint4_rte(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4); +uint4 __ovld __cnfn convert_uint4_rtz(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4); +uint4 __ovld __cnfn convert_uint4_rtp(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4); +uint4 __ovld __cnfn convert_uint4_rtn(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4); +uint4 __ovld __cnfn convert_uint4(uchar4); +uint4 __ovld __cnfn convert_uint4_sat(uchar4); +uint4 __ovld __cnfn convert_uint4_rte(short4); +uint4 __ovld __cnfn convert_uint4_sat_rte(short4); +uint4 __ovld __cnfn convert_uint4_rtz(short4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(short4); +uint4 __ovld __cnfn convert_uint4_rtp(short4); +uint4 __ovld __cnfn 
convert_uint4_sat_rtp(short4); +uint4 __ovld __cnfn convert_uint4_rtn(short4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(short4); +uint4 __ovld __cnfn convert_uint4(short4); +uint4 __ovld __cnfn convert_uint4_sat(short4); +uint4 __ovld __cnfn convert_uint4_rte(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4); +uint4 __ovld __cnfn convert_uint4_rtz(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4); +uint4 __ovld __cnfn convert_uint4_rtp(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4); +uint4 __ovld __cnfn convert_uint4_rtn(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4); +uint4 __ovld __cnfn convert_uint4(ushort4); +uint4 __ovld __cnfn convert_uint4_sat(ushort4); +uint4 __ovld __cnfn convert_uint4_rte(int4); +uint4 __ovld __cnfn convert_uint4_sat_rte(int4); +uint4 __ovld __cnfn convert_uint4_rtz(int4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(int4); +uint4 __ovld __cnfn convert_uint4_rtp(int4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(int4); +uint4 __ovld __cnfn convert_uint4_rtn(int4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(int4); +uint4 __ovld __cnfn convert_uint4(int4); +uint4 __ovld __cnfn convert_uint4_sat(int4); +uint4 __ovld __cnfn convert_uint4_rte(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rte(uint4); +uint4 __ovld __cnfn convert_uint4_rtz(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4); +uint4 __ovld __cnfn convert_uint4_rtp(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4); +uint4 __ovld __cnfn convert_uint4_rtn(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4); +uint4 __ovld __cnfn convert_uint4(uint4); +uint4 __ovld __cnfn convert_uint4_sat(uint4); +uint4 __ovld __cnfn convert_uint4_rte(long4); +uint4 __ovld __cnfn convert_uint4_sat_rte(long4); +uint4 __ovld __cnfn convert_uint4_rtz(long4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(long4); +uint4 __ovld __cnfn convert_uint4_rtp(long4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(long4); +uint4 __ovld __cnfn convert_uint4_rtn(long4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(long4); +uint4 __ovld __cnfn convert_uint4(long4); +uint4 __ovld __cnfn convert_uint4_sat(long4); +uint4 __ovld __cnfn convert_uint4_rte(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4); +uint4 __ovld __cnfn convert_uint4_rtz(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4); +uint4 __ovld __cnfn convert_uint4_rtp(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4); +uint4 __ovld __cnfn convert_uint4_rtn(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4); +uint4 __ovld __cnfn convert_uint4(ulong4); +uint4 __ovld __cnfn convert_uint4_sat(ulong4); +uint4 __ovld __cnfn convert_uint4_rte(float4); +uint4 __ovld __cnfn convert_uint4_sat_rte(float4); +uint4 __ovld __cnfn convert_uint4_rtz(float4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(float4); +uint4 __ovld __cnfn convert_uint4_rtp(float4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(float4); +uint4 __ovld __cnfn convert_uint4_rtn(float4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(float4); +uint4 __ovld __cnfn convert_uint4(float4); +uint4 __ovld __cnfn convert_uint4_sat(float4); +long4 __ovld __cnfn convert_long4_rte(char4); +long4 __ovld __cnfn convert_long4_sat_rte(char4); +long4 __ovld __cnfn convert_long4_rtz(char4); +long4 __ovld __cnfn convert_long4_sat_rtz(char4); +long4 __ovld __cnfn convert_long4_rtp(char4); +long4 __ovld __cnfn convert_long4_sat_rtp(char4); +long4 __ovld __cnfn convert_long4_rtn(char4); +long4 __ovld __cnfn convert_long4_sat_rtn(char4); +long4 __ovld 
__cnfn convert_long4(char4); +long4 __ovld __cnfn convert_long4_sat(char4); +long4 __ovld __cnfn convert_long4_rte(uchar4); +long4 __ovld __cnfn convert_long4_sat_rte(uchar4); +long4 __ovld __cnfn convert_long4_rtz(uchar4); +long4 __ovld __cnfn convert_long4_sat_rtz(uchar4); +long4 __ovld __cnfn convert_long4_rtp(uchar4); +long4 __ovld __cnfn convert_long4_sat_rtp(uchar4); +long4 __ovld __cnfn convert_long4_rtn(uchar4); +long4 __ovld __cnfn convert_long4_sat_rtn(uchar4); +long4 __ovld __cnfn convert_long4(uchar4); +long4 __ovld __cnfn convert_long4_sat(uchar4); +long4 __ovld __cnfn convert_long4_rte(short4); +long4 __ovld __cnfn convert_long4_sat_rte(short4); +long4 __ovld __cnfn convert_long4_rtz(short4); +long4 __ovld __cnfn convert_long4_sat_rtz(short4); +long4 __ovld __cnfn convert_long4_rtp(short4); +long4 __ovld __cnfn convert_long4_sat_rtp(short4); +long4 __ovld __cnfn convert_long4_rtn(short4); +long4 __ovld __cnfn convert_long4_sat_rtn(short4); +long4 __ovld __cnfn convert_long4(short4); +long4 __ovld __cnfn convert_long4_sat(short4); +long4 __ovld __cnfn convert_long4_rte(ushort4); +long4 __ovld __cnfn convert_long4_sat_rte(ushort4); +long4 __ovld __cnfn convert_long4_rtz(ushort4); +long4 __ovld __cnfn convert_long4_sat_rtz(ushort4); +long4 __ovld __cnfn convert_long4_rtp(ushort4); +long4 __ovld __cnfn convert_long4_sat_rtp(ushort4); +long4 __ovld __cnfn convert_long4_rtn(ushort4); +long4 __ovld __cnfn convert_long4_sat_rtn(ushort4); +long4 __ovld __cnfn convert_long4(ushort4); +long4 __ovld __cnfn convert_long4_sat(ushort4); +long4 __ovld __cnfn convert_long4_rte(int4); +long4 __ovld __cnfn convert_long4_sat_rte(int4); +long4 __ovld __cnfn convert_long4_rtz(int4); +long4 __ovld __cnfn convert_long4_sat_rtz(int4); +long4 __ovld __cnfn convert_long4_rtp(int4); +long4 __ovld __cnfn convert_long4_sat_rtp(int4); +long4 __ovld __cnfn convert_long4_rtn(int4); +long4 __ovld __cnfn convert_long4_sat_rtn(int4); +long4 __ovld __cnfn convert_long4(int4); +long4 __ovld __cnfn convert_long4_sat(int4); +long4 __ovld __cnfn convert_long4_rte(uint4); +long4 __ovld __cnfn convert_long4_sat_rte(uint4); +long4 __ovld __cnfn convert_long4_rtz(uint4); +long4 __ovld __cnfn convert_long4_sat_rtz(uint4); +long4 __ovld __cnfn convert_long4_rtp(uint4); +long4 __ovld __cnfn convert_long4_sat_rtp(uint4); +long4 __ovld __cnfn convert_long4_rtn(uint4); +long4 __ovld __cnfn convert_long4_sat_rtn(uint4); +long4 __ovld __cnfn convert_long4(uint4); +long4 __ovld __cnfn convert_long4_sat(uint4); +long4 __ovld __cnfn convert_long4_rte(long4); +long4 __ovld __cnfn convert_long4_sat_rte(long4); +long4 __ovld __cnfn convert_long4_rtz(long4); +long4 __ovld __cnfn convert_long4_sat_rtz(long4); +long4 __ovld __cnfn convert_long4_rtp(long4); +long4 __ovld __cnfn convert_long4_sat_rtp(long4); +long4 __ovld __cnfn convert_long4_rtn(long4); +long4 __ovld __cnfn convert_long4_sat_rtn(long4); +long4 __ovld __cnfn convert_long4(long4); +long4 __ovld __cnfn convert_long4_sat(long4); +long4 __ovld __cnfn convert_long4_rte(ulong4); +long4 __ovld __cnfn convert_long4_sat_rte(ulong4); +long4 __ovld __cnfn convert_long4_rtz(ulong4); +long4 __ovld __cnfn convert_long4_sat_rtz(ulong4); +long4 __ovld __cnfn convert_long4_rtp(ulong4); +long4 __ovld __cnfn convert_long4_sat_rtp(ulong4); +long4 __ovld __cnfn convert_long4_rtn(ulong4); +long4 __ovld __cnfn convert_long4_sat_rtn(ulong4); +long4 __ovld __cnfn convert_long4(ulong4); +long4 __ovld __cnfn convert_long4_sat(ulong4); +long4 __ovld __cnfn convert_long4_rte(float4); +long4 __ovld 
__cnfn convert_long4_sat_rte(float4); +long4 __ovld __cnfn convert_long4_rtz(float4); +long4 __ovld __cnfn convert_long4_sat_rtz(float4); +long4 __ovld __cnfn convert_long4_rtp(float4); +long4 __ovld __cnfn convert_long4_sat_rtp(float4); +long4 __ovld __cnfn convert_long4_rtn(float4); +long4 __ovld __cnfn convert_long4_sat_rtn(float4); +long4 __ovld __cnfn convert_long4(float4); +long4 __ovld __cnfn convert_long4_sat(float4); +ulong4 __ovld __cnfn convert_ulong4_rte(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4); +ulong4 __ovld __cnfn convert_ulong4_rtz(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4); +ulong4 __ovld __cnfn convert_ulong4_rtp(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4); +ulong4 __ovld __cnfn convert_ulong4_rtn(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4); +ulong4 __ovld __cnfn convert_ulong4(char4); +ulong4 __ovld __cnfn convert_ulong4_sat(char4); +ulong4 __ovld __cnfn convert_ulong4_rte(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4); +ulong4 __ovld __cnfn convert_ulong4(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rte(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4); +ulong4 __ovld __cnfn convert_ulong4_rtz(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4); +ulong4 __ovld __cnfn convert_ulong4_rtp(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4); +ulong4 __ovld __cnfn convert_ulong4_rtn(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4); +ulong4 __ovld __cnfn convert_ulong4(short4); +ulong4 __ovld __cnfn convert_ulong4_sat(short4); +ulong4 __ovld __cnfn convert_ulong4_rte(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4); +ulong4 __ovld __cnfn convert_ulong4(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rte(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4); +ulong4 __ovld __cnfn convert_ulong4_rtz(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4); +ulong4 __ovld __cnfn convert_ulong4_rtp(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4); +ulong4 __ovld __cnfn convert_ulong4_rtn(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4); +ulong4 __ovld __cnfn convert_ulong4(int4); +ulong4 __ovld __cnfn convert_ulong4_sat(int4); +ulong4 __ovld __cnfn convert_ulong4_rte(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4); +ulong4 __ovld __cnfn convert_ulong4_rtz(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4); +ulong4 __ovld __cnfn convert_ulong4_rtp(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4); +ulong4 __ovld __cnfn convert_ulong4_rtn(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4); +ulong4 __ovld __cnfn convert_ulong4(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat(uint4); +ulong4 __ovld __cnfn convert_ulong4_rte(long4); +ulong4 __ovld __cnfn 
convert_ulong4_sat_rte(long4); +ulong4 __ovld __cnfn convert_ulong4_rtz(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4); +ulong4 __ovld __cnfn convert_ulong4_rtp(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4); +ulong4 __ovld __cnfn convert_ulong4_rtn(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4); +ulong4 __ovld __cnfn convert_ulong4(long4); +ulong4 __ovld __cnfn convert_ulong4_sat(long4); +ulong4 __ovld __cnfn convert_ulong4_rte(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4); +ulong4 __ovld __cnfn convert_ulong4(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rte(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4); +ulong4 __ovld __cnfn convert_ulong4_rtz(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4); +ulong4 __ovld __cnfn convert_ulong4_rtp(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4); +ulong4 __ovld __cnfn convert_ulong4_rtn(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4); +ulong4 __ovld __cnfn convert_ulong4(float4); +ulong4 __ovld __cnfn convert_ulong4_sat(float4); +float4 __ovld __cnfn convert_float4_rte(char4); +float4 __ovld __cnfn convert_float4_rtz(char4); +float4 __ovld __cnfn convert_float4_rtp(char4); +float4 __ovld __cnfn convert_float4_rtn(char4); +float4 __ovld __cnfn convert_float4(char4); +float4 __ovld __cnfn convert_float4_rte(uchar4); +float4 __ovld __cnfn convert_float4_rtz(uchar4); +float4 __ovld __cnfn convert_float4_rtp(uchar4); +float4 __ovld __cnfn convert_float4_rtn(uchar4); +float4 __ovld __cnfn convert_float4(uchar4); +float4 __ovld __cnfn convert_float4_rte(short4); +float4 __ovld __cnfn convert_float4_rtz(short4); +float4 __ovld __cnfn convert_float4_rtp(short4); +float4 __ovld __cnfn convert_float4_rtn(short4); +float4 __ovld __cnfn convert_float4(short4); +float4 __ovld __cnfn convert_float4_rte(ushort4); +float4 __ovld __cnfn convert_float4_rtz(ushort4); +float4 __ovld __cnfn convert_float4_rtp(ushort4); +float4 __ovld __cnfn convert_float4_rtn(ushort4); +float4 __ovld __cnfn convert_float4(ushort4); +float4 __ovld __cnfn convert_float4_rte(int4); +float4 __ovld __cnfn convert_float4_rtz(int4); +float4 __ovld __cnfn convert_float4_rtp(int4); +float4 __ovld __cnfn convert_float4_rtn(int4); +float4 __ovld __cnfn convert_float4(int4); +float4 __ovld __cnfn convert_float4_rte(uint4); +float4 __ovld __cnfn convert_float4_rtz(uint4); +float4 __ovld __cnfn convert_float4_rtp(uint4); +float4 __ovld __cnfn convert_float4_rtn(uint4); +float4 __ovld __cnfn convert_float4(uint4); +float4 __ovld __cnfn convert_float4_rte(long4); +float4 __ovld __cnfn convert_float4_rtz(long4); +float4 __ovld __cnfn convert_float4_rtp(long4); +float4 __ovld __cnfn convert_float4_rtn(long4); +float4 __ovld __cnfn convert_float4(long4); +float4 __ovld __cnfn convert_float4_rte(ulong4); +float4 __ovld __cnfn convert_float4_rtz(ulong4); +float4 __ovld __cnfn convert_float4_rtp(ulong4); +float4 __ovld __cnfn convert_float4_rtn(ulong4); +float4 __ovld __cnfn convert_float4(ulong4); +float4 __ovld __cnfn convert_float4_rte(float4); +float4 __ovld __cnfn convert_float4_rtz(float4); +float4 __ovld __cnfn convert_float4_rtp(float4); 
+float4 __ovld __cnfn convert_float4_rtn(float4); +float4 __ovld __cnfn convert_float4(float4); +char8 __ovld __cnfn convert_char8_rte(char8); +char8 __ovld __cnfn convert_char8_sat_rte(char8); +char8 __ovld __cnfn convert_char8_rtz(char8); +char8 __ovld __cnfn convert_char8_sat_rtz(char8); +char8 __ovld __cnfn convert_char8_rtp(char8); +char8 __ovld __cnfn convert_char8_sat_rtp(char8); +char8 __ovld __cnfn convert_char8_rtn(char8); +char8 __ovld __cnfn convert_char8_sat_rtn(char8); +char8 __ovld __cnfn convert_char8(char8); +char8 __ovld __cnfn convert_char8_sat(char8); +char8 __ovld __cnfn convert_char8_rte(uchar8); +char8 __ovld __cnfn convert_char8_sat_rte(uchar8); +char8 __ovld __cnfn convert_char8_rtz(uchar8); +char8 __ovld __cnfn convert_char8_sat_rtz(uchar8); +char8 __ovld __cnfn convert_char8_rtp(uchar8); +char8 __ovld __cnfn convert_char8_sat_rtp(uchar8); +char8 __ovld __cnfn convert_char8_rtn(uchar8); +char8 __ovld __cnfn convert_char8_sat_rtn(uchar8); +char8 __ovld __cnfn convert_char8(uchar8); +char8 __ovld __cnfn convert_char8_sat(uchar8); +char8 __ovld __cnfn convert_char8_rte(short8); +char8 __ovld __cnfn convert_char8_sat_rte(short8); +char8 __ovld __cnfn convert_char8_rtz(short8); +char8 __ovld __cnfn convert_char8_sat_rtz(short8); +char8 __ovld __cnfn convert_char8_rtp(short8); +char8 __ovld __cnfn convert_char8_sat_rtp(short8); +char8 __ovld __cnfn convert_char8_rtn(short8); +char8 __ovld __cnfn convert_char8_sat_rtn(short8); +char8 __ovld __cnfn convert_char8(short8); +char8 __ovld __cnfn convert_char8_sat(short8); +char8 __ovld __cnfn convert_char8_rte(ushort8); +char8 __ovld __cnfn convert_char8_sat_rte(ushort8); +char8 __ovld __cnfn convert_char8_rtz(ushort8); +char8 __ovld __cnfn convert_char8_sat_rtz(ushort8); +char8 __ovld __cnfn convert_char8_rtp(ushort8); +char8 __ovld __cnfn convert_char8_sat_rtp(ushort8); +char8 __ovld __cnfn convert_char8_rtn(ushort8); +char8 __ovld __cnfn convert_char8_sat_rtn(ushort8); +char8 __ovld __cnfn convert_char8(ushort8); +char8 __ovld __cnfn convert_char8_sat(ushort8); +char8 __ovld __cnfn convert_char8_rte(int8); +char8 __ovld __cnfn convert_char8_sat_rte(int8); +char8 __ovld __cnfn convert_char8_rtz(int8); +char8 __ovld __cnfn convert_char8_sat_rtz(int8); +char8 __ovld __cnfn convert_char8_rtp(int8); +char8 __ovld __cnfn convert_char8_sat_rtp(int8); +char8 __ovld __cnfn convert_char8_rtn(int8); +char8 __ovld __cnfn convert_char8_sat_rtn(int8); +char8 __ovld __cnfn convert_char8(int8); +char8 __ovld __cnfn convert_char8_sat(int8); +char8 __ovld __cnfn convert_char8_rte(uint8); +char8 __ovld __cnfn convert_char8_sat_rte(uint8); +char8 __ovld __cnfn convert_char8_rtz(uint8); +char8 __ovld __cnfn convert_char8_sat_rtz(uint8); +char8 __ovld __cnfn convert_char8_rtp(uint8); +char8 __ovld __cnfn convert_char8_sat_rtp(uint8); +char8 __ovld __cnfn convert_char8_rtn(uint8); +char8 __ovld __cnfn convert_char8_sat_rtn(uint8); +char8 __ovld __cnfn convert_char8(uint8); +char8 __ovld __cnfn convert_char8_sat(uint8); +char8 __ovld __cnfn convert_char8_rte(long8); +char8 __ovld __cnfn convert_char8_sat_rte(long8); +char8 __ovld __cnfn convert_char8_rtz(long8); +char8 __ovld __cnfn convert_char8_sat_rtz(long8); +char8 __ovld __cnfn convert_char8_rtp(long8); +char8 __ovld __cnfn convert_char8_sat_rtp(long8); +char8 __ovld __cnfn convert_char8_rtn(long8); +char8 __ovld __cnfn convert_char8_sat_rtn(long8); +char8 __ovld __cnfn convert_char8(long8); +char8 __ovld __cnfn convert_char8_sat(long8); +char8 __ovld __cnfn convert_char8_rte(ulong8); +char8 
__ovld __cnfn convert_char8_sat_rte(ulong8); +char8 __ovld __cnfn convert_char8_rtz(ulong8); +char8 __ovld __cnfn convert_char8_sat_rtz(ulong8); +char8 __ovld __cnfn convert_char8_rtp(ulong8); +char8 __ovld __cnfn convert_char8_sat_rtp(ulong8); +char8 __ovld __cnfn convert_char8_rtn(ulong8); +char8 __ovld __cnfn convert_char8_sat_rtn(ulong8); +char8 __ovld __cnfn convert_char8(ulong8); +char8 __ovld __cnfn convert_char8_sat(ulong8); +char8 __ovld __cnfn convert_char8_rte(float8); +char8 __ovld __cnfn convert_char8_sat_rte(float8); +char8 __ovld __cnfn convert_char8_rtz(float8); +char8 __ovld __cnfn convert_char8_sat_rtz(float8); +char8 __ovld __cnfn convert_char8_rtp(float8); +char8 __ovld __cnfn convert_char8_sat_rtp(float8); +char8 __ovld __cnfn convert_char8_rtn(float8); +char8 __ovld __cnfn convert_char8_sat_rtn(float8); +char8 __ovld __cnfn convert_char8(float8); +char8 __ovld __cnfn convert_char8_sat(float8); +uchar8 __ovld __cnfn convert_uchar8_rte(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8); +uchar8 __ovld __cnfn convert_uchar8_rtz(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8); +uchar8 __ovld __cnfn convert_uchar8_rtp(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8); +uchar8 __ovld __cnfn convert_uchar8_rtn(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8); +uchar8 __ovld __cnfn convert_uchar8(char8); +uchar8 __ovld __cnfn convert_uchar8_sat(char8); +uchar8 __ovld __cnfn convert_uchar8_rte(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8); +uchar8 __ovld __cnfn convert_uchar8(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rte(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8); +uchar8 __ovld __cnfn convert_uchar8_rtz(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8); +uchar8 __ovld __cnfn convert_uchar8_rtp(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8); +uchar8 __ovld __cnfn convert_uchar8_rtn(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8); +uchar8 __ovld __cnfn convert_uchar8(short8); +uchar8 __ovld __cnfn convert_uchar8_sat(short8); +uchar8 __ovld __cnfn convert_uchar8_rte(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8); +uchar8 __ovld __cnfn convert_uchar8(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rte(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8); +uchar8 __ovld __cnfn convert_uchar8_rtz(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8); +uchar8 __ovld __cnfn convert_uchar8_rtp(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8); +uchar8 __ovld __cnfn convert_uchar8_rtn(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8); +uchar8 __ovld __cnfn convert_uchar8(int8); +uchar8 __ovld __cnfn convert_uchar8_sat(int8); +uchar8 __ovld __cnfn convert_uchar8_rte(uint8); +uchar8 __ovld __cnfn 
convert_uchar8_sat_rte(uint8); +uchar8 __ovld __cnfn convert_uchar8_rtz(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8); +uchar8 __ovld __cnfn convert_uchar8_rtp(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8); +uchar8 __ovld __cnfn convert_uchar8_rtn(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8); +uchar8 __ovld __cnfn convert_uchar8(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat(uint8); +uchar8 __ovld __cnfn convert_uchar8_rte(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8); +uchar8 __ovld __cnfn convert_uchar8_rtz(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8); +uchar8 __ovld __cnfn convert_uchar8_rtp(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8); +uchar8 __ovld __cnfn convert_uchar8_rtn(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8); +uchar8 __ovld __cnfn convert_uchar8(long8); +uchar8 __ovld __cnfn convert_uchar8_sat(long8); +uchar8 __ovld __cnfn convert_uchar8_rte(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8); +uchar8 __ovld __cnfn convert_uchar8(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rte(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8); +uchar8 __ovld __cnfn convert_uchar8_rtz(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8); +uchar8 __ovld __cnfn convert_uchar8_rtp(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8); +uchar8 __ovld __cnfn convert_uchar8_rtn(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8); +uchar8 __ovld __cnfn convert_uchar8(float8); +uchar8 __ovld __cnfn convert_uchar8_sat(float8); +short8 __ovld __cnfn convert_short8_rte(char8); +short8 __ovld __cnfn convert_short8_sat_rte(char8); +short8 __ovld __cnfn convert_short8_rtz(char8); +short8 __ovld __cnfn convert_short8_sat_rtz(char8); +short8 __ovld __cnfn convert_short8_rtp(char8); +short8 __ovld __cnfn convert_short8_sat_rtp(char8); +short8 __ovld __cnfn convert_short8_rtn(char8); +short8 __ovld __cnfn convert_short8_sat_rtn(char8); +short8 __ovld __cnfn convert_short8(char8); +short8 __ovld __cnfn convert_short8_sat(char8); +short8 __ovld __cnfn convert_short8_rte(uchar8); +short8 __ovld __cnfn convert_short8_sat_rte(uchar8); +short8 __ovld __cnfn convert_short8_rtz(uchar8); +short8 __ovld __cnfn convert_short8_sat_rtz(uchar8); +short8 __ovld __cnfn convert_short8_rtp(uchar8); +short8 __ovld __cnfn convert_short8_sat_rtp(uchar8); +short8 __ovld __cnfn convert_short8_rtn(uchar8); +short8 __ovld __cnfn convert_short8_sat_rtn(uchar8); +short8 __ovld __cnfn convert_short8(uchar8); +short8 __ovld __cnfn convert_short8_sat(uchar8); +short8 __ovld __cnfn convert_short8_rte(short8); +short8 __ovld __cnfn convert_short8_sat_rte(short8); +short8 __ovld __cnfn convert_short8_rtz(short8); +short8 __ovld __cnfn convert_short8_sat_rtz(short8); +short8 __ovld __cnfn convert_short8_rtp(short8); +short8 __ovld __cnfn convert_short8_sat_rtp(short8); +short8 __ovld __cnfn convert_short8_rtn(short8); +short8 __ovld __cnfn convert_short8_sat_rtn(short8); +short8 __ovld __cnfn convert_short8(short8); +short8 __ovld __cnfn convert_short8_sat(short8); +short8 __ovld __cnfn convert_short8_rte(ushort8); +short8 __ovld 
__cnfn convert_short8_sat_rte(ushort8); +short8 __ovld __cnfn convert_short8_rtz(ushort8); +short8 __ovld __cnfn convert_short8_sat_rtz(ushort8); +short8 __ovld __cnfn convert_short8_rtp(ushort8); +short8 __ovld __cnfn convert_short8_sat_rtp(ushort8); +short8 __ovld __cnfn convert_short8_rtn(ushort8); +short8 __ovld __cnfn convert_short8_sat_rtn(ushort8); +short8 __ovld __cnfn convert_short8(ushort8); +short8 __ovld __cnfn convert_short8_sat(ushort8); +short8 __ovld __cnfn convert_short8_rte(int8); +short8 __ovld __cnfn convert_short8_sat_rte(int8); +short8 __ovld __cnfn convert_short8_rtz(int8); +short8 __ovld __cnfn convert_short8_sat_rtz(int8); +short8 __ovld __cnfn convert_short8_rtp(int8); +short8 __ovld __cnfn convert_short8_sat_rtp(int8); +short8 __ovld __cnfn convert_short8_rtn(int8); +short8 __ovld __cnfn convert_short8_sat_rtn(int8); +short8 __ovld __cnfn convert_short8(int8); +short8 __ovld __cnfn convert_short8_sat(int8); +short8 __ovld __cnfn convert_short8_rte(uint8); +short8 __ovld __cnfn convert_short8_sat_rte(uint8); +short8 __ovld __cnfn convert_short8_rtz(uint8); +short8 __ovld __cnfn convert_short8_sat_rtz(uint8); +short8 __ovld __cnfn convert_short8_rtp(uint8); +short8 __ovld __cnfn convert_short8_sat_rtp(uint8); +short8 __ovld __cnfn convert_short8_rtn(uint8); +short8 __ovld __cnfn convert_short8_sat_rtn(uint8); +short8 __ovld __cnfn convert_short8(uint8); +short8 __ovld __cnfn convert_short8_sat(uint8); +short8 __ovld __cnfn convert_short8_rte(long8); +short8 __ovld __cnfn convert_short8_sat_rte(long8); +short8 __ovld __cnfn convert_short8_rtz(long8); +short8 __ovld __cnfn convert_short8_sat_rtz(long8); +short8 __ovld __cnfn convert_short8_rtp(long8); +short8 __ovld __cnfn convert_short8_sat_rtp(long8); +short8 __ovld __cnfn convert_short8_rtn(long8); +short8 __ovld __cnfn convert_short8_sat_rtn(long8); +short8 __ovld __cnfn convert_short8(long8); +short8 __ovld __cnfn convert_short8_sat(long8); +short8 __ovld __cnfn convert_short8_rte(ulong8); +short8 __ovld __cnfn convert_short8_sat_rte(ulong8); +short8 __ovld __cnfn convert_short8_rtz(ulong8); +short8 __ovld __cnfn convert_short8_sat_rtz(ulong8); +short8 __ovld __cnfn convert_short8_rtp(ulong8); +short8 __ovld __cnfn convert_short8_sat_rtp(ulong8); +short8 __ovld __cnfn convert_short8_rtn(ulong8); +short8 __ovld __cnfn convert_short8_sat_rtn(ulong8); +short8 __ovld __cnfn convert_short8(ulong8); +short8 __ovld __cnfn convert_short8_sat(ulong8); +short8 __ovld __cnfn convert_short8_rte(float8); +short8 __ovld __cnfn convert_short8_sat_rte(float8); +short8 __ovld __cnfn convert_short8_rtz(float8); +short8 __ovld __cnfn convert_short8_sat_rtz(float8); +short8 __ovld __cnfn convert_short8_rtp(float8); +short8 __ovld __cnfn convert_short8_sat_rtp(float8); +short8 __ovld __cnfn convert_short8_rtn(float8); +short8 __ovld __cnfn convert_short8_sat_rtn(float8); +short8 __ovld __cnfn convert_short8(float8); +short8 __ovld __cnfn convert_short8_sat(float8); +ushort8 __ovld __cnfn convert_ushort8_rte(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8); +ushort8 __ovld __cnfn convert_ushort8_rtz(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8); +ushort8 __ovld __cnfn convert_ushort8_rtp(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8); +ushort8 __ovld __cnfn convert_ushort8_rtn(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8); +ushort8 __ovld __cnfn convert_ushort8(char8); +ushort8 __ovld __cnfn convert_ushort8_sat(char8); +ushort8 __ovld __cnfn convert_ushort8_rte(uchar8); 
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8); +ushort8 __ovld __cnfn convert_ushort8(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rte(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8); +ushort8 __ovld __cnfn convert_ushort8_rtz(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8); +ushort8 __ovld __cnfn convert_ushort8_rtp(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8); +ushort8 __ovld __cnfn convert_ushort8_rtn(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8); +ushort8 __ovld __cnfn convert_ushort8(short8); +ushort8 __ovld __cnfn convert_ushort8_sat(short8); +ushort8 __ovld __cnfn convert_ushort8_rte(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8); +ushort8 __ovld __cnfn convert_ushort8(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rte(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8); +ushort8 __ovld __cnfn convert_ushort8_rtz(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8); +ushort8 __ovld __cnfn convert_ushort8_rtp(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8); +ushort8 __ovld __cnfn convert_ushort8_rtn(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8); +ushort8 __ovld __cnfn convert_ushort8(int8); +ushort8 __ovld __cnfn convert_ushort8_sat(int8); +ushort8 __ovld __cnfn convert_ushort8_rte(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8); +ushort8 __ovld __cnfn convert_ushort8_rtz(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8); +ushort8 __ovld __cnfn convert_ushort8_rtp(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8); +ushort8 __ovld __cnfn convert_ushort8_rtn(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8); +ushort8 __ovld __cnfn convert_ushort8(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat(uint8); +ushort8 __ovld __cnfn convert_ushort8_rte(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8); +ushort8 __ovld __cnfn convert_ushort8_rtz(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8); +ushort8 __ovld __cnfn convert_ushort8_rtp(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8); +ushort8 __ovld __cnfn convert_ushort8_rtn(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8); +ushort8 __ovld __cnfn convert_ushort8(long8); +ushort8 __ovld __cnfn convert_ushort8_sat(long8); +ushort8 __ovld __cnfn convert_ushort8_rte(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8); +ushort8 
__ovld __cnfn convert_ushort8(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rte(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8); +ushort8 __ovld __cnfn convert_ushort8_rtz(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8); +ushort8 __ovld __cnfn convert_ushort8_rtp(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8); +ushort8 __ovld __cnfn convert_ushort8_rtn(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8); +ushort8 __ovld __cnfn convert_ushort8(float8); +ushort8 __ovld __cnfn convert_ushort8_sat(float8); +int8 __ovld __cnfn convert_int8_rte(char8); +int8 __ovld __cnfn convert_int8_sat_rte(char8); +int8 __ovld __cnfn convert_int8_rtz(char8); +int8 __ovld __cnfn convert_int8_sat_rtz(char8); +int8 __ovld __cnfn convert_int8_rtp(char8); +int8 __ovld __cnfn convert_int8_sat_rtp(char8); +int8 __ovld __cnfn convert_int8_rtn(char8); +int8 __ovld __cnfn convert_int8_sat_rtn(char8); +int8 __ovld __cnfn convert_int8(char8); +int8 __ovld __cnfn convert_int8_sat(char8); +int8 __ovld __cnfn convert_int8_rte(uchar8); +int8 __ovld __cnfn convert_int8_sat_rte(uchar8); +int8 __ovld __cnfn convert_int8_rtz(uchar8); +int8 __ovld __cnfn convert_int8_sat_rtz(uchar8); +int8 __ovld __cnfn convert_int8_rtp(uchar8); +int8 __ovld __cnfn convert_int8_sat_rtp(uchar8); +int8 __ovld __cnfn convert_int8_rtn(uchar8); +int8 __ovld __cnfn convert_int8_sat_rtn(uchar8); +int8 __ovld __cnfn convert_int8(uchar8); +int8 __ovld __cnfn convert_int8_sat(uchar8); +int8 __ovld __cnfn convert_int8_rte(short8); +int8 __ovld __cnfn convert_int8_sat_rte(short8); +int8 __ovld __cnfn convert_int8_rtz(short8); +int8 __ovld __cnfn convert_int8_sat_rtz(short8); +int8 __ovld __cnfn convert_int8_rtp(short8); +int8 __ovld __cnfn convert_int8_sat_rtp(short8); +int8 __ovld __cnfn convert_int8_rtn(short8); +int8 __ovld __cnfn convert_int8_sat_rtn(short8); +int8 __ovld __cnfn convert_int8(short8); +int8 __ovld __cnfn convert_int8_sat(short8); +int8 __ovld __cnfn convert_int8_rte(ushort8); +int8 __ovld __cnfn convert_int8_sat_rte(ushort8); +int8 __ovld __cnfn convert_int8_rtz(ushort8); +int8 __ovld __cnfn convert_int8_sat_rtz(ushort8); +int8 __ovld __cnfn convert_int8_rtp(ushort8); +int8 __ovld __cnfn convert_int8_sat_rtp(ushort8); +int8 __ovld __cnfn convert_int8_rtn(ushort8); +int8 __ovld __cnfn convert_int8_sat_rtn(ushort8); +int8 __ovld __cnfn convert_int8(ushort8); +int8 __ovld __cnfn convert_int8_sat(ushort8); +int8 __ovld __cnfn convert_int8_rte(int8); +int8 __ovld __cnfn convert_int8_sat_rte(int8); +int8 __ovld __cnfn convert_int8_rtz(int8); +int8 __ovld __cnfn convert_int8_sat_rtz(int8); +int8 __ovld __cnfn convert_int8_rtp(int8); +int8 __ovld __cnfn convert_int8_sat_rtp(int8); +int8 __ovld __cnfn convert_int8_rtn(int8); +int8 __ovld __cnfn convert_int8_sat_rtn(int8); +int8 __ovld __cnfn convert_int8(int8); +int8 __ovld __cnfn convert_int8_sat(int8); +int8 __ovld __cnfn convert_int8_rte(uint8); +int8 __ovld __cnfn convert_int8_sat_rte(uint8); +int8 __ovld __cnfn convert_int8_rtz(uint8); +int8 __ovld __cnfn convert_int8_sat_rtz(uint8); +int8 __ovld __cnfn convert_int8_rtp(uint8); +int8 __ovld __cnfn convert_int8_sat_rtp(uint8); +int8 __ovld __cnfn convert_int8_rtn(uint8); +int8 __ovld __cnfn convert_int8_sat_rtn(uint8); +int8 __ovld __cnfn convert_int8(uint8); +int8 __ovld __cnfn convert_int8_sat(uint8); +int8 __ovld __cnfn convert_int8_rte(long8); +int8 __ovld __cnfn convert_int8_sat_rte(long8); +int8 __ovld __cnfn 
convert_int8_rtz(long8); +int8 __ovld __cnfn convert_int8_sat_rtz(long8); +int8 __ovld __cnfn convert_int8_rtp(long8); +int8 __ovld __cnfn convert_int8_sat_rtp(long8); +int8 __ovld __cnfn convert_int8_rtn(long8); +int8 __ovld __cnfn convert_int8_sat_rtn(long8); +int8 __ovld __cnfn convert_int8(long8); +int8 __ovld __cnfn convert_int8_sat(long8); +int8 __ovld __cnfn convert_int8_rte(ulong8); +int8 __ovld __cnfn convert_int8_sat_rte(ulong8); +int8 __ovld __cnfn convert_int8_rtz(ulong8); +int8 __ovld __cnfn convert_int8_sat_rtz(ulong8); +int8 __ovld __cnfn convert_int8_rtp(ulong8); +int8 __ovld __cnfn convert_int8_sat_rtp(ulong8); +int8 __ovld __cnfn convert_int8_rtn(ulong8); +int8 __ovld __cnfn convert_int8_sat_rtn(ulong8); +int8 __ovld __cnfn convert_int8(ulong8); +int8 __ovld __cnfn convert_int8_sat(ulong8); +int8 __ovld __cnfn convert_int8_rte(float8); +int8 __ovld __cnfn convert_int8_sat_rte(float8); +int8 __ovld __cnfn convert_int8_rtz(float8); +int8 __ovld __cnfn convert_int8_sat_rtz(float8); +int8 __ovld __cnfn convert_int8_rtp(float8); +int8 __ovld __cnfn convert_int8_sat_rtp(float8); +int8 __ovld __cnfn convert_int8_rtn(float8); +int8 __ovld __cnfn convert_int8_sat_rtn(float8); +int8 __ovld __cnfn convert_int8(float8); +int8 __ovld __cnfn convert_int8_sat(float8); +uint8 __ovld __cnfn convert_uint8_rte(char8); +uint8 __ovld __cnfn convert_uint8_sat_rte(char8); +uint8 __ovld __cnfn convert_uint8_rtz(char8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(char8); +uint8 __ovld __cnfn convert_uint8_rtp(char8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(char8); +uint8 __ovld __cnfn convert_uint8_rtn(char8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(char8); +uint8 __ovld __cnfn convert_uint8(char8); +uint8 __ovld __cnfn convert_uint8_sat(char8); +uint8 __ovld __cnfn convert_uint8_rte(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8); +uint8 __ovld __cnfn convert_uint8_rtz(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8); +uint8 __ovld __cnfn convert_uint8_rtp(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8); +uint8 __ovld __cnfn convert_uint8_rtn(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8); +uint8 __ovld __cnfn convert_uint8(uchar8); +uint8 __ovld __cnfn convert_uint8_sat(uchar8); +uint8 __ovld __cnfn convert_uint8_rte(short8); +uint8 __ovld __cnfn convert_uint8_sat_rte(short8); +uint8 __ovld __cnfn convert_uint8_rtz(short8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(short8); +uint8 __ovld __cnfn convert_uint8_rtp(short8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(short8); +uint8 __ovld __cnfn convert_uint8_rtn(short8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(short8); +uint8 __ovld __cnfn convert_uint8(short8); +uint8 __ovld __cnfn convert_uint8_sat(short8); +uint8 __ovld __cnfn convert_uint8_rte(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8); +uint8 __ovld __cnfn convert_uint8_rtz(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8); +uint8 __ovld __cnfn convert_uint8_rtp(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8); +uint8 __ovld __cnfn convert_uint8_rtn(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8); +uint8 __ovld __cnfn convert_uint8(ushort8); +uint8 __ovld __cnfn convert_uint8_sat(ushort8); +uint8 __ovld __cnfn convert_uint8_rte(int8); +uint8 __ovld __cnfn convert_uint8_sat_rte(int8); +uint8 __ovld __cnfn convert_uint8_rtz(int8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(int8); +uint8 __ovld __cnfn convert_uint8_rtp(int8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(int8); +uint8 
__ovld __cnfn convert_uint8_rtn(int8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(int8); +uint8 __ovld __cnfn convert_uint8(int8); +uint8 __ovld __cnfn convert_uint8_sat(int8); +uint8 __ovld __cnfn convert_uint8_rte(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rte(uint8); +uint8 __ovld __cnfn convert_uint8_rtz(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8); +uint8 __ovld __cnfn convert_uint8_rtp(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8); +uint8 __ovld __cnfn convert_uint8_rtn(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8); +uint8 __ovld __cnfn convert_uint8(uint8); +uint8 __ovld __cnfn convert_uint8_sat(uint8); +uint8 __ovld __cnfn convert_uint8_rte(long8); +uint8 __ovld __cnfn convert_uint8_sat_rte(long8); +uint8 __ovld __cnfn convert_uint8_rtz(long8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(long8); +uint8 __ovld __cnfn convert_uint8_rtp(long8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(long8); +uint8 __ovld __cnfn convert_uint8_rtn(long8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(long8); +uint8 __ovld __cnfn convert_uint8(long8); +uint8 __ovld __cnfn convert_uint8_sat(long8); +uint8 __ovld __cnfn convert_uint8_rte(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8); +uint8 __ovld __cnfn convert_uint8_rtz(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8); +uint8 __ovld __cnfn convert_uint8_rtp(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8); +uint8 __ovld __cnfn convert_uint8_rtn(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8); +uint8 __ovld __cnfn convert_uint8(ulong8); +uint8 __ovld __cnfn convert_uint8_sat(ulong8); +uint8 __ovld __cnfn convert_uint8_rte(float8); +uint8 __ovld __cnfn convert_uint8_sat_rte(float8); +uint8 __ovld __cnfn convert_uint8_rtz(float8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(float8); +uint8 __ovld __cnfn convert_uint8_rtp(float8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(float8); +uint8 __ovld __cnfn convert_uint8_rtn(float8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(float8); +uint8 __ovld __cnfn convert_uint8(float8); +uint8 __ovld __cnfn convert_uint8_sat(float8); +long8 __ovld __cnfn convert_long8_rte(char8); +long8 __ovld __cnfn convert_long8_sat_rte(char8); +long8 __ovld __cnfn convert_long8_rtz(char8); +long8 __ovld __cnfn convert_long8_sat_rtz(char8); +long8 __ovld __cnfn convert_long8_rtp(char8); +long8 __ovld __cnfn convert_long8_sat_rtp(char8); +long8 __ovld __cnfn convert_long8_rtn(char8); +long8 __ovld __cnfn convert_long8_sat_rtn(char8); +long8 __ovld __cnfn convert_long8(char8); +long8 __ovld __cnfn convert_long8_sat(char8); +long8 __ovld __cnfn convert_long8_rte(uchar8); +long8 __ovld __cnfn convert_long8_sat_rte(uchar8); +long8 __ovld __cnfn convert_long8_rtz(uchar8); +long8 __ovld __cnfn convert_long8_sat_rtz(uchar8); +long8 __ovld __cnfn convert_long8_rtp(uchar8); +long8 __ovld __cnfn convert_long8_sat_rtp(uchar8); +long8 __ovld __cnfn convert_long8_rtn(uchar8); +long8 __ovld __cnfn convert_long8_sat_rtn(uchar8); +long8 __ovld __cnfn convert_long8(uchar8); +long8 __ovld __cnfn convert_long8_sat(uchar8); +long8 __ovld __cnfn convert_long8_rte(short8); +long8 __ovld __cnfn convert_long8_sat_rte(short8); +long8 __ovld __cnfn convert_long8_rtz(short8); +long8 __ovld __cnfn convert_long8_sat_rtz(short8); +long8 __ovld __cnfn convert_long8_rtp(short8); +long8 __ovld __cnfn convert_long8_sat_rtp(short8); +long8 __ovld __cnfn convert_long8_rtn(short8); +long8 __ovld __cnfn convert_long8_sat_rtn(short8); +long8 __ovld __cnfn convert_long8(short8); +long8 
__ovld __cnfn convert_long8_sat(short8); +long8 __ovld __cnfn convert_long8_rte(ushort8); +long8 __ovld __cnfn convert_long8_sat_rte(ushort8); +long8 __ovld __cnfn convert_long8_rtz(ushort8); +long8 __ovld __cnfn convert_long8_sat_rtz(ushort8); +long8 __ovld __cnfn convert_long8_rtp(ushort8); +long8 __ovld __cnfn convert_long8_sat_rtp(ushort8); +long8 __ovld __cnfn convert_long8_rtn(ushort8); +long8 __ovld __cnfn convert_long8_sat_rtn(ushort8); +long8 __ovld __cnfn convert_long8(ushort8); +long8 __ovld __cnfn convert_long8_sat(ushort8); +long8 __ovld __cnfn convert_long8_rte(int8); +long8 __ovld __cnfn convert_long8_sat_rte(int8); +long8 __ovld __cnfn convert_long8_rtz(int8); +long8 __ovld __cnfn convert_long8_sat_rtz(int8); +long8 __ovld __cnfn convert_long8_rtp(int8); +long8 __ovld __cnfn convert_long8_sat_rtp(int8); +long8 __ovld __cnfn convert_long8_rtn(int8); +long8 __ovld __cnfn convert_long8_sat_rtn(int8); +long8 __ovld __cnfn convert_long8(int8); +long8 __ovld __cnfn convert_long8_sat(int8); +long8 __ovld __cnfn convert_long8_rte(uint8); +long8 __ovld __cnfn convert_long8_sat_rte(uint8); +long8 __ovld __cnfn convert_long8_rtz(uint8); +long8 __ovld __cnfn convert_long8_sat_rtz(uint8); +long8 __ovld __cnfn convert_long8_rtp(uint8); +long8 __ovld __cnfn convert_long8_sat_rtp(uint8); +long8 __ovld __cnfn convert_long8_rtn(uint8); +long8 __ovld __cnfn convert_long8_sat_rtn(uint8); +long8 __ovld __cnfn convert_long8(uint8); +long8 __ovld __cnfn convert_long8_sat(uint8); +long8 __ovld __cnfn convert_long8_rte(long8); +long8 __ovld __cnfn convert_long8_sat_rte(long8); +long8 __ovld __cnfn convert_long8_rtz(long8); +long8 __ovld __cnfn convert_long8_sat_rtz(long8); +long8 __ovld __cnfn convert_long8_rtp(long8); +long8 __ovld __cnfn convert_long8_sat_rtp(long8); +long8 __ovld __cnfn convert_long8_rtn(long8); +long8 __ovld __cnfn convert_long8_sat_rtn(long8); +long8 __ovld __cnfn convert_long8(long8); +long8 __ovld __cnfn convert_long8_sat(long8); +long8 __ovld __cnfn convert_long8_rte(ulong8); +long8 __ovld __cnfn convert_long8_sat_rte(ulong8); +long8 __ovld __cnfn convert_long8_rtz(ulong8); +long8 __ovld __cnfn convert_long8_sat_rtz(ulong8); +long8 __ovld __cnfn convert_long8_rtp(ulong8); +long8 __ovld __cnfn convert_long8_sat_rtp(ulong8); +long8 __ovld __cnfn convert_long8_rtn(ulong8); +long8 __ovld __cnfn convert_long8_sat_rtn(ulong8); +long8 __ovld __cnfn convert_long8(ulong8); +long8 __ovld __cnfn convert_long8_sat(ulong8); +long8 __ovld __cnfn convert_long8_rte(float8); +long8 __ovld __cnfn convert_long8_sat_rte(float8); +long8 __ovld __cnfn convert_long8_rtz(float8); +long8 __ovld __cnfn convert_long8_sat_rtz(float8); +long8 __ovld __cnfn convert_long8_rtp(float8); +long8 __ovld __cnfn convert_long8_sat_rtp(float8); +long8 __ovld __cnfn convert_long8_rtn(float8); +long8 __ovld __cnfn convert_long8_sat_rtn(float8); +long8 __ovld __cnfn convert_long8(float8); +long8 __ovld __cnfn convert_long8_sat(float8); +ulong8 __ovld __cnfn convert_ulong8_rte(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8); +ulong8 __ovld __cnfn convert_ulong8_rtz(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8); +ulong8 __ovld __cnfn convert_ulong8_rtp(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8); +ulong8 __ovld __cnfn convert_ulong8_rtn(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8); +ulong8 __ovld __cnfn convert_ulong8(char8); +ulong8 __ovld __cnfn convert_ulong8_sat(char8); +ulong8 __ovld __cnfn convert_ulong8_rte(uchar8); +ulong8 __ovld __cnfn 
convert_ulong8_sat_rte(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8); +ulong8 __ovld __cnfn convert_ulong8(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rte(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8); +ulong8 __ovld __cnfn convert_ulong8_rtz(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8); +ulong8 __ovld __cnfn convert_ulong8_rtp(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8); +ulong8 __ovld __cnfn convert_ulong8_rtn(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8); +ulong8 __ovld __cnfn convert_ulong8(short8); +ulong8 __ovld __cnfn convert_ulong8_sat(short8); +ulong8 __ovld __cnfn convert_ulong8_rte(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8); +ulong8 __ovld __cnfn convert_ulong8(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rte(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8); +ulong8 __ovld __cnfn convert_ulong8_rtz(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8); +ulong8 __ovld __cnfn convert_ulong8_rtp(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8); +ulong8 __ovld __cnfn convert_ulong8_rtn(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8); +ulong8 __ovld __cnfn convert_ulong8(int8); +ulong8 __ovld __cnfn convert_ulong8_sat(int8); +ulong8 __ovld __cnfn convert_ulong8_rte(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8); +ulong8 __ovld __cnfn convert_ulong8_rtz(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8); +ulong8 __ovld __cnfn convert_ulong8_rtp(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8); +ulong8 __ovld __cnfn convert_ulong8_rtn(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8); +ulong8 __ovld __cnfn convert_ulong8(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat(uint8); +ulong8 __ovld __cnfn convert_ulong8_rte(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8); +ulong8 __ovld __cnfn convert_ulong8_rtz(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8); +ulong8 __ovld __cnfn convert_ulong8_rtp(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8); +ulong8 __ovld __cnfn convert_ulong8_rtn(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8); +ulong8 __ovld __cnfn convert_ulong8(long8); +ulong8 __ovld __cnfn convert_ulong8_sat(long8); +ulong8 __ovld __cnfn convert_ulong8_rte(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8); +ulong8 __ovld __cnfn convert_ulong8(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rte(float8); +ulong8 __ovld __cnfn 
convert_ulong8_sat_rte(float8); +ulong8 __ovld __cnfn convert_ulong8_rtz(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8); +ulong8 __ovld __cnfn convert_ulong8_rtp(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8); +ulong8 __ovld __cnfn convert_ulong8_rtn(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8); +ulong8 __ovld __cnfn convert_ulong8(float8); +ulong8 __ovld __cnfn convert_ulong8_sat(float8); +float8 __ovld __cnfn convert_float8_rte(char8); +float8 __ovld __cnfn convert_float8_rtz(char8); +float8 __ovld __cnfn convert_float8_rtp(char8); +float8 __ovld __cnfn convert_float8_rtn(char8); +float8 __ovld __cnfn convert_float8(char8); +float8 __ovld __cnfn convert_float8_rte(uchar8); +float8 __ovld __cnfn convert_float8_rtz(uchar8); +float8 __ovld __cnfn convert_float8_rtp(uchar8); +float8 __ovld __cnfn convert_float8_rtn(uchar8); +float8 __ovld __cnfn convert_float8(uchar8); +float8 __ovld __cnfn convert_float8_rte(short8); +float8 __ovld __cnfn convert_float8_rtz(short8); +float8 __ovld __cnfn convert_float8_rtp(short8); +float8 __ovld __cnfn convert_float8_rtn(short8); +float8 __ovld __cnfn convert_float8(short8); +float8 __ovld __cnfn convert_float8_rte(ushort8); +float8 __ovld __cnfn convert_float8_rtz(ushort8); +float8 __ovld __cnfn convert_float8_rtp(ushort8); +float8 __ovld __cnfn convert_float8_rtn(ushort8); +float8 __ovld __cnfn convert_float8(ushort8); +float8 __ovld __cnfn convert_float8_rte(int8); +float8 __ovld __cnfn convert_float8_rtz(int8); +float8 __ovld __cnfn convert_float8_rtp(int8); +float8 __ovld __cnfn convert_float8_rtn(int8); +float8 __ovld __cnfn convert_float8(int8); +float8 __ovld __cnfn convert_float8_rte(uint8); +float8 __ovld __cnfn convert_float8_rtz(uint8); +float8 __ovld __cnfn convert_float8_rtp(uint8); +float8 __ovld __cnfn convert_float8_rtn(uint8); +float8 __ovld __cnfn convert_float8(uint8); +float8 __ovld __cnfn convert_float8_rte(long8); +float8 __ovld __cnfn convert_float8_rtz(long8); +float8 __ovld __cnfn convert_float8_rtp(long8); +float8 __ovld __cnfn convert_float8_rtn(long8); +float8 __ovld __cnfn convert_float8(long8); +float8 __ovld __cnfn convert_float8_rte(ulong8); +float8 __ovld __cnfn convert_float8_rtz(ulong8); +float8 __ovld __cnfn convert_float8_rtp(ulong8); +float8 __ovld __cnfn convert_float8_rtn(ulong8); +float8 __ovld __cnfn convert_float8(ulong8); +float8 __ovld __cnfn convert_float8_rte(float8); +float8 __ovld __cnfn convert_float8_rtz(float8); +float8 __ovld __cnfn convert_float8_rtp(float8); +float8 __ovld __cnfn convert_float8_rtn(float8); +float8 __ovld __cnfn convert_float8(float8); +char16 __ovld __cnfn convert_char16_rte(char16); +char16 __ovld __cnfn convert_char16_sat_rte(char16); +char16 __ovld __cnfn convert_char16_rtz(char16); +char16 __ovld __cnfn convert_char16_sat_rtz(char16); +char16 __ovld __cnfn convert_char16_rtp(char16); +char16 __ovld __cnfn convert_char16_sat_rtp(char16); +char16 __ovld __cnfn convert_char16_rtn(char16); +char16 __ovld __cnfn convert_char16_sat_rtn(char16); +char16 __ovld __cnfn convert_char16(char16); +char16 __ovld __cnfn convert_char16_sat(char16); +char16 __ovld __cnfn convert_char16_rte(uchar16); +char16 __ovld __cnfn convert_char16_sat_rte(uchar16); +char16 __ovld __cnfn convert_char16_rtz(uchar16); +char16 __ovld __cnfn convert_char16_sat_rtz(uchar16); +char16 __ovld __cnfn convert_char16_rtp(uchar16); +char16 __ovld __cnfn convert_char16_sat_rtp(uchar16); +char16 __ovld __cnfn convert_char16_rtn(uchar16); +char16 __ovld __cnfn 
convert_char16_sat_rtn(uchar16); +char16 __ovld __cnfn convert_char16(uchar16); +char16 __ovld __cnfn convert_char16_sat(uchar16); +char16 __ovld __cnfn convert_char16_rte(short16); +char16 __ovld __cnfn convert_char16_sat_rte(short16); +char16 __ovld __cnfn convert_char16_rtz(short16); +char16 __ovld __cnfn convert_char16_sat_rtz(short16); +char16 __ovld __cnfn convert_char16_rtp(short16); +char16 __ovld __cnfn convert_char16_sat_rtp(short16); +char16 __ovld __cnfn convert_char16_rtn(short16); +char16 __ovld __cnfn convert_char16_sat_rtn(short16); +char16 __ovld __cnfn convert_char16(short16); +char16 __ovld __cnfn convert_char16_sat(short16); +char16 __ovld __cnfn convert_char16_rte(ushort16); +char16 __ovld __cnfn convert_char16_sat_rte(ushort16); +char16 __ovld __cnfn convert_char16_rtz(ushort16); +char16 __ovld __cnfn convert_char16_sat_rtz(ushort16); +char16 __ovld __cnfn convert_char16_rtp(ushort16); +char16 __ovld __cnfn convert_char16_sat_rtp(ushort16); +char16 __ovld __cnfn convert_char16_rtn(ushort16); +char16 __ovld __cnfn convert_char16_sat_rtn(ushort16); +char16 __ovld __cnfn convert_char16(ushort16); +char16 __ovld __cnfn convert_char16_sat(ushort16); +char16 __ovld __cnfn convert_char16_rte(int16); +char16 __ovld __cnfn convert_char16_sat_rte(int16); +char16 __ovld __cnfn convert_char16_rtz(int16); +char16 __ovld __cnfn convert_char16_sat_rtz(int16); +char16 __ovld __cnfn convert_char16_rtp(int16); +char16 __ovld __cnfn convert_char16_sat_rtp(int16); +char16 __ovld __cnfn convert_char16_rtn(int16); +char16 __ovld __cnfn convert_char16_sat_rtn(int16); +char16 __ovld __cnfn convert_char16(int16); +char16 __ovld __cnfn convert_char16_sat(int16); +char16 __ovld __cnfn convert_char16_rte(uint16); +char16 __ovld __cnfn convert_char16_sat_rte(uint16); +char16 __ovld __cnfn convert_char16_rtz(uint16); +char16 __ovld __cnfn convert_char16_sat_rtz(uint16); +char16 __ovld __cnfn convert_char16_rtp(uint16); +char16 __ovld __cnfn convert_char16_sat_rtp(uint16); +char16 __ovld __cnfn convert_char16_rtn(uint16); +char16 __ovld __cnfn convert_char16_sat_rtn(uint16); +char16 __ovld __cnfn convert_char16(uint16); +char16 __ovld __cnfn convert_char16_sat(uint16); +char16 __ovld __cnfn convert_char16_rte(long16); +char16 __ovld __cnfn convert_char16_sat_rte(long16); +char16 __ovld __cnfn convert_char16_rtz(long16); +char16 __ovld __cnfn convert_char16_sat_rtz(long16); +char16 __ovld __cnfn convert_char16_rtp(long16); +char16 __ovld __cnfn convert_char16_sat_rtp(long16); +char16 __ovld __cnfn convert_char16_rtn(long16); +char16 __ovld __cnfn convert_char16_sat_rtn(long16); +char16 __ovld __cnfn convert_char16(long16); +char16 __ovld __cnfn convert_char16_sat(long16); +char16 __ovld __cnfn convert_char16_rte(ulong16); +char16 __ovld __cnfn convert_char16_sat_rte(ulong16); +char16 __ovld __cnfn convert_char16_rtz(ulong16); +char16 __ovld __cnfn convert_char16_sat_rtz(ulong16); +char16 __ovld __cnfn convert_char16_rtp(ulong16); +char16 __ovld __cnfn convert_char16_sat_rtp(ulong16); +char16 __ovld __cnfn convert_char16_rtn(ulong16); +char16 __ovld __cnfn convert_char16_sat_rtn(ulong16); +char16 __ovld __cnfn convert_char16(ulong16); +char16 __ovld __cnfn convert_char16_sat(ulong16); +char16 __ovld __cnfn convert_char16_rte(float16); +char16 __ovld __cnfn convert_char16_sat_rte(float16); +char16 __ovld __cnfn convert_char16_rtz(float16); +char16 __ovld __cnfn convert_char16_sat_rtz(float16); +char16 __ovld __cnfn convert_char16_rtp(float16); +char16 __ovld __cnfn convert_char16_sat_rtp(float16); 
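// The declarations above and below all follow the OpenCL C explicit-conversion
// naming scheme convert_<destType><n>[_sat][_<rounding>](<srcType><n>): the
// optional _sat modifier clamps out-of-range values to the destination range,
// and the optional rounding suffix selects round-to-nearest-even (_rte),
// toward zero (_rtz), toward +infinity (_rtp) or toward -infinity (_rtn).
// __ovld and __cnfn are this header's shorthands for
// __attribute__((overloadable)) and __attribute__((const)).
// A minimal illustrative use (values chosen here, not taken from the header):
//   float4 f = (float4)(1.7f, -0.2f, 300.0f, -300.0f);
//   char4 c = convert_char4_sat_rte(f);  // yields (2, 0, 127, -128)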
+char16 __ovld __cnfn convert_char16_rtn(float16); +char16 __ovld __cnfn convert_char16_sat_rtn(float16); +char16 __ovld __cnfn convert_char16(float16); +char16 __ovld __cnfn convert_char16_sat(float16); +uchar16 __ovld __cnfn convert_uchar16_rte(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16); +uchar16 __ovld __cnfn convert_uchar16_rtz(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16); +uchar16 __ovld __cnfn convert_uchar16_rtp(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16); +uchar16 __ovld __cnfn convert_uchar16_rtn(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16); +uchar16 __ovld __cnfn convert_uchar16(char16); +uchar16 __ovld __cnfn convert_uchar16_sat(char16); +uchar16 __ovld __cnfn convert_uchar16_rte(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16); +uchar16 __ovld __cnfn convert_uchar16(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rte(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16); +uchar16 __ovld __cnfn convert_uchar16_rtz(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16); +uchar16 __ovld __cnfn convert_uchar16_rtp(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16); +uchar16 __ovld __cnfn convert_uchar16_rtn(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16); +uchar16 __ovld __cnfn convert_uchar16(short16); +uchar16 __ovld __cnfn convert_uchar16_sat(short16); +uchar16 __ovld __cnfn convert_uchar16_rte(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16); +uchar16 __ovld __cnfn convert_uchar16(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rte(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16); +uchar16 __ovld __cnfn convert_uchar16_rtz(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16); +uchar16 __ovld __cnfn convert_uchar16_rtp(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16); +uchar16 __ovld __cnfn convert_uchar16_rtn(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16); +uchar16 __ovld __cnfn convert_uchar16(int16); +uchar16 __ovld __cnfn convert_uchar16_sat(int16); +uchar16 __ovld __cnfn convert_uchar16_rte(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16); +uchar16 __ovld __cnfn convert_uchar16_rtz(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16); +uchar16 __ovld __cnfn convert_uchar16_rtp(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16); +uchar16 __ovld __cnfn convert_uchar16_rtn(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16); +uchar16 __ovld __cnfn convert_uchar16(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat(uint16); +uchar16 __ovld __cnfn convert_uchar16_rte(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16); +uchar16 __ovld 
__cnfn convert_uchar16_rtz(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16); +uchar16 __ovld __cnfn convert_uchar16_rtp(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16); +uchar16 __ovld __cnfn convert_uchar16_rtn(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16); +uchar16 __ovld __cnfn convert_uchar16(long16); +uchar16 __ovld __cnfn convert_uchar16_sat(long16); +uchar16 __ovld __cnfn convert_uchar16_rte(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16); +uchar16 __ovld __cnfn convert_uchar16(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rte(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16); +uchar16 __ovld __cnfn convert_uchar16_rtz(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16); +uchar16 __ovld __cnfn convert_uchar16_rtp(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16); +uchar16 __ovld __cnfn convert_uchar16_rtn(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16); +uchar16 __ovld __cnfn convert_uchar16(float16); +uchar16 __ovld __cnfn convert_uchar16_sat(float16); +short16 __ovld __cnfn convert_short16_rte(char16); +short16 __ovld __cnfn convert_short16_sat_rte(char16); +short16 __ovld __cnfn convert_short16_rtz(char16); +short16 __ovld __cnfn convert_short16_sat_rtz(char16); +short16 __ovld __cnfn convert_short16_rtp(char16); +short16 __ovld __cnfn convert_short16_sat_rtp(char16); +short16 __ovld __cnfn convert_short16_rtn(char16); +short16 __ovld __cnfn convert_short16_sat_rtn(char16); +short16 __ovld __cnfn convert_short16(char16); +short16 __ovld __cnfn convert_short16_sat(char16); +short16 __ovld __cnfn convert_short16_rte(uchar16); +short16 __ovld __cnfn convert_short16_sat_rte(uchar16); +short16 __ovld __cnfn convert_short16_rtz(uchar16); +short16 __ovld __cnfn convert_short16_sat_rtz(uchar16); +short16 __ovld __cnfn convert_short16_rtp(uchar16); +short16 __ovld __cnfn convert_short16_sat_rtp(uchar16); +short16 __ovld __cnfn convert_short16_rtn(uchar16); +short16 __ovld __cnfn convert_short16_sat_rtn(uchar16); +short16 __ovld __cnfn convert_short16(uchar16); +short16 __ovld __cnfn convert_short16_sat(uchar16); +short16 __ovld __cnfn convert_short16_rte(short16); +short16 __ovld __cnfn convert_short16_sat_rte(short16); +short16 __ovld __cnfn convert_short16_rtz(short16); +short16 __ovld __cnfn convert_short16_sat_rtz(short16); +short16 __ovld __cnfn convert_short16_rtp(short16); +short16 __ovld __cnfn convert_short16_sat_rtp(short16); +short16 __ovld __cnfn convert_short16_rtn(short16); +short16 __ovld __cnfn convert_short16_sat_rtn(short16); +short16 __ovld __cnfn convert_short16(short16); +short16 __ovld __cnfn convert_short16_sat(short16); +short16 __ovld __cnfn convert_short16_rte(ushort16); +short16 __ovld __cnfn convert_short16_sat_rte(ushort16); +short16 __ovld __cnfn convert_short16_rtz(ushort16); +short16 __ovld __cnfn convert_short16_sat_rtz(ushort16); +short16 __ovld __cnfn convert_short16_rtp(ushort16); +short16 __ovld __cnfn convert_short16_sat_rtp(ushort16); +short16 __ovld __cnfn convert_short16_rtn(ushort16); +short16 __ovld __cnfn 
convert_short16_sat_rtn(ushort16); +short16 __ovld __cnfn convert_short16(ushort16); +short16 __ovld __cnfn convert_short16_sat(ushort16); +short16 __ovld __cnfn convert_short16_rte(int16); +short16 __ovld __cnfn convert_short16_sat_rte(int16); +short16 __ovld __cnfn convert_short16_rtz(int16); +short16 __ovld __cnfn convert_short16_sat_rtz(int16); +short16 __ovld __cnfn convert_short16_rtp(int16); +short16 __ovld __cnfn convert_short16_sat_rtp(int16); +short16 __ovld __cnfn convert_short16_rtn(int16); +short16 __ovld __cnfn convert_short16_sat_rtn(int16); +short16 __ovld __cnfn convert_short16(int16); +short16 __ovld __cnfn convert_short16_sat(int16); +short16 __ovld __cnfn convert_short16_rte(uint16); +short16 __ovld __cnfn convert_short16_sat_rte(uint16); +short16 __ovld __cnfn convert_short16_rtz(uint16); +short16 __ovld __cnfn convert_short16_sat_rtz(uint16); +short16 __ovld __cnfn convert_short16_rtp(uint16); +short16 __ovld __cnfn convert_short16_sat_rtp(uint16); +short16 __ovld __cnfn convert_short16_rtn(uint16); +short16 __ovld __cnfn convert_short16_sat_rtn(uint16); +short16 __ovld __cnfn convert_short16(uint16); +short16 __ovld __cnfn convert_short16_sat(uint16); +short16 __ovld __cnfn convert_short16_rte(long16); +short16 __ovld __cnfn convert_short16_sat_rte(long16); +short16 __ovld __cnfn convert_short16_rtz(long16); +short16 __ovld __cnfn convert_short16_sat_rtz(long16); +short16 __ovld __cnfn convert_short16_rtp(long16); +short16 __ovld __cnfn convert_short16_sat_rtp(long16); +short16 __ovld __cnfn convert_short16_rtn(long16); +short16 __ovld __cnfn convert_short16_sat_rtn(long16); +short16 __ovld __cnfn convert_short16(long16); +short16 __ovld __cnfn convert_short16_sat(long16); +short16 __ovld __cnfn convert_short16_rte(ulong16); +short16 __ovld __cnfn convert_short16_sat_rte(ulong16); +short16 __ovld __cnfn convert_short16_rtz(ulong16); +short16 __ovld __cnfn convert_short16_sat_rtz(ulong16); +short16 __ovld __cnfn convert_short16_rtp(ulong16); +short16 __ovld __cnfn convert_short16_sat_rtp(ulong16); +short16 __ovld __cnfn convert_short16_rtn(ulong16); +short16 __ovld __cnfn convert_short16_sat_rtn(ulong16); +short16 __ovld __cnfn convert_short16(ulong16); +short16 __ovld __cnfn convert_short16_sat(ulong16); +short16 __ovld __cnfn convert_short16_rte(float16); +short16 __ovld __cnfn convert_short16_sat_rte(float16); +short16 __ovld __cnfn convert_short16_rtz(float16); +short16 __ovld __cnfn convert_short16_sat_rtz(float16); +short16 __ovld __cnfn convert_short16_rtp(float16); +short16 __ovld __cnfn convert_short16_sat_rtp(float16); +short16 __ovld __cnfn convert_short16_rtn(float16); +short16 __ovld __cnfn convert_short16_sat_rtn(float16); +short16 __ovld __cnfn convert_short16(float16); +short16 __ovld __cnfn convert_short16_sat(float16); +ushort16 __ovld __cnfn convert_ushort16_rte(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16); +ushort16 __ovld __cnfn convert_ushort16_rtz(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16); +ushort16 __ovld __cnfn convert_ushort16_rtp(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16); +ushort16 __ovld __cnfn convert_ushort16_rtn(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16); +ushort16 __ovld __cnfn convert_ushort16(char16); +ushort16 __ovld __cnfn convert_ushort16_sat(char16); +ushort16 __ovld __cnfn convert_ushort16_rte(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16); +ushort16 __ovld __cnfn 
convert_ushort16_sat_rtz(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16); +ushort16 __ovld __cnfn convert_ushort16(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rte(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16); +ushort16 __ovld __cnfn convert_ushort16_rtz(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16); +ushort16 __ovld __cnfn convert_ushort16_rtp(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16); +ushort16 __ovld __cnfn convert_ushort16_rtn(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16); +ushort16 __ovld __cnfn convert_ushort16(short16); +ushort16 __ovld __cnfn convert_ushort16_sat(short16); +ushort16 __ovld __cnfn convert_ushort16_rte(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16); +ushort16 __ovld __cnfn convert_ushort16(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rte(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16); +ushort16 __ovld __cnfn convert_ushort16_rtz(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16); +ushort16 __ovld __cnfn convert_ushort16_rtp(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16); +ushort16 __ovld __cnfn convert_ushort16_rtn(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16); +ushort16 __ovld __cnfn convert_ushort16(int16); +ushort16 __ovld __cnfn convert_ushort16_sat(int16); +ushort16 __ovld __cnfn convert_ushort16_rte(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16); +ushort16 __ovld __cnfn convert_ushort16_rtz(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16); +ushort16 __ovld __cnfn convert_ushort16_rtp(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16); +ushort16 __ovld __cnfn convert_ushort16_rtn(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16); +ushort16 __ovld __cnfn convert_ushort16(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat(uint16); +ushort16 __ovld __cnfn convert_ushort16_rte(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16); +ushort16 __ovld __cnfn convert_ushort16_rtz(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16); +ushort16 __ovld __cnfn convert_ushort16_rtp(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16); +ushort16 __ovld __cnfn convert_ushort16_rtn(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16); +ushort16 __ovld __cnfn convert_ushort16(long16); +ushort16 __ovld __cnfn convert_ushort16_sat(long16); +ushort16 __ovld __cnfn convert_ushort16_rte(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16); 
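// For integer-to-integer narrowing, the _sat variants clamp, while the plain
// variants keep only the least-significant bits that fit in the destination
// element. An illustrative comparison (example values, not from the header):
//   int4 v = (int4)(300, -5, 42, 70000);
//   uchar4 a = convert_uchar4_sat(v);  // (255, 0, 42, 255)  -- clamped
//   uchar4 b = convert_uchar4(v);      // (44, 251, 42, 112) -- low 8 bits kept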
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16); +ushort16 __ovld __cnfn convert_ushort16(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rte(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16); +ushort16 __ovld __cnfn convert_ushort16_rtz(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16); +ushort16 __ovld __cnfn convert_ushort16_rtp(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16); +ushort16 __ovld __cnfn convert_ushort16_rtn(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16); +ushort16 __ovld __cnfn convert_ushort16(float16); +ushort16 __ovld __cnfn convert_ushort16_sat(float16); +int16 __ovld __cnfn convert_int16_rte(char16); +int16 __ovld __cnfn convert_int16_sat_rte(char16); +int16 __ovld __cnfn convert_int16_rtz(char16); +int16 __ovld __cnfn convert_int16_sat_rtz(char16); +int16 __ovld __cnfn convert_int16_rtp(char16); +int16 __ovld __cnfn convert_int16_sat_rtp(char16); +int16 __ovld __cnfn convert_int16_rtn(char16); +int16 __ovld __cnfn convert_int16_sat_rtn(char16); +int16 __ovld __cnfn convert_int16(char16); +int16 __ovld __cnfn convert_int16_sat(char16); +int16 __ovld __cnfn convert_int16_rte(uchar16); +int16 __ovld __cnfn convert_int16_sat_rte(uchar16); +int16 __ovld __cnfn convert_int16_rtz(uchar16); +int16 __ovld __cnfn convert_int16_sat_rtz(uchar16); +int16 __ovld __cnfn convert_int16_rtp(uchar16); +int16 __ovld __cnfn convert_int16_sat_rtp(uchar16); +int16 __ovld __cnfn convert_int16_rtn(uchar16); +int16 __ovld __cnfn convert_int16_sat_rtn(uchar16); +int16 __ovld __cnfn convert_int16(uchar16); +int16 __ovld __cnfn convert_int16_sat(uchar16); +int16 __ovld __cnfn convert_int16_rte(short16); +int16 __ovld __cnfn convert_int16_sat_rte(short16); +int16 __ovld __cnfn convert_int16_rtz(short16); +int16 __ovld __cnfn convert_int16_sat_rtz(short16); +int16 __ovld __cnfn convert_int16_rtp(short16); +int16 __ovld __cnfn convert_int16_sat_rtp(short16); +int16 __ovld __cnfn convert_int16_rtn(short16); +int16 __ovld __cnfn convert_int16_sat_rtn(short16); +int16 __ovld __cnfn convert_int16(short16); +int16 __ovld __cnfn convert_int16_sat(short16); +int16 __ovld __cnfn convert_int16_rte(ushort16); +int16 __ovld __cnfn convert_int16_sat_rte(ushort16); +int16 __ovld __cnfn convert_int16_rtz(ushort16); +int16 __ovld __cnfn convert_int16_sat_rtz(ushort16); +int16 __ovld __cnfn convert_int16_rtp(ushort16); +int16 __ovld __cnfn convert_int16_sat_rtp(ushort16); +int16 __ovld __cnfn convert_int16_rtn(ushort16); +int16 __ovld __cnfn convert_int16_sat_rtn(ushort16); +int16 __ovld __cnfn convert_int16(ushort16); +int16 __ovld __cnfn convert_int16_sat(ushort16); +int16 __ovld __cnfn convert_int16_rte(int16); +int16 __ovld __cnfn convert_int16_sat_rte(int16); +int16 __ovld __cnfn convert_int16_rtz(int16); +int16 __ovld __cnfn convert_int16_sat_rtz(int16); +int16 __ovld __cnfn convert_int16_rtp(int16); +int16 __ovld __cnfn convert_int16_sat_rtp(int16); +int16 __ovld __cnfn convert_int16_rtn(int16); +int16 __ovld __cnfn convert_int16_sat_rtn(int16); +int16 __ovld __cnfn convert_int16(int16); +int16 __ovld __cnfn convert_int16_sat(int16); +int16 __ovld __cnfn convert_int16_rte(uint16); +int16 __ovld __cnfn convert_int16_sat_rte(uint16); +int16 __ovld __cnfn convert_int16_rtz(uint16); +int16 __ovld __cnfn convert_int16_sat_rtz(uint16); +int16 __ovld __cnfn convert_int16_rtp(uint16); +int16 __ovld __cnfn convert_int16_sat_rtp(uint16); +int16 __ovld __cnfn 
convert_int16_rtn(uint16); +int16 __ovld __cnfn convert_int16_sat_rtn(uint16); +int16 __ovld __cnfn convert_int16(uint16); +int16 __ovld __cnfn convert_int16_sat(uint16); +int16 __ovld __cnfn convert_int16_rte(long16); +int16 __ovld __cnfn convert_int16_sat_rte(long16); +int16 __ovld __cnfn convert_int16_rtz(long16); +int16 __ovld __cnfn convert_int16_sat_rtz(long16); +int16 __ovld __cnfn convert_int16_rtp(long16); +int16 __ovld __cnfn convert_int16_sat_rtp(long16); +int16 __ovld __cnfn convert_int16_rtn(long16); +int16 __ovld __cnfn convert_int16_sat_rtn(long16); +int16 __ovld __cnfn convert_int16(long16); +int16 __ovld __cnfn convert_int16_sat(long16); +int16 __ovld __cnfn convert_int16_rte(ulong16); +int16 __ovld __cnfn convert_int16_sat_rte(ulong16); +int16 __ovld __cnfn convert_int16_rtz(ulong16); +int16 __ovld __cnfn convert_int16_sat_rtz(ulong16); +int16 __ovld __cnfn convert_int16_rtp(ulong16); +int16 __ovld __cnfn convert_int16_sat_rtp(ulong16); +int16 __ovld __cnfn convert_int16_rtn(ulong16); +int16 __ovld __cnfn convert_int16_sat_rtn(ulong16); +int16 __ovld __cnfn convert_int16(ulong16); +int16 __ovld __cnfn convert_int16_sat(ulong16); +int16 __ovld __cnfn convert_int16_rte(float16); +int16 __ovld __cnfn convert_int16_sat_rte(float16); +int16 __ovld __cnfn convert_int16_rtz(float16); +int16 __ovld __cnfn convert_int16_sat_rtz(float16); +int16 __ovld __cnfn convert_int16_rtp(float16); +int16 __ovld __cnfn convert_int16_sat_rtp(float16); +int16 __ovld __cnfn convert_int16_rtn(float16); +int16 __ovld __cnfn convert_int16_sat_rtn(float16); +int16 __ovld __cnfn convert_int16(float16); +int16 __ovld __cnfn convert_int16_sat(float16); +uint16 __ovld __cnfn convert_uint16_rte(char16); +uint16 __ovld __cnfn convert_uint16_sat_rte(char16); +uint16 __ovld __cnfn convert_uint16_rtz(char16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(char16); +uint16 __ovld __cnfn convert_uint16_rtp(char16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(char16); +uint16 __ovld __cnfn convert_uint16_rtn(char16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(char16); +uint16 __ovld __cnfn convert_uint16(char16); +uint16 __ovld __cnfn convert_uint16_sat(char16); +uint16 __ovld __cnfn convert_uint16_rte(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16); +uint16 __ovld __cnfn convert_uint16_rtz(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16); +uint16 __ovld __cnfn convert_uint16_rtp(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16); +uint16 __ovld __cnfn convert_uint16_rtn(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16); +uint16 __ovld __cnfn convert_uint16(uchar16); +uint16 __ovld __cnfn convert_uint16_sat(uchar16); +uint16 __ovld __cnfn convert_uint16_rte(short16); +uint16 __ovld __cnfn convert_uint16_sat_rte(short16); +uint16 __ovld __cnfn convert_uint16_rtz(short16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(short16); +uint16 __ovld __cnfn convert_uint16_rtp(short16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(short16); +uint16 __ovld __cnfn convert_uint16_rtn(short16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(short16); +uint16 __ovld __cnfn convert_uint16(short16); +uint16 __ovld __cnfn convert_uint16_sat(short16); +uint16 __ovld __cnfn convert_uint16_rte(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16); +uint16 __ovld __cnfn convert_uint16_rtz(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16); +uint16 __ovld __cnfn convert_uint16_rtp(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16); +uint16 
__ovld __cnfn convert_uint16_rtn(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16); +uint16 __ovld __cnfn convert_uint16(ushort16); +uint16 __ovld __cnfn convert_uint16_sat(ushort16); +uint16 __ovld __cnfn convert_uint16_rte(int16); +uint16 __ovld __cnfn convert_uint16_sat_rte(int16); +uint16 __ovld __cnfn convert_uint16_rtz(int16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(int16); +uint16 __ovld __cnfn convert_uint16_rtp(int16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(int16); +uint16 __ovld __cnfn convert_uint16_rtn(int16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(int16); +uint16 __ovld __cnfn convert_uint16(int16); +uint16 __ovld __cnfn convert_uint16_sat(int16); +uint16 __ovld __cnfn convert_uint16_rte(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rte(uint16); +uint16 __ovld __cnfn convert_uint16_rtz(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16); +uint16 __ovld __cnfn convert_uint16_rtp(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16); +uint16 __ovld __cnfn convert_uint16_rtn(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16); +uint16 __ovld __cnfn convert_uint16(uint16); +uint16 __ovld __cnfn convert_uint16_sat(uint16); +uint16 __ovld __cnfn convert_uint16_rte(long16); +uint16 __ovld __cnfn convert_uint16_sat_rte(long16); +uint16 __ovld __cnfn convert_uint16_rtz(long16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(long16); +uint16 __ovld __cnfn convert_uint16_rtp(long16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(long16); +uint16 __ovld __cnfn convert_uint16_rtn(long16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(long16); +uint16 __ovld __cnfn convert_uint16(long16); +uint16 __ovld __cnfn convert_uint16_sat(long16); +uint16 __ovld __cnfn convert_uint16_rte(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16); +uint16 __ovld __cnfn convert_uint16_rtz(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16); +uint16 __ovld __cnfn convert_uint16_rtp(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16); +uint16 __ovld __cnfn convert_uint16_rtn(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16); +uint16 __ovld __cnfn convert_uint16(ulong16); +uint16 __ovld __cnfn convert_uint16_sat(ulong16); +uint16 __ovld __cnfn convert_uint16_rte(float16); +uint16 __ovld __cnfn convert_uint16_sat_rte(float16); +uint16 __ovld __cnfn convert_uint16_rtz(float16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(float16); +uint16 __ovld __cnfn convert_uint16_rtp(float16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(float16); +uint16 __ovld __cnfn convert_uint16_rtn(float16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(float16); +uint16 __ovld __cnfn convert_uint16(float16); +uint16 __ovld __cnfn convert_uint16_sat(float16); +long16 __ovld __cnfn convert_long16_rte(char16); +long16 __ovld __cnfn convert_long16_sat_rte(char16); +long16 __ovld __cnfn convert_long16_rtz(char16); +long16 __ovld __cnfn convert_long16_sat_rtz(char16); +long16 __ovld __cnfn convert_long16_rtp(char16); +long16 __ovld __cnfn convert_long16_sat_rtp(char16); +long16 __ovld __cnfn convert_long16_rtn(char16); +long16 __ovld __cnfn convert_long16_sat_rtn(char16); +long16 __ovld __cnfn convert_long16(char16); +long16 __ovld __cnfn convert_long16_sat(char16); +long16 __ovld __cnfn convert_long16_rte(uchar16); +long16 __ovld __cnfn convert_long16_sat_rte(uchar16); +long16 __ovld __cnfn convert_long16_rtz(uchar16); +long16 __ovld __cnfn convert_long16_sat_rtz(uchar16); +long16 __ovld __cnfn convert_long16_rtp(uchar16); +long16 
__ovld __cnfn convert_long16_sat_rtp(uchar16); +long16 __ovld __cnfn convert_long16_rtn(uchar16); +long16 __ovld __cnfn convert_long16_sat_rtn(uchar16); +long16 __ovld __cnfn convert_long16(uchar16); +long16 __ovld __cnfn convert_long16_sat(uchar16); +long16 __ovld __cnfn convert_long16_rte(short16); +long16 __ovld __cnfn convert_long16_sat_rte(short16); +long16 __ovld __cnfn convert_long16_rtz(short16); +long16 __ovld __cnfn convert_long16_sat_rtz(short16); +long16 __ovld __cnfn convert_long16_rtp(short16); +long16 __ovld __cnfn convert_long16_sat_rtp(short16); +long16 __ovld __cnfn convert_long16_rtn(short16); +long16 __ovld __cnfn convert_long16_sat_rtn(short16); +long16 __ovld __cnfn convert_long16(short16); +long16 __ovld __cnfn convert_long16_sat(short16); +long16 __ovld __cnfn convert_long16_rte(ushort16); +long16 __ovld __cnfn convert_long16_sat_rte(ushort16); +long16 __ovld __cnfn convert_long16_rtz(ushort16); +long16 __ovld __cnfn convert_long16_sat_rtz(ushort16); +long16 __ovld __cnfn convert_long16_rtp(ushort16); +long16 __ovld __cnfn convert_long16_sat_rtp(ushort16); +long16 __ovld __cnfn convert_long16_rtn(ushort16); +long16 __ovld __cnfn convert_long16_sat_rtn(ushort16); +long16 __ovld __cnfn convert_long16(ushort16); +long16 __ovld __cnfn convert_long16_sat(ushort16); +long16 __ovld __cnfn convert_long16_rte(int16); +long16 __ovld __cnfn convert_long16_sat_rte(int16); +long16 __ovld __cnfn convert_long16_rtz(int16); +long16 __ovld __cnfn convert_long16_sat_rtz(int16); +long16 __ovld __cnfn convert_long16_rtp(int16); +long16 __ovld __cnfn convert_long16_sat_rtp(int16); +long16 __ovld __cnfn convert_long16_rtn(int16); +long16 __ovld __cnfn convert_long16_sat_rtn(int16); +long16 __ovld __cnfn convert_long16(int16); +long16 __ovld __cnfn convert_long16_sat(int16); +long16 __ovld __cnfn convert_long16_rte(uint16); +long16 __ovld __cnfn convert_long16_sat_rte(uint16); +long16 __ovld __cnfn convert_long16_rtz(uint16); +long16 __ovld __cnfn convert_long16_sat_rtz(uint16); +long16 __ovld __cnfn convert_long16_rtp(uint16); +long16 __ovld __cnfn convert_long16_sat_rtp(uint16); +long16 __ovld __cnfn convert_long16_rtn(uint16); +long16 __ovld __cnfn convert_long16_sat_rtn(uint16); +long16 __ovld __cnfn convert_long16(uint16); +long16 __ovld __cnfn convert_long16_sat(uint16); +long16 __ovld __cnfn convert_long16_rte(long16); +long16 __ovld __cnfn convert_long16_sat_rte(long16); +long16 __ovld __cnfn convert_long16_rtz(long16); +long16 __ovld __cnfn convert_long16_sat_rtz(long16); +long16 __ovld __cnfn convert_long16_rtp(long16); +long16 __ovld __cnfn convert_long16_sat_rtp(long16); +long16 __ovld __cnfn convert_long16_rtn(long16); +long16 __ovld __cnfn convert_long16_sat_rtn(long16); +long16 __ovld __cnfn convert_long16(long16); +long16 __ovld __cnfn convert_long16_sat(long16); +long16 __ovld __cnfn convert_long16_rte(ulong16); +long16 __ovld __cnfn convert_long16_sat_rte(ulong16); +long16 __ovld __cnfn convert_long16_rtz(ulong16); +long16 __ovld __cnfn convert_long16_sat_rtz(ulong16); +long16 __ovld __cnfn convert_long16_rtp(ulong16); +long16 __ovld __cnfn convert_long16_sat_rtp(ulong16); +long16 __ovld __cnfn convert_long16_rtn(ulong16); +long16 __ovld __cnfn convert_long16_sat_rtn(ulong16); +long16 __ovld __cnfn convert_long16(ulong16); +long16 __ovld __cnfn convert_long16_sat(ulong16); +long16 __ovld __cnfn convert_long16_rte(float16); +long16 __ovld __cnfn convert_long16_sat_rte(float16); +long16 __ovld __cnfn convert_long16_rtz(float16); +long16 __ovld __cnfn 
convert_long16_sat_rtz(float16); +long16 __ovld __cnfn convert_long16_rtp(float16); +long16 __ovld __cnfn convert_long16_sat_rtp(float16); +long16 __ovld __cnfn convert_long16_rtn(float16); +long16 __ovld __cnfn convert_long16_sat_rtn(float16); +long16 __ovld __cnfn convert_long16(float16); +long16 __ovld __cnfn convert_long16_sat(float16); +ulong16 __ovld __cnfn convert_ulong16_rte(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16); +ulong16 __ovld __cnfn convert_ulong16_rtz(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16); +ulong16 __ovld __cnfn convert_ulong16_rtp(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16); +ulong16 __ovld __cnfn convert_ulong16_rtn(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16); +ulong16 __ovld __cnfn convert_ulong16(char16); +ulong16 __ovld __cnfn convert_ulong16_sat(char16); +ulong16 __ovld __cnfn convert_ulong16_rte(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16); +ulong16 __ovld __cnfn convert_ulong16(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rte(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16); +ulong16 __ovld __cnfn convert_ulong16_rtz(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16); +ulong16 __ovld __cnfn convert_ulong16_rtp(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16); +ulong16 __ovld __cnfn convert_ulong16_rtn(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16); +ulong16 __ovld __cnfn convert_ulong16(short16); +ulong16 __ovld __cnfn convert_ulong16_sat(short16); +ulong16 __ovld __cnfn convert_ulong16_rte(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16); +ulong16 __ovld __cnfn convert_ulong16(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rte(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16); +ulong16 __ovld __cnfn convert_ulong16_rtz(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16); +ulong16 __ovld __cnfn convert_ulong16_rtp(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16); +ulong16 __ovld __cnfn convert_ulong16_rtn(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16); +ulong16 __ovld __cnfn convert_ulong16(int16); +ulong16 __ovld __cnfn convert_ulong16_sat(int16); +ulong16 __ovld __cnfn convert_ulong16_rte(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16); +ulong16 __ovld __cnfn convert_ulong16_rtz(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16); +ulong16 __ovld __cnfn convert_ulong16_rtp(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16); +ulong16 __ovld __cnfn convert_ulong16_rtn(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16); +ulong16 __ovld __cnfn convert_ulong16(uint16); +ulong16 __ovld __cnfn 
convert_ulong16_sat(uint16); +ulong16 __ovld __cnfn convert_ulong16_rte(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16); +ulong16 __ovld __cnfn convert_ulong16_rtz(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16); +ulong16 __ovld __cnfn convert_ulong16_rtp(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16); +ulong16 __ovld __cnfn convert_ulong16_rtn(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16); +ulong16 __ovld __cnfn convert_ulong16(long16); +ulong16 __ovld __cnfn convert_ulong16_sat(long16); +ulong16 __ovld __cnfn convert_ulong16_rte(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16); +ulong16 __ovld __cnfn convert_ulong16(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rte(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16); +ulong16 __ovld __cnfn convert_ulong16_rtz(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16); +ulong16 __ovld __cnfn convert_ulong16_rtp(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16); +ulong16 __ovld __cnfn convert_ulong16_rtn(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16); +ulong16 __ovld __cnfn convert_ulong16(float16); +ulong16 __ovld __cnfn convert_ulong16_sat(float16); +float16 __ovld __cnfn convert_float16_rte(char16); +float16 __ovld __cnfn convert_float16_rtz(char16); +float16 __ovld __cnfn convert_float16_rtp(char16); +float16 __ovld __cnfn convert_float16_rtn(char16); +float16 __ovld __cnfn convert_float16(char16); +float16 __ovld __cnfn convert_float16_rte(uchar16); +float16 __ovld __cnfn convert_float16_rtz(uchar16); +float16 __ovld __cnfn convert_float16_rtp(uchar16); +float16 __ovld __cnfn convert_float16_rtn(uchar16); +float16 __ovld __cnfn convert_float16(uchar16); +float16 __ovld __cnfn convert_float16_rte(short16); +float16 __ovld __cnfn convert_float16_rtz(short16); +float16 __ovld __cnfn convert_float16_rtp(short16); +float16 __ovld __cnfn convert_float16_rtn(short16); +float16 __ovld __cnfn convert_float16(short16); +float16 __ovld __cnfn convert_float16_rte(ushort16); +float16 __ovld __cnfn convert_float16_rtz(ushort16); +float16 __ovld __cnfn convert_float16_rtp(ushort16); +float16 __ovld __cnfn convert_float16_rtn(ushort16); +float16 __ovld __cnfn convert_float16(ushort16); +float16 __ovld __cnfn convert_float16_rte(int16); +float16 __ovld __cnfn convert_float16_rtz(int16); +float16 __ovld __cnfn convert_float16_rtp(int16); +float16 __ovld __cnfn convert_float16_rtn(int16); +float16 __ovld __cnfn convert_float16(int16); +float16 __ovld __cnfn convert_float16_rte(uint16); +float16 __ovld __cnfn convert_float16_rtz(uint16); +float16 __ovld __cnfn convert_float16_rtp(uint16); +float16 __ovld __cnfn convert_float16_rtn(uint16); +float16 __ovld __cnfn convert_float16(uint16); +float16 __ovld __cnfn convert_float16_rte(long16); +float16 __ovld __cnfn convert_float16_rtz(long16); +float16 __ovld __cnfn convert_float16_rtp(long16); +float16 __ovld __cnfn convert_float16_rtn(long16); +float16 __ovld __cnfn convert_float16(long16); +float16 __ovld __cnfn convert_float16_rte(ulong16); +float16 __ovld __cnfn 
convert_float16_rtz(ulong16); +float16 __ovld __cnfn convert_float16_rtp(ulong16); +float16 __ovld __cnfn convert_float16_rtn(ulong16); +float16 __ovld __cnfn convert_float16(ulong16); +float16 __ovld __cnfn convert_float16_rte(float16); +float16 __ovld __cnfn convert_float16_rtz(float16); +float16 __ovld __cnfn convert_float16_rtp(float16); +float16 __ovld __cnfn convert_float16_rtn(float16); +float16 __ovld __cnfn convert_float16(float16); + +// Conversions with double data type parameters or return value. + +#ifdef cl_khr_fp64 +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +char __ovld __cnfn convert_char(double); +char __ovld __cnfn convert_char_rte(double); +char __ovld __cnfn convert_char_rtn(double); +char __ovld __cnfn convert_char_rtp(double); +char __ovld __cnfn convert_char_rtz(double); +char __ovld __cnfn convert_char_sat(double); +char __ovld __cnfn convert_char_sat_rte(double); +char __ovld __cnfn convert_char_sat_rtn(double); +char __ovld __cnfn convert_char_sat_rtp(double); +char __ovld __cnfn convert_char_sat_rtz(double); +char2 __ovld __cnfn convert_char2(double2); +char2 __ovld __cnfn convert_char2_rte(double2); +char2 __ovld __cnfn convert_char2_rtn(double2); +char2 __ovld __cnfn convert_char2_rtp(double2); +char2 __ovld __cnfn convert_char2_rtz(double2); +char2 __ovld __cnfn convert_char2_sat(double2); +char2 __ovld __cnfn convert_char2_sat_rte(double2); +char2 __ovld __cnfn convert_char2_sat_rtn(double2); +char2 __ovld __cnfn convert_char2_sat_rtp(double2); +char2 __ovld __cnfn convert_char2_sat_rtz(double2); +char3 __ovld __cnfn convert_char3(double3); +char3 __ovld __cnfn convert_char3_rte(double3); +char3 __ovld __cnfn convert_char3_rtn(double3); +char3 __ovld __cnfn convert_char3_rtp(double3); +char3 __ovld __cnfn convert_char3_rtz(double3); +char3 __ovld __cnfn convert_char3_sat(double3); +char3 __ovld __cnfn convert_char3_sat_rte(double3); +char3 __ovld __cnfn convert_char3_sat_rtn(double3); +char3 __ovld __cnfn convert_char3_sat_rtp(double3); +char3 __ovld __cnfn convert_char3_sat_rtz(double3); +char4 __ovld __cnfn convert_char4(double4); +char4 __ovld __cnfn convert_char4_rte(double4); +char4 __ovld __cnfn convert_char4_rtn(double4); +char4 __ovld __cnfn convert_char4_rtp(double4); +char4 __ovld __cnfn convert_char4_rtz(double4); +char4 __ovld __cnfn convert_char4_sat(double4); +char4 __ovld __cnfn convert_char4_sat_rte(double4); +char4 __ovld __cnfn convert_char4_sat_rtn(double4); +char4 __ovld __cnfn convert_char4_sat_rtp(double4); +char4 __ovld __cnfn convert_char4_sat_rtz(double4); +char8 __ovld __cnfn convert_char8(double8); +char8 __ovld __cnfn convert_char8_rte(double8); +char8 __ovld __cnfn convert_char8_rtn(double8); +char8 __ovld __cnfn convert_char8_rtp(double8); +char8 __ovld __cnfn convert_char8_rtz(double8); +char8 __ovld __cnfn convert_char8_sat(double8); +char8 __ovld __cnfn convert_char8_sat_rte(double8); +char8 __ovld __cnfn convert_char8_sat_rtn(double8); +char8 __ovld __cnfn convert_char8_sat_rtp(double8); +char8 __ovld __cnfn convert_char8_sat_rtz(double8); +char16 __ovld __cnfn convert_char16(double16); +char16 __ovld __cnfn convert_char16_rte(double16); +char16 __ovld __cnfn convert_char16_rtn(double16); +char16 __ovld __cnfn convert_char16_rtp(double16); +char16 __ovld __cnfn convert_char16_rtz(double16); +char16 __ovld __cnfn convert_char16_sat(double16); +char16 __ovld __cnfn convert_char16_sat_rte(double16); +char16 __ovld __cnfn convert_char16_sat_rtn(double16); +char16 __ovld __cnfn convert_char16_sat_rtp(double16); +char16 __ovld 
__cnfn convert_char16_sat_rtz(double16); + +uchar __ovld __cnfn convert_uchar(double); +uchar __ovld __cnfn convert_uchar_rte(double); +uchar __ovld __cnfn convert_uchar_rtn(double); +uchar __ovld __cnfn convert_uchar_rtp(double); +uchar __ovld __cnfn convert_uchar_rtz(double); +uchar __ovld __cnfn convert_uchar_sat(double); +uchar __ovld __cnfn convert_uchar_sat_rte(double); +uchar __ovld __cnfn convert_uchar_sat_rtn(double); +uchar __ovld __cnfn convert_uchar_sat_rtp(double); +uchar __ovld __cnfn convert_uchar_sat_rtz(double); +uchar2 __ovld __cnfn convert_uchar2(double2); +uchar2 __ovld __cnfn convert_uchar2_rte(double2); +uchar2 __ovld __cnfn convert_uchar2_rtn(double2); +uchar2 __ovld __cnfn convert_uchar2_rtp(double2); +uchar2 __ovld __cnfn convert_uchar2_rtz(double2); +uchar2 __ovld __cnfn convert_uchar2_sat(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2); +uchar3 __ovld __cnfn convert_uchar3(double3); +uchar3 __ovld __cnfn convert_uchar3_rte(double3); +uchar3 __ovld __cnfn convert_uchar3_rtn(double3); +uchar3 __ovld __cnfn convert_uchar3_rtp(double3); +uchar3 __ovld __cnfn convert_uchar3_rtz(double3); +uchar3 __ovld __cnfn convert_uchar3_sat(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3); +uchar4 __ovld __cnfn convert_uchar4(double4); +uchar4 __ovld __cnfn convert_uchar4_rte(double4); +uchar4 __ovld __cnfn convert_uchar4_rtn(double4); +uchar4 __ovld __cnfn convert_uchar4_rtp(double4); +uchar4 __ovld __cnfn convert_uchar4_rtz(double4); +uchar4 __ovld __cnfn convert_uchar4_sat(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4); +uchar8 __ovld __cnfn convert_uchar8(double8); +uchar8 __ovld __cnfn convert_uchar8_rte(double8); +uchar8 __ovld __cnfn convert_uchar8_rtn(double8); +uchar8 __ovld __cnfn convert_uchar8_rtp(double8); +uchar8 __ovld __cnfn convert_uchar8_rtz(double8); +uchar8 __ovld __cnfn convert_uchar8_sat(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8); +uchar16 __ovld __cnfn convert_uchar16(double16); +uchar16 __ovld __cnfn convert_uchar16_rte(double16); +uchar16 __ovld __cnfn convert_uchar16_rtn(double16); +uchar16 __ovld __cnfn convert_uchar16_rtp(double16); +uchar16 __ovld __cnfn convert_uchar16_rtz(double16); +uchar16 __ovld __cnfn convert_uchar16_sat(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16); + +short __ovld __cnfn convert_short(double); +short __ovld __cnfn convert_short_rte(double); +short __ovld __cnfn convert_short_rtn(double); +short __ovld __cnfn convert_short_rtp(double); +short __ovld __cnfn convert_short_rtz(double); +short __ovld __cnfn convert_short_sat(double); +short __ovld __cnfn convert_short_sat_rte(double); +short __ovld __cnfn 
convert_short_sat_rtn(double); +short __ovld __cnfn convert_short_sat_rtp(double); +short __ovld __cnfn convert_short_sat_rtz(double); +short2 __ovld __cnfn convert_short2(double2); +short2 __ovld __cnfn convert_short2_rte(double2); +short2 __ovld __cnfn convert_short2_rtn(double2); +short2 __ovld __cnfn convert_short2_rtp(double2); +short2 __ovld __cnfn convert_short2_rtz(double2); +short2 __ovld __cnfn convert_short2_sat(double2); +short2 __ovld __cnfn convert_short2_sat_rte(double2); +short2 __ovld __cnfn convert_short2_sat_rtn(double2); +short2 __ovld __cnfn convert_short2_sat_rtp(double2); +short2 __ovld __cnfn convert_short2_sat_rtz(double2); +short3 __ovld __cnfn convert_short3(double3); +short3 __ovld __cnfn convert_short3_rte(double3); +short3 __ovld __cnfn convert_short3_rtn(double3); +short3 __ovld __cnfn convert_short3_rtp(double3); +short3 __ovld __cnfn convert_short3_rtz(double3); +short3 __ovld __cnfn convert_short3_sat(double3); +short3 __ovld __cnfn convert_short3_sat_rte(double3); +short3 __ovld __cnfn convert_short3_sat_rtn(double3); +short3 __ovld __cnfn convert_short3_sat_rtp(double3); +short3 __ovld __cnfn convert_short3_sat_rtz(double3); +short4 __ovld __cnfn convert_short4(double4); +short4 __ovld __cnfn convert_short4_rte(double4); +short4 __ovld __cnfn convert_short4_rtn(double4); +short4 __ovld __cnfn convert_short4_rtp(double4); +short4 __ovld __cnfn convert_short4_rtz(double4); +short4 __ovld __cnfn convert_short4_sat(double4); +short4 __ovld __cnfn convert_short4_sat_rte(double4); +short4 __ovld __cnfn convert_short4_sat_rtn(double4); +short4 __ovld __cnfn convert_short4_sat_rtp(double4); +short4 __ovld __cnfn convert_short4_sat_rtz(double4); +short8 __ovld __cnfn convert_short8(double8); +short8 __ovld __cnfn convert_short8_rte(double8); +short8 __ovld __cnfn convert_short8_rtn(double8); +short8 __ovld __cnfn convert_short8_rtp(double8); +short8 __ovld __cnfn convert_short8_rtz(double8); +short8 __ovld __cnfn convert_short8_sat(double8); +short8 __ovld __cnfn convert_short8_sat_rte(double8); +short8 __ovld __cnfn convert_short8_sat_rtn(double8); +short8 __ovld __cnfn convert_short8_sat_rtp(double8); +short8 __ovld __cnfn convert_short8_sat_rtz(double8); +short16 __ovld __cnfn convert_short16(double16); +short16 __ovld __cnfn convert_short16_rte(double16); +short16 __ovld __cnfn convert_short16_rtn(double16); +short16 __ovld __cnfn convert_short16_rtp(double16); +short16 __ovld __cnfn convert_short16_rtz(double16); +short16 __ovld __cnfn convert_short16_sat(double16); +short16 __ovld __cnfn convert_short16_sat_rte(double16); +short16 __ovld __cnfn convert_short16_sat_rtn(double16); +short16 __ovld __cnfn convert_short16_sat_rtp(double16); +short16 __ovld __cnfn convert_short16_sat_rtz(double16); + +ushort __ovld __cnfn convert_ushort(double); +ushort __ovld __cnfn convert_ushort_rte(double); +ushort __ovld __cnfn convert_ushort_rtn(double); +ushort __ovld __cnfn convert_ushort_rtp(double); +ushort __ovld __cnfn convert_ushort_rtz(double); +ushort __ovld __cnfn convert_ushort_sat(double); +ushort __ovld __cnfn convert_ushort_sat_rte(double); +ushort __ovld __cnfn convert_ushort_sat_rtn(double); +ushort __ovld __cnfn convert_ushort_sat_rtp(double); +ushort __ovld __cnfn convert_ushort_sat_rtz(double); +ushort2 __ovld __cnfn convert_ushort2(double2); +ushort2 __ovld __cnfn convert_ushort2_rte(double2); +ushort2 __ovld __cnfn convert_ushort2_rtn(double2); +ushort2 __ovld __cnfn convert_ushort2_rtp(double2); +ushort2 __ovld __cnfn convert_ushort2_rtz(double2); 
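// Note that conversions *to* floating-point types (the convert_float*
// declarations above) carry no _sat variants: saturation applies only to
// integer destinations. When no rounding suffix is given, a conversion to an
// integer type defaults to _rtz and a conversion to a floating-point type
// defaults to _rte, so for example:
//   int i = convert_int(2.9);  // same as convert_int_rtz(2.9), i.e. 2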
+ushort2 __ovld __cnfn convert_ushort2_sat(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2); +ushort3 __ovld __cnfn convert_ushort3(double3); +ushort3 __ovld __cnfn convert_ushort3_rte(double3); +ushort3 __ovld __cnfn convert_ushort3_rtn(double3); +ushort3 __ovld __cnfn convert_ushort3_rtp(double3); +ushort3 __ovld __cnfn convert_ushort3_rtz(double3); +ushort3 __ovld __cnfn convert_ushort3_sat(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3); +ushort4 __ovld __cnfn convert_ushort4(double4); +ushort4 __ovld __cnfn convert_ushort4_rte(double4); +ushort4 __ovld __cnfn convert_ushort4_rtn(double4); +ushort4 __ovld __cnfn convert_ushort4_rtp(double4); +ushort4 __ovld __cnfn convert_ushort4_rtz(double4); +ushort4 __ovld __cnfn convert_ushort4_sat(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4); +ushort8 __ovld __cnfn convert_ushort8(double8); +ushort8 __ovld __cnfn convert_ushort8_rte(double8); +ushort8 __ovld __cnfn convert_ushort8_rtn(double8); +ushort8 __ovld __cnfn convert_ushort8_rtp(double8); +ushort8 __ovld __cnfn convert_ushort8_rtz(double8); +ushort8 __ovld __cnfn convert_ushort8_sat(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8); +ushort16 __ovld __cnfn convert_ushort16(double16); +ushort16 __ovld __cnfn convert_ushort16_rte(double16); +ushort16 __ovld __cnfn convert_ushort16_rtn(double16); +ushort16 __ovld __cnfn convert_ushort16_rtp(double16); +ushort16 __ovld __cnfn convert_ushort16_rtz(double16); +ushort16 __ovld __cnfn convert_ushort16_sat(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16); + +int __ovld __cnfn convert_int(double); +int __ovld __cnfn convert_int_rte(double); +int __ovld __cnfn convert_int_rtn(double); +int __ovld __cnfn convert_int_rtp(double); +int __ovld __cnfn convert_int_rtz(double); +int __ovld __cnfn convert_int_sat(double); +int __ovld __cnfn convert_int_sat_rte(double); +int __ovld __cnfn convert_int_sat_rtn(double); +int __ovld __cnfn convert_int_sat_rtp(double); +int __ovld __cnfn convert_int_sat_rtz(double); +int2 __ovld __cnfn convert_int2(double2); +int2 __ovld __cnfn convert_int2_rte(double2); +int2 __ovld __cnfn convert_int2_rtn(double2); +int2 __ovld __cnfn convert_int2_rtp(double2); +int2 __ovld __cnfn convert_int2_rtz(double2); +int2 __ovld __cnfn convert_int2_sat(double2); +int2 __ovld __cnfn convert_int2_sat_rte(double2); +int2 __ovld __cnfn convert_int2_sat_rtn(double2); +int2 __ovld __cnfn convert_int2_sat_rtp(double2); +int2 __ovld __cnfn convert_int2_sat_rtz(double2); +int3 __ovld __cnfn convert_int3(double3); +int3 __ovld __cnfn convert_int3_rte(double3); +int3 __ovld __cnfn convert_int3_rtn(double3); 
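// The four rounding suffixes differ only for values that are not exactly
// representable in the destination type; an illustrative comparison:
//   convert_int_rte(2.5) == 2   convert_int_rte(-1.5) == -2  // ties to even
//   convert_int_rtz(2.5) == 2   convert_int_rtz(-1.5) == -1  // toward zero
//   convert_int_rtp(2.5) == 3   convert_int_rtp(-1.5) == -1  // toward +inf
//   convert_int_rtn(2.5) == 2   convert_int_rtn(-1.5) == -2  // toward -inf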
+int3 __ovld __cnfn convert_int3_rtp(double3); +int3 __ovld __cnfn convert_int3_rtz(double3); +int3 __ovld __cnfn convert_int3_sat(double3); +int3 __ovld __cnfn convert_int3_sat_rte(double3); +int3 __ovld __cnfn convert_int3_sat_rtn(double3); +int3 __ovld __cnfn convert_int3_sat_rtp(double3); +int3 __ovld __cnfn convert_int3_sat_rtz(double3); +int4 __ovld __cnfn convert_int4(double4); +int4 __ovld __cnfn convert_int4_rte(double4); +int4 __ovld __cnfn convert_int4_rtn(double4); +int4 __ovld __cnfn convert_int4_rtp(double4); +int4 __ovld __cnfn convert_int4_rtz(double4); +int4 __ovld __cnfn convert_int4_sat(double4); +int4 __ovld __cnfn convert_int4_sat_rte(double4); +int4 __ovld __cnfn convert_int4_sat_rtn(double4); +int4 __ovld __cnfn convert_int4_sat_rtp(double4); +int4 __ovld __cnfn convert_int4_sat_rtz(double4); +int8 __ovld __cnfn convert_int8(double8); +int8 __ovld __cnfn convert_int8_rte(double8); +int8 __ovld __cnfn convert_int8_rtn(double8); +int8 __ovld __cnfn convert_int8_rtp(double8); +int8 __ovld __cnfn convert_int8_rtz(double8); +int8 __ovld __cnfn convert_int8_sat(double8); +int8 __ovld __cnfn convert_int8_sat_rte(double8); +int8 __ovld __cnfn convert_int8_sat_rtn(double8); +int8 __ovld __cnfn convert_int8_sat_rtp(double8); +int8 __ovld __cnfn convert_int8_sat_rtz(double8); +int16 __ovld __cnfn convert_int16(double16); +int16 __ovld __cnfn convert_int16_rte(double16); +int16 __ovld __cnfn convert_int16_rtn(double16); +int16 __ovld __cnfn convert_int16_rtp(double16); +int16 __ovld __cnfn convert_int16_rtz(double16); +int16 __ovld __cnfn convert_int16_sat(double16); +int16 __ovld __cnfn convert_int16_sat_rte(double16); +int16 __ovld __cnfn convert_int16_sat_rtn(double16); +int16 __ovld __cnfn convert_int16_sat_rtp(double16); +int16 __ovld __cnfn convert_int16_sat_rtz(double16); + +uint __ovld __cnfn convert_uint(double); +uint __ovld __cnfn convert_uint_rte(double); +uint __ovld __cnfn convert_uint_rtn(double); +uint __ovld __cnfn convert_uint_rtp(double); +uint __ovld __cnfn convert_uint_rtz(double); +uint __ovld __cnfn convert_uint_sat(double); +uint __ovld __cnfn convert_uint_sat_rte(double); +uint __ovld __cnfn convert_uint_sat_rtn(double); +uint __ovld __cnfn convert_uint_sat_rtp(double); +uint __ovld __cnfn convert_uint_sat_rtz(double); +uint2 __ovld __cnfn convert_uint2(double2); +uint2 __ovld __cnfn convert_uint2_rte(double2); +uint2 __ovld __cnfn convert_uint2_rtn(double2); +uint2 __ovld __cnfn convert_uint2_rtp(double2); +uint2 __ovld __cnfn convert_uint2_rtz(double2); +uint2 __ovld __cnfn convert_uint2_sat(double2); +uint2 __ovld __cnfn convert_uint2_sat_rte(double2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(double2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(double2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(double2); +uint3 __ovld __cnfn convert_uint3(double3); +uint3 __ovld __cnfn convert_uint3_rte(double3); +uint3 __ovld __cnfn convert_uint3_rtn(double3); +uint3 __ovld __cnfn convert_uint3_rtp(double3); +uint3 __ovld __cnfn convert_uint3_rtz(double3); +uint3 __ovld __cnfn convert_uint3_sat(double3); +uint3 __ovld __cnfn convert_uint3_sat_rte(double3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(double3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(double3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(double3); +uint4 __ovld __cnfn convert_uint4(double4); +uint4 __ovld __cnfn convert_uint4_rte(double4); +uint4 __ovld __cnfn convert_uint4_rtn(double4); +uint4 __ovld __cnfn convert_uint4_rtp(double4); +uint4 __ovld __cnfn convert_uint4_rtz(double4); +uint4 __ovld 
+uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
+uint8 __ovld __cnfn convert_uint8(double8);
+uint8 __ovld __cnfn convert_uint8_rte(double8);
+uint8 __ovld __cnfn convert_uint8_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_rtz(double8);
+uint8 __ovld __cnfn convert_uint8_sat(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
+uint16 __ovld __cnfn convert_uint16(double16);
+uint16 __ovld __cnfn convert_uint16_rte(double16);
+uint16 __ovld __cnfn convert_uint16_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_rtz(double16);
+uint16 __ovld __cnfn convert_uint16_sat(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
+
+long __ovld __cnfn convert_long(double);
+long __ovld __cnfn convert_long_rte(double);
+long __ovld __cnfn convert_long_rtn(double);
+long __ovld __cnfn convert_long_rtp(double);
+long __ovld __cnfn convert_long_rtz(double);
+long __ovld __cnfn convert_long_sat(double);
+long __ovld __cnfn convert_long_sat_rte(double);
+long __ovld __cnfn convert_long_sat_rtn(double);
+long __ovld __cnfn convert_long_sat_rtp(double);
+long __ovld __cnfn convert_long_sat_rtz(double);
+long2 __ovld __cnfn convert_long2(double2);
+long2 __ovld __cnfn convert_long2_rte(double2);
+long2 __ovld __cnfn convert_long2_rtn(double2);
+long2 __ovld __cnfn convert_long2_rtp(double2);
+long2 __ovld __cnfn convert_long2_rtz(double2);
+long2 __ovld __cnfn convert_long2_sat(double2);
+long2 __ovld __cnfn convert_long2_sat_rte(double2);
+long2 __ovld __cnfn convert_long2_sat_rtn(double2);
+long2 __ovld __cnfn convert_long2_sat_rtp(double2);
+long2 __ovld __cnfn convert_long2_sat_rtz(double2);
+long3 __ovld __cnfn convert_long3(double3);
+long3 __ovld __cnfn convert_long3_rte(double3);
+long3 __ovld __cnfn convert_long3_rtn(double3);
+long3 __ovld __cnfn convert_long3_rtp(double3);
+long3 __ovld __cnfn convert_long3_rtz(double3);
+long3 __ovld __cnfn convert_long3_sat(double3);
+long3 __ovld __cnfn convert_long3_sat_rte(double3);
+long3 __ovld __cnfn convert_long3_sat_rtn(double3);
+long3 __ovld __cnfn convert_long3_sat_rtp(double3);
+long3 __ovld __cnfn convert_long3_sat_rtz(double3);
+long4 __ovld __cnfn convert_long4(double4);
+long4 __ovld __cnfn convert_long4_rte(double4);
+long4 __ovld __cnfn convert_long4_rtn(double4);
+long4 __ovld __cnfn convert_long4_rtp(double4);
+long4 __ovld __cnfn convert_long4_rtz(double4);
+long4 __ovld __cnfn convert_long4_sat(double4);
+long4 __ovld __cnfn convert_long4_sat_rte(double4);
+long4 __ovld __cnfn convert_long4_sat_rtn(double4);
+long4 __ovld __cnfn convert_long4_sat_rtp(double4);
+long4 __ovld __cnfn convert_long4_sat_rtz(double4);
+long8 __ovld __cnfn convert_long8(double8);
+long8 __ovld __cnfn convert_long8_rte(double8);
+long8 __ovld __cnfn convert_long8_rtn(double8);
+long8 __ovld __cnfn convert_long8_rtp(double8);
+long8 __ovld __cnfn convert_long8_rtz(double8);
+long8 __ovld __cnfn convert_long8_sat(double8);
+long8 __ovld __cnfn convert_long8_sat_rte(double8);
+long8 __ovld __cnfn convert_long8_sat_rtn(double8);
+long8 __ovld __cnfn convert_long8_sat_rtp(double8);
+long8 __ovld __cnfn convert_long8_sat_rtz(double8);
+long16 __ovld __cnfn convert_long16(double16);
+long16 __ovld __cnfn convert_long16_rte(double16);
+long16 __ovld __cnfn convert_long16_rtn(double16);
+long16 __ovld __cnfn convert_long16_rtp(double16);
+long16 __ovld __cnfn convert_long16_rtz(double16);
+long16 __ovld __cnfn convert_long16_sat(double16);
+long16 __ovld __cnfn convert_long16_sat_rte(double16);
+long16 __ovld __cnfn convert_long16_sat_rtn(double16);
+long16 __ovld __cnfn convert_long16_sat_rtp(double16);
+long16 __ovld __cnfn convert_long16_sat_rtz(double16);
+
+ulong __ovld __cnfn convert_ulong(double);
+ulong __ovld __cnfn convert_ulong_rte(double);
+ulong __ovld __cnfn convert_ulong_rtn(double);
+ulong __ovld __cnfn convert_ulong_rtp(double);
+ulong __ovld __cnfn convert_ulong_rtz(double);
+ulong __ovld __cnfn convert_ulong_sat(double);
+ulong __ovld __cnfn convert_ulong_sat_rte(double);
+ulong __ovld __cnfn convert_ulong_sat_rtn(double);
+ulong __ovld __cnfn convert_ulong_sat_rtp(double);
+ulong __ovld __cnfn convert_ulong_sat_rtz(double);
+ulong2 __ovld __cnfn convert_ulong2(double2);
+ulong2 __ovld __cnfn convert_ulong2_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
+ulong3 __ovld __cnfn convert_ulong3(double3);
+ulong3 __ovld __cnfn convert_ulong3_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
+ulong4 __ovld __cnfn convert_ulong4(double4);
+ulong4 __ovld __cnfn convert_ulong4_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
+ulong8 __ovld __cnfn convert_ulong8(double8);
+ulong8 __ovld __cnfn convert_ulong8_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
+ulong16 __ovld __cnfn convert_ulong16(double16);
+ulong16 __ovld __cnfn convert_ulong16_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
+
+float __ovld __cnfn convert_float(double);
+float __ovld __cnfn convert_float_rte(double);
+float __ovld __cnfn convert_float_rtn(double);
+float __ovld __cnfn convert_float_rtp(double);
+float __ovld __cnfn convert_float_rtz(double);
+float2 __ovld __cnfn convert_float2(double2);
+float2 __ovld __cnfn convert_float2_rte(double2);
+float2 __ovld __cnfn convert_float2_rtn(double2);
+float2 __ovld __cnfn convert_float2_rtp(double2);
+float2 __ovld __cnfn convert_float2_rtz(double2);
+float3 __ovld __cnfn convert_float3(double3);
+float3 __ovld __cnfn convert_float3_rte(double3);
+float3 __ovld __cnfn convert_float3_rtn(double3);
+float3 __ovld __cnfn convert_float3_rtp(double3);
+float3 __ovld __cnfn convert_float3_rtz(double3);
+float4 __ovld __cnfn convert_float4(double4);
+float4 __ovld __cnfn convert_float4_rte(double4);
+float4 __ovld __cnfn convert_float4_rtn(double4);
+float4 __ovld __cnfn convert_float4_rtp(double4);
+float4 __ovld __cnfn convert_float4_rtz(double4);
+float8 __ovld __cnfn convert_float8(double8);
+float8 __ovld __cnfn convert_float8_rte(double8);
+float8 __ovld __cnfn convert_float8_rtn(double8);
+float8 __ovld __cnfn convert_float8_rtp(double8);
+float8 __ovld __cnfn convert_float8_rtz(double8);
+float16 __ovld __cnfn convert_float16(double16);
+float16 __ovld __cnfn convert_float16_rte(double16);
+float16 __ovld __cnfn convert_float16_rtn(double16);
+float16 __ovld __cnfn convert_float16_rtp(double16);
+float16 __ovld __cnfn convert_float16_rtz(double16);
+
+double __ovld __cnfn convert_double(char);
+double __ovld __cnfn convert_double(double);
+double __ovld __cnfn convert_double(float);
+double __ovld __cnfn convert_double(int);
+double __ovld __cnfn convert_double(long);
+double __ovld __cnfn convert_double(short);
+double __ovld __cnfn convert_double(uchar);
+double __ovld __cnfn convert_double(uint);
+double __ovld __cnfn convert_double(ulong);
+double __ovld __cnfn convert_double(ushort);
+double __ovld __cnfn convert_double_rte(char);
+double __ovld __cnfn convert_double_rte(double);
+double __ovld __cnfn convert_double_rte(float);
+double __ovld __cnfn convert_double_rte(int);
+double __ovld __cnfn convert_double_rte(long);
+double __ovld __cnfn convert_double_rte(short);
+double __ovld __cnfn convert_double_rte(uchar);
+double __ovld __cnfn convert_double_rte(uint);
+double __ovld __cnfn convert_double_rte(ulong);
+double __ovld __cnfn convert_double_rte(ushort);
+double __ovld __cnfn convert_double_rtn(char);
+double __ovld __cnfn convert_double_rtn(double);
+double __ovld __cnfn convert_double_rtn(float);
+double __ovld __cnfn convert_double_rtn(int);
+double __ovld __cnfn convert_double_rtn(long);
+double __ovld __cnfn convert_double_rtn(short);
+double __ovld __cnfn convert_double_rtn(uchar);
+double __ovld __cnfn convert_double_rtn(uint);
+double __ovld __cnfn convert_double_rtn(ulong);
+double __ovld __cnfn convert_double_rtn(ushort);
+double __ovld __cnfn convert_double_rtp(char);
+double __ovld __cnfn convert_double_rtp(double);
+double __ovld __cnfn convert_double_rtp(float);
+double __ovld __cnfn convert_double_rtp(int);
+double __ovld __cnfn convert_double_rtp(long);
+double __ovld __cnfn convert_double_rtp(short);
+double __ovld __cnfn convert_double_rtp(uchar);
+double __ovld __cnfn convert_double_rtp(uint);
+double __ovld __cnfn convert_double_rtp(ulong);
+double __ovld __cnfn convert_double_rtp(ushort);
+double __ovld __cnfn convert_double_rtz(char);
+double __ovld __cnfn convert_double_rtz(double);
+double __ovld __cnfn convert_double_rtz(float);
+double __ovld __cnfn convert_double_rtz(int);
+double __ovld __cnfn convert_double_rtz(long);
+double __ovld __cnfn convert_double_rtz(short);
+double __ovld __cnfn convert_double_rtz(uchar);
+double __ovld __cnfn convert_double_rtz(uint);
+double __ovld __cnfn convert_double_rtz(ulong);
+double __ovld __cnfn convert_double_rtz(ushort);
+double2 __ovld __cnfn convert_double2(char2);
+double2 __ovld __cnfn convert_double2(double2);
+double2 __ovld __cnfn convert_double2(float2);
+double2 __ovld __cnfn convert_double2(int2);
+double2 __ovld __cnfn convert_double2(long2);
+double2 __ovld __cnfn convert_double2(short2);
+double2 __ovld __cnfn convert_double2(uchar2);
+double2 __ovld __cnfn convert_double2(uint2);
+double2 __ovld __cnfn convert_double2(ulong2);
+double2 __ovld __cnfn convert_double2(ushort2);
+double2 __ovld __cnfn convert_double2_rte(char2);
+double2 __ovld __cnfn convert_double2_rte(double2);
+double2 __ovld __cnfn convert_double2_rte(float2);
+double2 __ovld __cnfn convert_double2_rte(int2);
+double2 __ovld __cnfn convert_double2_rte(long2);
+double2 __ovld __cnfn convert_double2_rte(short2);
+double2 __ovld __cnfn convert_double2_rte(uchar2);
+double2 __ovld __cnfn convert_double2_rte(uint2);
+double2 __ovld __cnfn convert_double2_rte(ulong2);
+double2 __ovld __cnfn convert_double2_rte(ushort2);
+double2 __ovld __cnfn convert_double2_rtn(char2);
+double2 __ovld __cnfn convert_double2_rtn(double2);
+double2 __ovld __cnfn convert_double2_rtn(float2);
+double2 __ovld __cnfn convert_double2_rtn(int2);
+double2 __ovld __cnfn convert_double2_rtn(long2);
+double2 __ovld __cnfn convert_double2_rtn(short2);
+double2 __ovld __cnfn convert_double2_rtn(uchar2);
+double2 __ovld __cnfn convert_double2_rtn(uint2);
+double2 __ovld __cnfn convert_double2_rtn(ulong2);
+double2 __ovld __cnfn convert_double2_rtn(ushort2);
+double2 __ovld __cnfn convert_double2_rtp(char2);
+double2 __ovld __cnfn convert_double2_rtp(double2);
+double2 __ovld __cnfn convert_double2_rtp(float2);
+double2 __ovld __cnfn convert_double2_rtp(int2);
+double2 __ovld __cnfn convert_double2_rtp(long2);
+double2 __ovld __cnfn convert_double2_rtp(short2);
+double2 __ovld __cnfn convert_double2_rtp(uchar2);
+double2 __ovld __cnfn convert_double2_rtp(uint2);
+double2 __ovld __cnfn convert_double2_rtp(ulong2);
+double2 __ovld __cnfn convert_double2_rtp(ushort2);
+double2 __ovld __cnfn convert_double2_rtz(char2);
+double2 __ovld __cnfn convert_double2_rtz(double2);
+double2 __ovld __cnfn convert_double2_rtz(float2);
+double2 __ovld __cnfn convert_double2_rtz(int2);
+double2 __ovld __cnfn convert_double2_rtz(long2);
+double2 __ovld __cnfn convert_double2_rtz(short2);
+double2 __ovld __cnfn convert_double2_rtz(uchar2);
+double2 __ovld __cnfn convert_double2_rtz(uint2);
+double2 __ovld __cnfn convert_double2_rtz(ulong2);
+double2 __ovld __cnfn convert_double2_rtz(ushort2);
+double3 __ovld __cnfn convert_double3(char3);
+double3 __ovld __cnfn convert_double3(double3);
+double3 __ovld __cnfn convert_double3(float3);
+double3 __ovld __cnfn convert_double3(int3);
+double3 __ovld __cnfn convert_double3(long3);
+double3 __ovld __cnfn convert_double3(short3);
+double3 __ovld __cnfn convert_double3(uchar3);
+double3 __ovld __cnfn convert_double3(uint3);
+double3 __ovld __cnfn convert_double3(ulong3);
+double3 __ovld __cnfn convert_double3(ushort3);
+double3 __ovld __cnfn convert_double3_rte(char3);
+double3 __ovld __cnfn convert_double3_rte(double3);
+double3 __ovld __cnfn convert_double3_rte(float3);
+double3 __ovld __cnfn convert_double3_rte(int3);
+double3 __ovld __cnfn convert_double3_rte(long3);
+double3 __ovld __cnfn convert_double3_rte(short3);
+double3 __ovld __cnfn convert_double3_rte(uchar3);
+double3 __ovld __cnfn convert_double3_rte(uint3);
+double3 __ovld __cnfn convert_double3_rte(ulong3);
+double3 __ovld __cnfn convert_double3_rte(ushort3);
+double3 __ovld __cnfn convert_double3_rtn(char3);
+double3 __ovld __cnfn convert_double3_rtn(double3);
+double3 __ovld __cnfn convert_double3_rtn(float3);
+double3 __ovld __cnfn convert_double3_rtn(int3);
+double3 __ovld __cnfn convert_double3_rtn(long3);
+double3 __ovld __cnfn convert_double3_rtn(short3);
+double3 __ovld __cnfn convert_double3_rtn(uchar3);
+double3 __ovld __cnfn convert_double3_rtn(uint3);
+double3 __ovld __cnfn convert_double3_rtn(ulong3);
+double3 __ovld __cnfn convert_double3_rtn(ushort3);
+double3 __ovld __cnfn convert_double3_rtp(char3);
+double3 __ovld __cnfn convert_double3_rtp(double3);
+double3 __ovld __cnfn convert_double3_rtp(float3);
+double3 __ovld __cnfn convert_double3_rtp(int3);
+double3 __ovld __cnfn convert_double3_rtp(long3);
+double3 __ovld __cnfn convert_double3_rtp(short3);
+double3 __ovld __cnfn convert_double3_rtp(uchar3);
+double3 __ovld __cnfn convert_double3_rtp(uint3);
+double3 __ovld __cnfn convert_double3_rtp(ulong3);
+double3 __ovld __cnfn convert_double3_rtp(ushort3);
+double3 __ovld __cnfn convert_double3_rtz(char3);
+double3 __ovld __cnfn convert_double3_rtz(double3);
+double3 __ovld __cnfn convert_double3_rtz(float3);
+double3 __ovld __cnfn convert_double3_rtz(int3);
+double3 __ovld __cnfn convert_double3_rtz(long3);
+double3 __ovld __cnfn convert_double3_rtz(short3);
+double3 __ovld __cnfn convert_double3_rtz(uchar3);
+double3 __ovld __cnfn convert_double3_rtz(uint3);
+double3 __ovld __cnfn convert_double3_rtz(ulong3);
+double3 __ovld __cnfn convert_double3_rtz(ushort3);
+double4 __ovld __cnfn convert_double4(char4);
+double4 __ovld __cnfn convert_double4(double4);
+double4 __ovld __cnfn convert_double4(float4);
+double4 __ovld __cnfn convert_double4(int4);
+double4 __ovld __cnfn convert_double4(long4);
+double4 __ovld __cnfn convert_double4(short4);
+double4 __ovld __cnfn convert_double4(uchar4);
+double4 __ovld __cnfn convert_double4(uint4);
+double4 __ovld __cnfn convert_double4(ulong4);
+double4 __ovld __cnfn convert_double4(ushort4);
+double4 __ovld __cnfn convert_double4_rte(char4);
+double4 __ovld __cnfn convert_double4_rte(double4);
+double4 __ovld __cnfn convert_double4_rte(float4);
+double4 __ovld __cnfn convert_double4_rte(int4);
+double4 __ovld __cnfn convert_double4_rte(long4);
+double4 __ovld __cnfn convert_double4_rte(short4);
+double4 __ovld __cnfn convert_double4_rte(uchar4);
+double4 __ovld __cnfn convert_double4_rte(uint4);
+double4 __ovld __cnfn convert_double4_rte(ulong4);
+double4 __ovld __cnfn convert_double4_rte(ushort4);
+double4 __ovld __cnfn convert_double4_rtn(char4);
+double4 __ovld __cnfn convert_double4_rtn(double4);
+double4 __ovld __cnfn convert_double4_rtn(float4);
+double4 __ovld __cnfn convert_double4_rtn(int4);
+double4 __ovld __cnfn convert_double4_rtn(long4);
+double4 __ovld __cnfn convert_double4_rtn(short4);
+double4 __ovld __cnfn convert_double4_rtn(uchar4);
+double4 __ovld __cnfn convert_double4_rtn(uint4);
+double4 __ovld __cnfn convert_double4_rtn(ulong4);
+double4 __ovld __cnfn convert_double4_rtn(ushort4);
+double4 __ovld __cnfn convert_double4_rtp(char4);
+double4 __ovld __cnfn convert_double4_rtp(double4);
+double4 __ovld __cnfn convert_double4_rtp(float4);
+double4 __ovld __cnfn convert_double4_rtp(int4);
+double4 __ovld __cnfn convert_double4_rtp(long4);
+double4 __ovld __cnfn convert_double4_rtp(short4);
+double4 __ovld __cnfn convert_double4_rtp(uchar4);
+double4 __ovld __cnfn convert_double4_rtp(uint4);
+double4 __ovld __cnfn convert_double4_rtp(ulong4);
+double4 __ovld __cnfn convert_double4_rtp(ushort4);
+double4 __ovld __cnfn convert_double4_rtz(char4);
+double4 __ovld __cnfn convert_double4_rtz(double4);
+double4 __ovld __cnfn convert_double4_rtz(float4);
+double4 __ovld __cnfn convert_double4_rtz(int4);
+double4 __ovld __cnfn convert_double4_rtz(long4);
+double4 __ovld __cnfn convert_double4_rtz(short4);
+double4 __ovld __cnfn convert_double4_rtz(uchar4);
+double4 __ovld __cnfn convert_double4_rtz(uint4);
+double4 __ovld __cnfn convert_double4_rtz(ulong4);
+double4 __ovld __cnfn convert_double4_rtz(ushort4);
+double8 __ovld __cnfn convert_double8(char8);
+double8 __ovld __cnfn convert_double8(double8);
+double8 __ovld __cnfn convert_double8(float8);
+double8 __ovld __cnfn convert_double8(int8);
+double8 __ovld __cnfn convert_double8(long8);
+double8 __ovld __cnfn convert_double8(short8);
+double8 __ovld __cnfn convert_double8(uchar8);
+double8 __ovld __cnfn convert_double8(uint8);
+double8 __ovld __cnfn convert_double8(ulong8);
+double8 __ovld __cnfn convert_double8(ushort8);
+double8 __ovld __cnfn convert_double8_rte(char8);
+double8 __ovld __cnfn convert_double8_rte(double8);
+double8 __ovld __cnfn convert_double8_rte(float8);
+double8 __ovld __cnfn convert_double8_rte(int8);
+double8 __ovld __cnfn convert_double8_rte(long8);
+double8 __ovld __cnfn convert_double8_rte(short8);
+double8 __ovld __cnfn convert_double8_rte(uchar8);
+double8 __ovld __cnfn convert_double8_rte(uint8);
+double8 __ovld __cnfn convert_double8_rte(ulong8);
+double8 __ovld __cnfn convert_double8_rte(ushort8);
+double8 __ovld __cnfn convert_double8_rtn(char8);
+double8 __ovld __cnfn convert_double8_rtn(double8);
+double8 __ovld __cnfn convert_double8_rtn(float8);
+double8 __ovld __cnfn convert_double8_rtn(int8);
+double8 __ovld __cnfn convert_double8_rtn(long8);
+double8 __ovld __cnfn convert_double8_rtn(short8);
+double8 __ovld __cnfn convert_double8_rtn(uchar8);
+double8 __ovld __cnfn convert_double8_rtn(uint8);
+double8 __ovld __cnfn convert_double8_rtn(ulong8);
+double8 __ovld __cnfn convert_double8_rtn(ushort8);
+double8 __ovld __cnfn convert_double8_rtp(char8);
+double8 __ovld __cnfn convert_double8_rtp(double8);
+double8 __ovld __cnfn convert_double8_rtp(float8);
+double8 __ovld __cnfn convert_double8_rtp(int8);
+double8 __ovld __cnfn convert_double8_rtp(long8);
+double8 __ovld __cnfn convert_double8_rtp(short8);
+double8 __ovld __cnfn convert_double8_rtp(uchar8);
+double8 __ovld __cnfn convert_double8_rtp(uint8);
+double8 __ovld __cnfn convert_double8_rtp(ulong8);
+double8 __ovld __cnfn convert_double8_rtp(ushort8);
+double8 __ovld __cnfn convert_double8_rtz(char8);
+double8 __ovld __cnfn convert_double8_rtz(double8);
+double8 __ovld __cnfn convert_double8_rtz(float8);
+double8 __ovld __cnfn convert_double8_rtz(int8);
+double8 __ovld __cnfn convert_double8_rtz(long8);
+double8 __ovld __cnfn convert_double8_rtz(short8);
+double8 __ovld __cnfn convert_double8_rtz(uchar8);
+double8 __ovld __cnfn convert_double8_rtz(uint8);
+double8 __ovld __cnfn convert_double8_rtz(ulong8);
+double8 __ovld __cnfn convert_double8_rtz(ushort8);
+double16 __ovld __cnfn convert_double16(char16);
+double16 __ovld __cnfn convert_double16(double16);
+double16 __ovld __cnfn convert_double16(float16);
+double16 __ovld __cnfn convert_double16(int16);
+double16 __ovld __cnfn convert_double16(long16);
+double16 __ovld __cnfn convert_double16(short16);
+double16 __ovld __cnfn convert_double16(uchar16);
+double16 __ovld __cnfn convert_double16(uint16);
+double16 __ovld __cnfn convert_double16(ulong16);
+double16 __ovld __cnfn convert_double16(ushort16);
+double16 __ovld __cnfn convert_double16_rte(char16);
+double16 __ovld __cnfn convert_double16_rte(double16);
+double16 __ovld __cnfn convert_double16_rte(float16);
+double16 __ovld __cnfn convert_double16_rte(int16);
+double16 __ovld __cnfn convert_double16_rte(long16);
+double16 __ovld __cnfn convert_double16_rte(short16);
+double16 __ovld __cnfn convert_double16_rte(uchar16);
+double16 __ovld __cnfn convert_double16_rte(uint16);
+double16 __ovld __cnfn convert_double16_rte(ulong16);
+double16 __ovld __cnfn convert_double16_rte(ushort16);
+double16 __ovld __cnfn convert_double16_rtn(char16);
+double16 __ovld __cnfn convert_double16_rtn(double16);
+double16 __ovld __cnfn convert_double16_rtn(float16);
+double16 __ovld __cnfn convert_double16_rtn(int16);
+double16 __ovld __cnfn convert_double16_rtn(long16);
+double16 __ovld __cnfn convert_double16_rtn(short16);
+double16 __ovld __cnfn convert_double16_rtn(uchar16);
+double16 __ovld __cnfn convert_double16_rtn(uint16);
+double16 __ovld __cnfn convert_double16_rtn(ulong16);
+double16 __ovld __cnfn convert_double16_rtn(ushort16);
+double16 __ovld __cnfn convert_double16_rtp(char16);
+double16 __ovld __cnfn convert_double16_rtp(double16);
+double16 __ovld __cnfn convert_double16_rtp(float16);
+double16 __ovld __cnfn convert_double16_rtp(int16);
+double16 __ovld __cnfn convert_double16_rtp(long16);
+double16 __ovld __cnfn convert_double16_rtp(short16);
+double16 __ovld __cnfn convert_double16_rtp(uchar16);
+double16 __ovld __cnfn convert_double16_rtp(uint16);
+double16 __ovld __cnfn convert_double16_rtp(ulong16);
+double16 __ovld __cnfn convert_double16_rtp(ushort16);
+double16 __ovld __cnfn convert_double16_rtz(char16);
+double16 __ovld __cnfn convert_double16_rtz(double16);
+double16 __ovld __cnfn convert_double16_rtz(float16);
+double16 __ovld __cnfn convert_double16_rtz(int16);
+double16 __ovld __cnfn convert_double16_rtz(long16);
+double16 __ovld __cnfn convert_double16_rtz(short16);
+double16 __ovld __cnfn convert_double16_rtz(uchar16);
+double16 __ovld __cnfn convert_double16_rtz(uint16);
+double16 __ovld __cnfn convert_double16_rtz(ulong16);
+double16 __ovld __cnfn convert_double16_rtz(ushort16);
+#endif //cl_khr_fp64
+
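/* Illustrative example (editorial aside, not part of the upstream header or
 * this patch): the fp64 conversions declared above follow the OpenCL
 * convert_<destType>[_sat][_<roundingMode>] naming scheme, so with
 * cl_khr_fp64 enabled one could write, e.g.:
 *
 *   double4 d = (double4)(1.5, -2.5, 3.0e10, -4.0e10);
 *   int4 i = convert_int4_sat_rte(d);
 *   // round-to-nearest-even with saturation: (2, -2, INT_MAX, INT_MIN)
 */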
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+// Convert half types to non-double types.
+uchar __ovld __cnfn convert_uchar(half);
+uchar __ovld __cnfn convert_uchar_rte(half);
+uchar __ovld __cnfn convert_uchar_rtp(half);
+uchar __ovld __cnfn convert_uchar_rtn(half);
+uchar __ovld __cnfn convert_uchar_rtz(half);
+uchar __ovld __cnfn convert_uchar_sat(half);
+uchar __ovld __cnfn convert_uchar_sat_rte(half);
+uchar __ovld __cnfn convert_uchar_sat_rtp(half);
+uchar __ovld __cnfn convert_uchar_sat_rtn(half);
+uchar __ovld __cnfn convert_uchar_sat_rtz(half);
+uchar2 __ovld __cnfn convert_uchar2(half2);
+uchar2 __ovld __cnfn convert_uchar2_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
+uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
+uchar3 __ovld __cnfn convert_uchar3(half3);
+uchar3 __ovld __cnfn convert_uchar3_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
+uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
+uchar4 __ovld __cnfn convert_uchar4(half4);
+uchar4 __ovld __cnfn convert_uchar4_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
+uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
+uchar8 __ovld __cnfn convert_uchar8(half8);
+uchar8 __ovld __cnfn convert_uchar8_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
+uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
+uchar16 __ovld __cnfn convert_uchar16(half16);
+uchar16 __ovld __cnfn convert_uchar16_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
+uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
+ushort __ovld __cnfn convert_ushort(half);
+ushort __ovld __cnfn convert_ushort_rte(half);
+ushort __ovld __cnfn convert_ushort_rtp(half);
+ushort __ovld __cnfn convert_ushort_rtn(half);
+ushort __ovld __cnfn convert_ushort_rtz(half);
+ushort __ovld __cnfn convert_ushort_sat(half);
+ushort __ovld __cnfn convert_ushort_sat_rte(half);
+ushort __ovld __cnfn convert_ushort_sat_rtp(half);
+ushort __ovld __cnfn convert_ushort_sat_rtn(half);
+ushort __ovld __cnfn convert_ushort_sat_rtz(half);
+ushort2 __ovld __cnfn convert_ushort2(half2);
+ushort2 __ovld __cnfn convert_ushort2_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
+ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
+ushort3 __ovld __cnfn convert_ushort3(half3);
+ushort3 __ovld __cnfn convert_ushort3_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
+ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
+ushort4 __ovld __cnfn convert_ushort4(half4);
+ushort4 __ovld __cnfn convert_ushort4_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
+ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
+ushort8 __ovld __cnfn convert_ushort8(half8);
+ushort8 __ovld __cnfn convert_ushort8_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
+ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
+ushort16 __ovld __cnfn convert_ushort16(half16);
+ushort16 __ovld __cnfn convert_ushort16_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
+ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
+uint __ovld __cnfn convert_uint(half);
+uint __ovld __cnfn convert_uint_rte(half);
+uint __ovld __cnfn convert_uint_rtp(half);
+uint __ovld __cnfn convert_uint_rtn(half);
+uint __ovld __cnfn convert_uint_rtz(half);
+uint __ovld __cnfn convert_uint_sat(half);
+uint __ovld __cnfn convert_uint_sat_rte(half);
+uint __ovld __cnfn convert_uint_sat_rtp(half);
+uint __ovld __cnfn convert_uint_sat_rtn(half);
+uint __ovld __cnfn convert_uint_sat_rtz(half);
+uint2 __ovld __cnfn convert_uint2(half2);
+uint2 __ovld __cnfn convert_uint2_rte(half2);
+uint2 __ovld __cnfn convert_uint2_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_rtz(half2);
+uint2 __ovld __cnfn convert_uint2_sat(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
+uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
+uint3 __ovld __cnfn convert_uint3(half3);
+uint3 __ovld __cnfn convert_uint3_rte(half3);
+uint3 __ovld __cnfn convert_uint3_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_rtz(half3);
+uint3 __ovld __cnfn convert_uint3_sat(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
+uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
+uint4 __ovld __cnfn convert_uint4(half4);
+uint4 __ovld __cnfn convert_uint4_rte(half4);
+uint4 __ovld __cnfn convert_uint4_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_rtz(half4);
+uint4 __ovld __cnfn convert_uint4_sat(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
+uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
+uint8 __ovld __cnfn convert_uint8(half8);
+uint8 __ovld __cnfn convert_uint8_rte(half8);
+uint8 __ovld __cnfn convert_uint8_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_rtz(half8);
+uint8 __ovld __cnfn convert_uint8_sat(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
+uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
+uint16 __ovld __cnfn convert_uint16(half16);
+uint16 __ovld __cnfn convert_uint16_rte(half16);
+uint16 __ovld __cnfn convert_uint16_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_rtz(half16);
+uint16 __ovld __cnfn convert_uint16_sat(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
+uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
+ulong __ovld __cnfn convert_ulong(half);
+ulong __ovld __cnfn convert_ulong_rte(half);
+ulong __ovld __cnfn convert_ulong_rtp(half);
+ulong __ovld __cnfn convert_ulong_rtn(half);
+ulong __ovld __cnfn convert_ulong_rtz(half);
+ulong __ovld __cnfn convert_ulong_sat(half);
+ulong __ovld __cnfn convert_ulong_sat_rte(half);
+ulong __ovld __cnfn convert_ulong_sat_rtp(half);
+ulong __ovld __cnfn convert_ulong_sat_rtn(half);
+ulong __ovld __cnfn convert_ulong_sat_rtz(half);
+ulong2 __ovld __cnfn convert_ulong2(half2);
+ulong2 __ovld __cnfn convert_ulong2_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
+ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
+ulong3 __ovld __cnfn convert_ulong3(half3);
+ulong3 __ovld __cnfn convert_ulong3_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
+ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
+ulong4 __ovld __cnfn convert_ulong4(half4);
+ulong4 __ovld __cnfn convert_ulong4_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
+ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
+ulong8 __ovld __cnfn convert_ulong8(half8);
+ulong8 __ovld __cnfn convert_ulong8_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
+ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
+ulong16 __ovld __cnfn convert_ulong16(half16);
+ulong16 __ovld __cnfn convert_ulong16_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
+ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
+char __ovld __cnfn convert_char(half);
+char __ovld __cnfn convert_char_rte(half);
+char __ovld __cnfn convert_char_rtp(half);
+char __ovld __cnfn convert_char_rtn(half);
+char __ovld __cnfn convert_char_rtz(half);
+char __ovld __cnfn convert_char_sat(half);
+char __ovld __cnfn convert_char_sat_rte(half);
+char __ovld __cnfn convert_char_sat_rtp(half);
+char __ovld __cnfn convert_char_sat_rtn(half);
+char __ovld __cnfn convert_char_sat_rtz(half);
+char2 __ovld __cnfn convert_char2(half2);
+char2 __ovld __cnfn convert_char2_rte(half2);
+char2 __ovld __cnfn convert_char2_rtp(half2);
+char2 __ovld __cnfn convert_char2_rtn(half2);
+char2 __ovld __cnfn convert_char2_rtz(half2);
+char2 __ovld __cnfn convert_char2_sat(half2);
+char2 __ovld __cnfn convert_char2_sat_rte(half2);
+char2 __ovld __cnfn convert_char2_sat_rtp(half2);
+char2 __ovld __cnfn convert_char2_sat_rtn(half2);
+char2 __ovld __cnfn convert_char2_sat_rtz(half2);
+char3 __ovld __cnfn convert_char3(half3);
+char3 __ovld __cnfn convert_char3_rte(half3);
+char3 __ovld __cnfn convert_char3_rtp(half3);
+char3 __ovld __cnfn convert_char3_rtn(half3);
+char3 __ovld __cnfn convert_char3_rtz(half3);
+char3 __ovld __cnfn convert_char3_sat(half3);
+char3 __ovld __cnfn convert_char3_sat_rte(half3);
+char3 __ovld __cnfn convert_char3_sat_rtp(half3);
+char3 __ovld __cnfn convert_char3_sat_rtn(half3);
+char3 __ovld __cnfn convert_char3_sat_rtz(half3);
+char4 __ovld __cnfn convert_char4(half4);
+char4 __ovld __cnfn convert_char4_rte(half4);
+char4 __ovld __cnfn convert_char4_rtp(half4);
+char4 __ovld __cnfn convert_char4_rtn(half4);
+char4 __ovld __cnfn convert_char4_rtz(half4);
+char4 __ovld __cnfn convert_char4_sat(half4);
+char4 __ovld __cnfn convert_char4_sat_rte(half4);
+char4 __ovld __cnfn convert_char4_sat_rtp(half4);
+char4 __ovld __cnfn convert_char4_sat_rtn(half4);
+char4 __ovld __cnfn convert_char4_sat_rtz(half4);
+char8 __ovld __cnfn convert_char8(half8);
+char8 __ovld __cnfn convert_char8_rte(half8);
+char8 __ovld __cnfn convert_char8_rtp(half8);
+char8 __ovld __cnfn convert_char8_rtn(half8);
+char8 __ovld __cnfn convert_char8_rtz(half8);
+char8 __ovld __cnfn convert_char8_sat(half8);
+char8 __ovld __cnfn convert_char8_sat_rte(half8);
+char8 __ovld __cnfn convert_char8_sat_rtp(half8);
+char8 __ovld __cnfn convert_char8_sat_rtn(half8);
+char8 __ovld __cnfn convert_char8_sat_rtz(half8);
+char16 __ovld __cnfn convert_char16(half16);
+char16 __ovld __cnfn convert_char16_rte(half16);
+char16 __ovld __cnfn convert_char16_rtp(half16);
+char16 __ovld __cnfn convert_char16_rtn(half16);
+char16 __ovld __cnfn convert_char16_rtz(half16);
+char16 __ovld __cnfn convert_char16_sat(half16);
+char16 __ovld __cnfn convert_char16_sat_rte(half16);
+char16 __ovld __cnfn convert_char16_sat_rtp(half16);
+char16 __ovld __cnfn convert_char16_sat_rtn(half16);
+char16 __ovld __cnfn convert_char16_sat_rtz(half16);
+short __ovld __cnfn convert_short(half);
+short __ovld __cnfn convert_short_rte(half);
+short __ovld __cnfn convert_short_rtp(half);
+short __ovld __cnfn convert_short_rtn(half);
+short __ovld __cnfn convert_short_rtz(half);
+short __ovld __cnfn convert_short_sat(half);
+short __ovld __cnfn convert_short_sat_rte(half);
+short __ovld __cnfn convert_short_sat_rtp(half);
+short __ovld __cnfn convert_short_sat_rtn(half);
+short __ovld __cnfn convert_short_sat_rtz(half);
+short2 __ovld __cnfn convert_short2(half2);
+short2 __ovld __cnfn convert_short2_rte(half2);
+short2 __ovld __cnfn convert_short2_rtp(half2);
+short2 __ovld __cnfn convert_short2_rtn(half2);
+short2 __ovld __cnfn convert_short2_rtz(half2);
+short2 __ovld __cnfn convert_short2_sat(half2);
+short2 __ovld __cnfn convert_short2_sat_rte(half2);
+short2 __ovld __cnfn convert_short2_sat_rtp(half2);
+short2 __ovld __cnfn convert_short2_sat_rtn(half2);
+short2 __ovld __cnfn convert_short2_sat_rtz(half2);
+short3 __ovld __cnfn convert_short3(half3);
+short3 __ovld __cnfn convert_short3_rte(half3);
+short3 __ovld __cnfn convert_short3_rtp(half3);
+short3 __ovld __cnfn convert_short3_rtn(half3);
+short3 __ovld __cnfn convert_short3_rtz(half3);
+short3 __ovld __cnfn convert_short3_sat(half3);
+short3 __ovld __cnfn convert_short3_sat_rte(half3);
+short3 __ovld __cnfn convert_short3_sat_rtp(half3);
+short3 __ovld __cnfn convert_short3_sat_rtn(half3);
+short3 __ovld __cnfn convert_short3_sat_rtz(half3);
+short4 __ovld __cnfn convert_short4(half4);
+short4 __ovld __cnfn convert_short4_rte(half4);
+short4 __ovld __cnfn convert_short4_rtp(half4);
+short4 __ovld __cnfn convert_short4_rtn(half4);
+short4 __ovld __cnfn convert_short4_rtz(half4);
+short4 __ovld __cnfn convert_short4_sat(half4);
+short4 __ovld __cnfn convert_short4_sat_rte(half4);
+short4 __ovld __cnfn convert_short4_sat_rtp(half4);
+short4 __ovld __cnfn convert_short4_sat_rtn(half4);
+short4 __ovld __cnfn convert_short4_sat_rtz(half4);
+short8 __ovld __cnfn convert_short8(half8);
+short8 __ovld __cnfn convert_short8_rte(half8);
+short8 __ovld __cnfn convert_short8_rtp(half8);
+short8 __ovld __cnfn convert_short8_rtn(half8);
+short8 __ovld __cnfn convert_short8_rtz(half8);
+short8 __ovld __cnfn convert_short8_sat(half8);
+short8 __ovld __cnfn convert_short8_sat_rte(half8);
+short8 __ovld __cnfn convert_short8_sat_rtp(half8);
+short8 __ovld __cnfn convert_short8_sat_rtn(half8);
+short8 __ovld __cnfn convert_short8_sat_rtz(half8);
+short16 __ovld __cnfn convert_short16(half16);
+short16 __ovld __cnfn convert_short16_rte(half16);
+short16 __ovld __cnfn convert_short16_rtp(half16);
+short16 __ovld __cnfn convert_short16_rtn(half16);
+short16 __ovld __cnfn convert_short16_rtz(half16);
+short16 __ovld __cnfn convert_short16_sat(half16);
+short16 __ovld __cnfn convert_short16_sat_rte(half16);
+short16 __ovld __cnfn convert_short16_sat_rtp(half16);
+short16 __ovld __cnfn convert_short16_sat_rtn(half16);
+short16 __ovld __cnfn convert_short16_sat_rtz(half16);
+int __ovld __cnfn convert_int(half);
+int __ovld __cnfn convert_int_rte(half);
+int __ovld __cnfn convert_int_rtp(half);
+int __ovld __cnfn convert_int_rtn(half);
+int __ovld __cnfn convert_int_rtz(half);
+int __ovld __cnfn convert_int_sat(half);
+int __ovld __cnfn convert_int_sat_rte(half);
+int __ovld __cnfn convert_int_sat_rtp(half);
+int __ovld __cnfn convert_int_sat_rtn(half);
+int __ovld __cnfn convert_int_sat_rtz(half);
+int2 __ovld __cnfn convert_int2(half2);
+int2 __ovld __cnfn convert_int2_rte(half2);
+int2 __ovld __cnfn convert_int2_rtp(half2);
+int2 __ovld __cnfn convert_int2_rtn(half2);
+int2 __ovld __cnfn convert_int2_rtz(half2);
+int2 __ovld __cnfn convert_int2_sat(half2);
+int2 __ovld __cnfn convert_int2_sat_rte(half2);
+int2 __ovld __cnfn convert_int2_sat_rtp(half2);
+int2 __ovld __cnfn convert_int2_sat_rtn(half2);
+int2 __ovld __cnfn convert_int2_sat_rtz(half2);
+int3 __ovld __cnfn convert_int3(half3);
+int3 __ovld __cnfn convert_int3_rte(half3);
+int3 __ovld __cnfn convert_int3_rtp(half3);
+int3 __ovld __cnfn convert_int3_rtn(half3);
+int3 __ovld __cnfn convert_int3_rtz(half3);
+int3 __ovld __cnfn convert_int3_sat(half3);
+int3 __ovld __cnfn convert_int3_sat_rte(half3);
+int3 __ovld __cnfn convert_int3_sat_rtp(half3);
+int3 __ovld __cnfn convert_int3_sat_rtn(half3);
+int3 __ovld __cnfn convert_int3_sat_rtz(half3);
+int4 __ovld __cnfn convert_int4(half4);
+int4 __ovld __cnfn convert_int4_rte(half4);
+int4 __ovld __cnfn convert_int4_rtp(half4);
+int4 __ovld __cnfn convert_int4_rtn(half4);
+int4 __ovld __cnfn convert_int4_rtz(half4);
+int4 __ovld __cnfn convert_int4_sat(half4);
+int4 __ovld __cnfn convert_int4_sat_rte(half4);
+int4 __ovld __cnfn convert_int4_sat_rtp(half4);
+int4 __ovld __cnfn convert_int4_sat_rtn(half4);
+int4 __ovld __cnfn convert_int4_sat_rtz(half4);
+int8 __ovld __cnfn convert_int8(half8);
+int8 __ovld __cnfn convert_int8_rte(half8);
+int8 __ovld __cnfn convert_int8_rtp(half8);
+int8 __ovld __cnfn convert_int8_rtn(half8);
+int8 __ovld __cnfn convert_int8_rtz(half8);
+int8 __ovld __cnfn convert_int8_sat(half8);
+int8 __ovld __cnfn convert_int8_sat_rte(half8);
+int8 __ovld __cnfn convert_int8_sat_rtp(half8);
+int8 __ovld __cnfn convert_int8_sat_rtn(half8);
+int8 __ovld __cnfn convert_int8_sat_rtz(half8);
+int16 __ovld __cnfn convert_int16(half16);
+int16 __ovld __cnfn convert_int16_rte(half16);
+int16 __ovld __cnfn convert_int16_rtp(half16);
+int16 __ovld __cnfn convert_int16_rtn(half16);
+int16 __ovld __cnfn convert_int16_rtz(half16);
+int16 __ovld __cnfn convert_int16_sat(half16);
+int16 __ovld __cnfn convert_int16_sat_rte(half16);
+int16 __ovld __cnfn convert_int16_sat_rtp(half16);
+int16 __ovld __cnfn convert_int16_sat_rtn(half16);
+int16 __ovld __cnfn convert_int16_sat_rtz(half16);
+long __ovld __cnfn convert_long(half);
+long __ovld __cnfn convert_long_rte(half);
+long __ovld __cnfn convert_long_rtp(half);
+long __ovld __cnfn convert_long_rtn(half);
+long __ovld __cnfn convert_long_rtz(half);
+long __ovld __cnfn convert_long_sat(half);
+long __ovld __cnfn convert_long_sat_rte(half);
+long __ovld __cnfn convert_long_sat_rtp(half);
+long __ovld __cnfn convert_long_sat_rtn(half);
+long __ovld __cnfn convert_long_sat_rtz(half);
+long2 __ovld __cnfn convert_long2(half2);
+long2 __ovld __cnfn convert_long2_rte(half2);
+long2 __ovld __cnfn convert_long2_rtp(half2);
+long2 __ovld __cnfn convert_long2_rtn(half2);
+long2 __ovld __cnfn convert_long2_rtz(half2);
+long2 __ovld __cnfn convert_long2_sat(half2);
+long2 __ovld __cnfn convert_long2_sat_rte(half2);
+long2 __ovld __cnfn convert_long2_sat_rtp(half2);
+long2 __ovld __cnfn convert_long2_sat_rtn(half2);
+long2 __ovld __cnfn convert_long2_sat_rtz(half2);
+long3 __ovld __cnfn convert_long3(half3);
+long3 __ovld __cnfn convert_long3_rte(half3);
+long3 __ovld __cnfn convert_long3_rtp(half3);
+long3 __ovld __cnfn convert_long3_rtn(half3);
+long3 __ovld __cnfn convert_long3_rtz(half3);
+long3 __ovld __cnfn convert_long3_sat(half3);
+long3 __ovld __cnfn convert_long3_sat_rte(half3);
+long3 __ovld __cnfn convert_long3_sat_rtp(half3);
+long3 __ovld __cnfn convert_long3_sat_rtn(half3);
+long3 __ovld __cnfn convert_long3_sat_rtz(half3);
+long4 __ovld __cnfn convert_long4(half4);
+long4 __ovld __cnfn convert_long4_rte(half4);
+long4 __ovld __cnfn convert_long4_rtp(half4);
+long4 __ovld __cnfn convert_long4_rtn(half4);
+long4 __ovld __cnfn convert_long4_rtz(half4);
+long4 __ovld __cnfn convert_long4_sat(half4);
+long4 __ovld __cnfn convert_long4_sat_rte(half4);
+long4 __ovld __cnfn convert_long4_sat_rtp(half4);
+long4 __ovld __cnfn convert_long4_sat_rtn(half4);
+long4 __ovld __cnfn convert_long4_sat_rtz(half4);
+long8 __ovld __cnfn convert_long8(half8);
+long8 __ovld __cnfn convert_long8_rte(half8);
+long8 __ovld __cnfn convert_long8_rtp(half8);
+long8 __ovld __cnfn convert_long8_rtn(half8);
+long8 __ovld __cnfn convert_long8_rtz(half8);
+long8 __ovld __cnfn convert_long8_sat(half8);
+long8 __ovld __cnfn convert_long8_sat_rte(half8);
+long8 __ovld __cnfn convert_long8_sat_rtp(half8);
+long8 __ovld __cnfn convert_long8_sat_rtn(half8);
+long8 __ovld __cnfn convert_long8_sat_rtz(half8);
+long16 __ovld __cnfn convert_long16(half16);
+long16 __ovld __cnfn convert_long16_rte(half16);
+long16 __ovld __cnfn convert_long16_rtp(half16);
+long16 __ovld __cnfn convert_long16_rtn(half16);
+long16 __ovld __cnfn convert_long16_rtz(half16);
+long16 __ovld __cnfn convert_long16_sat(half16);
+long16 __ovld __cnfn convert_long16_sat_rte(half16);
+long16 __ovld __cnfn convert_long16_sat_rtp(half16);
+long16 __ovld __cnfn convert_long16_sat_rtn(half16);
+long16 __ovld __cnfn convert_long16_sat_rtz(half16);
+float __ovld __cnfn convert_float(half);
+float __ovld __cnfn convert_float_rte(half);
+float __ovld __cnfn convert_float_rtp(half);
+float __ovld __cnfn convert_float_rtn(half);
+float __ovld __cnfn convert_float_rtz(half);
+float2 __ovld __cnfn convert_float2(half2);
+float2 __ovld __cnfn convert_float2_rte(half2);
+float2 __ovld __cnfn convert_float2_rtp(half2);
+float2 __ovld __cnfn convert_float2_rtn(half2);
+float2 __ovld __cnfn convert_float2_rtz(half2);
+float3 __ovld __cnfn convert_float3(half3);
+float3 __ovld __cnfn convert_float3_rte(half3);
+float3 __ovld __cnfn convert_float3_rtp(half3);
+float3 __ovld __cnfn convert_float3_rtn(half3);
+float3 __ovld __cnfn convert_float3_rtz(half3);
+float4 __ovld __cnfn convert_float4(half4);
+float4 __ovld __cnfn convert_float4_rte(half4);
+float4 __ovld __cnfn convert_float4_rtp(half4);
+float4 __ovld __cnfn convert_float4_rtn(half4);
+float4 __ovld __cnfn convert_float4_rtz(half4);
+float8 __ovld __cnfn convert_float8(half8);
+float8 __ovld __cnfn convert_float8_rte(half8);
+float8 __ovld __cnfn convert_float8_rtp(half8);
+float8 __ovld __cnfn convert_float8_rtn(half8);
+float8 __ovld __cnfn convert_float8_rtz(half8);
+float16 __ovld __cnfn convert_float16(half16);
+float16 __ovld __cnfn convert_float16_rte(half16);
+float16 __ovld __cnfn convert_float16_rtp(half16);
+float16 __ovld __cnfn convert_float16_rtn(half16);
+float16 __ovld __cnfn convert_float16_rtz(half16);
+
+// Convert non-double types to half types.
+half __ovld __cnfn convert_half(uchar);
+half __ovld __cnfn convert_half(ushort);
+half __ovld __cnfn convert_half(uint);
+half __ovld __cnfn convert_half(ulong);
+half __ovld __cnfn convert_half(char);
+half __ovld __cnfn convert_half(short);
+half __ovld __cnfn convert_half(int);
+half __ovld __cnfn convert_half(long);
+half __ovld __cnfn convert_half(float);
+half __ovld __cnfn convert_half(half);
+half __ovld __cnfn convert_half_rte(uchar);
+half __ovld __cnfn convert_half_rte(ushort);
+half __ovld __cnfn convert_half_rte(uint);
+half __ovld __cnfn convert_half_rte(ulong);
+half __ovld __cnfn convert_half_rte(char);
+half __ovld __cnfn convert_half_rte(short);
+half __ovld __cnfn convert_half_rte(int);
+half __ovld __cnfn convert_half_rte(long);
+half __ovld __cnfn convert_half_rte(float);
+half __ovld __cnfn convert_half_rte(half);
+half __ovld __cnfn convert_half_rtp(uchar);
+half __ovld __cnfn convert_half_rtp(ushort);
+half __ovld __cnfn convert_half_rtp(uint);
+half __ovld __cnfn convert_half_rtp(ulong);
+half __ovld __cnfn convert_half_rtp(char);
+half __ovld __cnfn convert_half_rtp(short);
+half __ovld __cnfn convert_half_rtp(int);
+half __ovld __cnfn convert_half_rtp(long);
+half __ovld __cnfn convert_half_rtp(float);
+half __ovld __cnfn convert_half_rtp(half);
+half __ovld __cnfn convert_half_rtn(uchar);
+half __ovld __cnfn convert_half_rtn(ushort);
+half __ovld __cnfn convert_half_rtn(uint);
+half __ovld __cnfn convert_half_rtn(ulong);
+half __ovld __cnfn convert_half_rtn(char);
+half __ovld __cnfn convert_half_rtn(short);
+half __ovld __cnfn convert_half_rtn(int);
+half __ovld __cnfn convert_half_rtn(long);
+half __ovld __cnfn convert_half_rtn(float);
+half __ovld __cnfn convert_half_rtn(half);
+half __ovld __cnfn convert_half_rtz(uchar);
+half __ovld __cnfn convert_half_rtz(ushort);
+half __ovld __cnfn convert_half_rtz(uint);
+half __ovld __cnfn convert_half_rtz(ulong);
+half __ovld __cnfn convert_half_rtz(char);
+half __ovld __cnfn convert_half_rtz(short);
+half __ovld __cnfn convert_half_rtz(int);
+half __ovld __cnfn convert_half_rtz(long);
+half __ovld __cnfn convert_half_rtz(float);
+half __ovld __cnfn convert_half_rtz(half);
+half2 __ovld __cnfn convert_half2(char2);
+half2 __ovld __cnfn convert_half2(uchar2);
+half2 __ovld __cnfn convert_half2(short2);
+half2 __ovld __cnfn convert_half2(ushort2);
+half2 __ovld __cnfn convert_half2(int2);
+half2 __ovld __cnfn convert_half2(uint2);
+half2 __ovld __cnfn convert_half2(long2);
+half2 __ovld __cnfn convert_half2(ulong2);
+half2 __ovld __cnfn convert_half2(float2);
+half2 __ovld __cnfn convert_half2(half2);
+half2 __ovld __cnfn convert_half2_rte(char2);
+half2 __ovld __cnfn convert_half2_rte(uchar2);
+half2 __ovld __cnfn convert_half2_rte(short2);
+half2 __ovld __cnfn convert_half2_rte(ushort2);
+half2 __ovld __cnfn convert_half2_rte(int2);
+half2 __ovld __cnfn convert_half2_rte(uint2);
+half2 __ovld __cnfn convert_half2_rte(long2);
+half2 __ovld __cnfn convert_half2_rte(ulong2);
+half2 __ovld __cnfn convert_half2_rte(float2);
+half2 __ovld __cnfn convert_half2_rte(half2);
+half2 __ovld __cnfn convert_half2_rtp(char2);
+half2 __ovld __cnfn convert_half2_rtp(uchar2);
+half2 __ovld __cnfn convert_half2_rtp(short2);
+half2 __ovld __cnfn convert_half2_rtp(ushort2);
+half2 __ovld __cnfn convert_half2_rtp(int2);
+half2 __ovld __cnfn convert_half2_rtp(uint2);
+half2 __ovld __cnfn convert_half2_rtp(long2);
+half2 __ovld __cnfn convert_half2_rtp(ulong2);
+half2 __ovld __cnfn convert_half2_rtp(float2);
+half2 __ovld __cnfn convert_half2_rtp(half2);
+half2 __ovld __cnfn convert_half2_rtn(char2);
+half2 __ovld __cnfn convert_half2_rtn(uchar2);
+half2 __ovld __cnfn convert_half2_rtn(short2);
+half2 __ovld __cnfn convert_half2_rtn(ushort2);
+half2 __ovld __cnfn convert_half2_rtn(int2);
+half2 __ovld __cnfn convert_half2_rtn(uint2);
+half2 __ovld __cnfn convert_half2_rtn(long2);
+half2 __ovld __cnfn convert_half2_rtn(ulong2);
+half2 __ovld __cnfn convert_half2_rtn(float2);
+half2 __ovld __cnfn convert_half2_rtn(half2);
+half2 __ovld __cnfn convert_half2_rtz(char2);
+half2 __ovld __cnfn convert_half2_rtz(uchar2);
+half2 __ovld __cnfn convert_half2_rtz(short2);
+half2 __ovld __cnfn convert_half2_rtz(ushort2);
+half2 __ovld __cnfn convert_half2_rtz(int2);
+half2 __ovld __cnfn convert_half2_rtz(uint2);
+half2 __ovld __cnfn convert_half2_rtz(long2);
+half2 __ovld __cnfn convert_half2_rtz(ulong2);
+half2 __ovld __cnfn convert_half2_rtz(float2);
+half2 __ovld __cnfn convert_half2_rtz(half2);
+half3 __ovld __cnfn convert_half3(char3);
+half3 __ovld __cnfn convert_half3(uchar3);
+half3 __ovld __cnfn convert_half3(short3);
+half3 __ovld __cnfn convert_half3(ushort3);
+half3 __ovld __cnfn convert_half3(int3);
+half3 __ovld __cnfn convert_half3(uint3);
+half3 __ovld __cnfn convert_half3(long3);
+half3 __ovld __cnfn convert_half3(ulong3);
+half3 __ovld __cnfn convert_half3(float3);
+half3 __ovld __cnfn convert_half3(half3);
+half3 __ovld __cnfn convert_half3_rte(char3);
+half3 __ovld __cnfn convert_half3_rte(uchar3);
+half3 __ovld __cnfn convert_half3_rte(short3);
+half3 __ovld __cnfn convert_half3_rte(ushort3);
+half3 __ovld __cnfn convert_half3_rte(int3);
+half3 __ovld __cnfn convert_half3_rte(uint3);
+half3 __ovld __cnfn convert_half3_rte(long3);
+half3 __ovld __cnfn convert_half3_rte(ulong3);
+half3 __ovld __cnfn convert_half3_rte(float3);
+half3 __ovld __cnfn convert_half3_rte(half3);
+half3 __ovld __cnfn convert_half3_rtp(char3);
+half3 __ovld __cnfn convert_half3_rtp(uchar3);
+half3 __ovld __cnfn convert_half3_rtp(short3);
+half3 __ovld __cnfn convert_half3_rtp(ushort3);
+half3 __ovld __cnfn convert_half3_rtp(int3);
+half3 __ovld __cnfn convert_half3_rtp(uint3);
+half3 __ovld __cnfn convert_half3_rtp(long3);
+half3 __ovld __cnfn convert_half3_rtp(ulong3);
+half3 __ovld __cnfn convert_half3_rtp(float3);
+half3 __ovld __cnfn convert_half3_rtp(half3);
+half3 __ovld __cnfn convert_half3_rtn(char3);
+half3 __ovld __cnfn convert_half3_rtn(uchar3);
+half3 __ovld __cnfn convert_half3_rtn(short3);
+half3 __ovld __cnfn convert_half3_rtn(ushort3);
+half3 __ovld __cnfn convert_half3_rtn(int3);
+half3 __ovld __cnfn convert_half3_rtn(uint3);
+half3 __ovld __cnfn convert_half3_rtn(long3);
+half3 __ovld __cnfn convert_half3_rtn(ulong3);
+half3 __ovld __cnfn convert_half3_rtn(float3);
+half3 __ovld __cnfn convert_half3_rtn(half3);
+half3 __ovld __cnfn convert_half3_rtz(char3);
+half3 __ovld __cnfn convert_half3_rtz(uchar3);
+half3 __ovld __cnfn convert_half3_rtz(short3);
+half3 __ovld __cnfn convert_half3_rtz(ushort3);
+half3 __ovld __cnfn convert_half3_rtz(int3);
+half3 __ovld __cnfn convert_half3_rtz(uint3);
+half3 __ovld __cnfn convert_half3_rtz(long3);
+half3 __ovld __cnfn convert_half3_rtz(ulong3);
+half3 __ovld __cnfn convert_half3_rtz(float3);
+half3 __ovld __cnfn convert_half3_rtz(half3);
+half4 __ovld __cnfn convert_half4(char4);
+half4 __ovld __cnfn convert_half4(uchar4);
+half4 __ovld __cnfn convert_half4(short4);
+half4 __ovld __cnfn convert_half4(ushort4);
+half4 __ovld __cnfn convert_half4(int4);
+half4 __ovld __cnfn convert_half4(uint4);
+half4 __ovld __cnfn convert_half4(long4);
+half4 __ovld __cnfn convert_half4(ulong4);
+half4 __ovld __cnfn convert_half4(float4);
+half4 __ovld __cnfn convert_half4(half4);
+half4 __ovld __cnfn convert_half4_rte(char4);
+half4 __ovld __cnfn convert_half4_rte(uchar4);
+half4 __ovld __cnfn convert_half4_rte(short4);
+half4 __ovld __cnfn convert_half4_rte(ushort4);
+half4 __ovld __cnfn convert_half4_rte(int4);
+half4 __ovld __cnfn convert_half4_rte(uint4);
+half4 __ovld __cnfn convert_half4_rte(long4);
+half4 __ovld __cnfn convert_half4_rte(ulong4);
+half4 __ovld __cnfn convert_half4_rte(float4);
+half4 __ovld __cnfn convert_half4_rte(half4);
+half4 __ovld __cnfn convert_half4_rtp(char4);
+half4 __ovld __cnfn convert_half4_rtp(uchar4);
+half4 __ovld __cnfn convert_half4_rtp(short4);
+half4 __ovld __cnfn convert_half4_rtp(ushort4);
+half4 __ovld __cnfn convert_half4_rtp(int4);
+half4 __ovld __cnfn convert_half4_rtp(uint4);
+half4 __ovld __cnfn convert_half4_rtp(long4);
+half4 __ovld __cnfn convert_half4_rtp(ulong4);
+half4 __ovld __cnfn convert_half4_rtp(float4);
+half4 __ovld __cnfn convert_half4_rtp(half4);
+half4 __ovld __cnfn convert_half4_rtn(char4);
+half4 __ovld __cnfn convert_half4_rtn(uchar4);
+half4 __ovld __cnfn convert_half4_rtn(short4);
+half4 __ovld __cnfn convert_half4_rtn(ushort4);
+half4 __ovld __cnfn convert_half4_rtn(int4);
+half4 __ovld __cnfn convert_half4_rtn(uint4);
+half4 __ovld __cnfn convert_half4_rtn(long4);
+half4 __ovld __cnfn convert_half4_rtn(ulong4);
+half4 __ovld __cnfn convert_half4_rtn(float4);
+half4 __ovld __cnfn convert_half4_rtn(half4);
+half4 __ovld __cnfn convert_half4_rtz(char4);
+half4 __ovld __cnfn convert_half4_rtz(uchar4);
+half4 __ovld __cnfn convert_half4_rtz(short4);
+half4 __ovld __cnfn convert_half4_rtz(ushort4);
+half4 __ovld __cnfn convert_half4_rtz(int4);
+half4 __ovld __cnfn convert_half4_rtz(uint4);
+half4 __ovld __cnfn convert_half4_rtz(long4);
+half4 __ovld __cnfn convert_half4_rtz(ulong4);
+half4 __ovld __cnfn convert_half4_rtz(float4);
+half4 __ovld __cnfn convert_half4_rtz(half4);
+half8 __ovld __cnfn convert_half8(char8);
+half8 __ovld __cnfn convert_half8(uchar8);
+half8 __ovld __cnfn convert_half8(short8);
+half8 __ovld __cnfn convert_half8(ushort8);
+half8 __ovld __cnfn convert_half8(int8);
+half8 __ovld __cnfn convert_half8(uint8);
+half8 __ovld __cnfn convert_half8(long8);
+half8 __ovld __cnfn convert_half8(ulong8);
+half8 __ovld __cnfn convert_half8(float8);
+half8 __ovld __cnfn convert_half8(half8);
+half8 __ovld __cnfn convert_half8_rte(char8);
+half8 __ovld __cnfn convert_half8_rte(uchar8);
+half8 __ovld __cnfn convert_half8_rte(short8);
+half8 __ovld __cnfn convert_half8_rte(ushort8);
+half8 __ovld __cnfn convert_half8_rte(int8);
+half8 __ovld __cnfn convert_half8_rte(uint8);
+half8 __ovld __cnfn convert_half8_rte(long8);
+half8 __ovld __cnfn convert_half8_rte(ulong8);
+half8 __ovld __cnfn convert_half8_rte(float8);
+half8 __ovld __cnfn convert_half8_rte(half8);
+half8 __ovld __cnfn convert_half8_rtp(char8);
+half8 __ovld __cnfn convert_half8_rtp(uchar8);
+half8 __ovld __cnfn convert_half8_rtp(short8);
+half8 __ovld __cnfn convert_half8_rtp(ushort8);
+half8 __ovld __cnfn convert_half8_rtp(int8);
+half8 __ovld __cnfn convert_half8_rtp(uint8);
convert_half8_rtp(uint8); +half8 __ovld __cnfn convert_half8_rtp(long8); +half8 __ovld __cnfn convert_half8_rtp(ulong8); +half8 __ovld __cnfn convert_half8_rtp(float8); +half8 __ovld __cnfn convert_half8_rtp(half8); +half8 __ovld __cnfn convert_half8_rtn(char8); +half8 __ovld __cnfn convert_half8_rtn(uchar8); +half8 __ovld __cnfn convert_half8_rtn(short8); +half8 __ovld __cnfn convert_half8_rtn(ushort8); +half8 __ovld __cnfn convert_half8_rtn(int8); +half8 __ovld __cnfn convert_half8_rtn(uint8); +half8 __ovld __cnfn convert_half8_rtn(long8); +half8 __ovld __cnfn convert_half8_rtn(ulong8); +half8 __ovld __cnfn convert_half8_rtn(float8); +half8 __ovld __cnfn convert_half8_rtn(half8); +half8 __ovld __cnfn convert_half8_rtz(char8); +half8 __ovld __cnfn convert_half8_rtz(uchar8); +half8 __ovld __cnfn convert_half8_rtz(short8); +half8 __ovld __cnfn convert_half8_rtz(ushort8); +half8 __ovld __cnfn convert_half8_rtz(int8); +half8 __ovld __cnfn convert_half8_rtz(uint8); +half8 __ovld __cnfn convert_half8_rtz(long8); +half8 __ovld __cnfn convert_half8_rtz(ulong8); +half8 __ovld __cnfn convert_half8_rtz(float8); +half8 __ovld __cnfn convert_half8_rtz(half8); +half16 __ovld __cnfn convert_half16(char16); +half16 __ovld __cnfn convert_half16(uchar16); +half16 __ovld __cnfn convert_half16(short16); +half16 __ovld __cnfn convert_half16(ushort16); +half16 __ovld __cnfn convert_half16(int16); +half16 __ovld __cnfn convert_half16(uint16); +half16 __ovld __cnfn convert_half16(long16); +half16 __ovld __cnfn convert_half16(ulong16); +half16 __ovld __cnfn convert_half16(float16); +half16 __ovld __cnfn convert_half16(half16); +half16 __ovld __cnfn convert_half16_rte(char16); +half16 __ovld __cnfn convert_half16_rte(uchar16); +half16 __ovld __cnfn convert_half16_rte(short16); +half16 __ovld __cnfn convert_half16_rte(ushort16); +half16 __ovld __cnfn convert_half16_rte(int16); +half16 __ovld __cnfn convert_half16_rte(uint16); +half16 __ovld __cnfn convert_half16_rte(long16); +half16 __ovld __cnfn convert_half16_rte(ulong16); +half16 __ovld __cnfn convert_half16_rte(float16); +half16 __ovld __cnfn convert_half16_rte(half16); +half16 __ovld __cnfn convert_half16_rtp(char16); +half16 __ovld __cnfn convert_half16_rtp(uchar16); +half16 __ovld __cnfn convert_half16_rtp(short16); +half16 __ovld __cnfn convert_half16_rtp(ushort16); +half16 __ovld __cnfn convert_half16_rtp(int16); +half16 __ovld __cnfn convert_half16_rtp(uint16); +half16 __ovld __cnfn convert_half16_rtp(long16); +half16 __ovld __cnfn convert_half16_rtp(ulong16); +half16 __ovld __cnfn convert_half16_rtp(float16); +half16 __ovld __cnfn convert_half16_rtp(half16); +half16 __ovld __cnfn convert_half16_rtn(char16); +half16 __ovld __cnfn convert_half16_rtn(uchar16); +half16 __ovld __cnfn convert_half16_rtn(short16); +half16 __ovld __cnfn convert_half16_rtn(ushort16); +half16 __ovld __cnfn convert_half16_rtn(int16); +half16 __ovld __cnfn convert_half16_rtn(uint16); +half16 __ovld __cnfn convert_half16_rtn(long16); +half16 __ovld __cnfn convert_half16_rtn(ulong16); +half16 __ovld __cnfn convert_half16_rtn(float16); +half16 __ovld __cnfn convert_half16_rtn(half16); +half16 __ovld __cnfn convert_half16_rtz(char16); +half16 __ovld __cnfn convert_half16_rtz(uchar16); +half16 __ovld __cnfn convert_half16_rtz(short16); +half16 __ovld __cnfn convert_half16_rtz(ushort16); +half16 __ovld __cnfn convert_half16_rtz(int16); +half16 __ovld __cnfn convert_half16_rtz(uint16); +half16 __ovld __cnfn convert_half16_rtz(long16); +half16 __ovld __cnfn convert_half16_rtz(ulong16); 
+half16 __ovld __cnfn convert_half16_rtz(float16); +half16 __ovld __cnfn convert_half16_rtz(half16); + +// Convert half types to double types. +#ifdef cl_khr_fp64 +double __ovld __cnfn convert_double(half); +double __ovld __cnfn convert_double_rte(half); +double __ovld __cnfn convert_double_rtp(half); +double __ovld __cnfn convert_double_rtn(half); +double __ovld __cnfn convert_double_rtz(half); +double2 __ovld __cnfn convert_double2(half2); +double2 __ovld __cnfn convert_double2_rte(half2); +double2 __ovld __cnfn convert_double2_rtp(half2); +double2 __ovld __cnfn convert_double2_rtn(half2); +double2 __ovld __cnfn convert_double2_rtz(half2); +double3 __ovld __cnfn convert_double3(half3); +double3 __ovld __cnfn convert_double3_rte(half3); +double3 __ovld __cnfn convert_double3_rtp(half3); +double3 __ovld __cnfn convert_double3_rtn(half3); +double3 __ovld __cnfn convert_double3_rtz(half3); +double4 __ovld __cnfn convert_double4(half4); +double4 __ovld __cnfn convert_double4_rte(half4); +double4 __ovld __cnfn convert_double4_rtp(half4); +double4 __ovld __cnfn convert_double4_rtn(half4); +double4 __ovld __cnfn convert_double4_rtz(half4); +double8 __ovld __cnfn convert_double8(half8); +double8 __ovld __cnfn convert_double8_rte(half8); +double8 __ovld __cnfn convert_double8_rtp(half8); +double8 __ovld __cnfn convert_double8_rtn(half8); +double8 __ovld __cnfn convert_double8_rtz(half8); +double16 __ovld __cnfn convert_double16(half16); +double16 __ovld __cnfn convert_double16_rte(half16); +double16 __ovld __cnfn convert_double16_rtp(half16); +double16 __ovld __cnfn convert_double16_rtn(half16); +double16 __ovld __cnfn convert_double16_rtz(half16); + +// Convert double types to half types. +half __ovld __cnfn convert_half(double); +half __ovld __cnfn convert_half_rte(double); +half __ovld __cnfn convert_half_rtp(double); +half __ovld __cnfn convert_half_rtn(double); +half __ovld __cnfn convert_half_rtz(double); +half2 __ovld __cnfn convert_half2(double2); +half2 __ovld __cnfn convert_half2_rte(double2); +half2 __ovld __cnfn convert_half2_rtp(double2); +half2 __ovld __cnfn convert_half2_rtn(double2); +half2 __ovld __cnfn convert_half2_rtz(double2); +half3 __ovld __cnfn convert_half3(double3); +half3 __ovld __cnfn convert_half3_rte(double3); +half3 __ovld __cnfn convert_half3_rtp(double3); +half3 __ovld __cnfn convert_half3_rtn(double3); +half3 __ovld __cnfn convert_half3_rtz(double3); +half4 __ovld __cnfn convert_half4(double4); +half4 __ovld __cnfn convert_half4_rte(double4); +half4 __ovld __cnfn convert_half4_rtp(double4); +half4 __ovld __cnfn convert_half4_rtn(double4); +half4 __ovld __cnfn convert_half4_rtz(double4); +half8 __ovld __cnfn convert_half8(double8); +half8 __ovld __cnfn convert_half8_rte(double8); +half8 __ovld __cnfn convert_half8_rtp(double8); +half8 __ovld __cnfn convert_half8_rtn(double8); +half8 __ovld __cnfn convert_half8_rtz(double8); +half16 __ovld __cnfn convert_half16(double16); +half16 __ovld __cnfn convert_half16_rte(double16); +half16 __ovld __cnfn convert_half16_rtp(double16); +half16 __ovld __cnfn convert_half16_rtn(double16); +half16 __ovld __cnfn convert_half16_rtz(double16); +#endif //cl_khr_fp64 + +#endif // cl_khr_fp16 + +// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions + +/** + * Returns the number of dimensions in use. This is the + * value given to the work_dim argument specified in + * clEnqueueNDRangeKernel. + * For clEnqueueTask, this returns 1. 
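
The rounding-suffixed conversions above (plus the half<->double pairs guarded by cl_khr_fp64) are how a kernel picks an explicit rounding mode instead of the default round-to-nearest-even. A minimal usage sketch, assuming cl_khr_fp16 is available; the kernel and buffer names are illustrative, not part of this header:

#pragma OPENCL EXTENSION cl_khr_fp16 : enable

// Narrow float4 data to half4 twice: once with round-to-nearest-even
// spelled out explicitly, once truncating toward zero.
__kernel void narrow_to_half(__global const float4 *src,
                             __global half4 *rte_out,
                             __global half4 *rtz_out) {
  size_t i = get_global_id(0);
  rte_out[i] = convert_half4_rte(src[i]);
  rtz_out[i] = convert_half4_rtz(src[i]);
}
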
+ */ +uint __ovld __cnfn get_work_dim(void); + +/** + * Returns the number of global work-items specified for + * dimension identified by dimindx. This value is given by + * the global_work_size argument to + * clEnqueueNDRangeKernel. Valid values of dimindx + * are 0 to get_work_dim() - 1. For other values of + * dimindx, get_global_size() returns 1. + * For clEnqueueTask, this always returns 1. + */ +size_t __ovld __cnfn get_global_size(uint dimindx); + +/** + * Returns the unique global work-item ID value for + * dimension identified by dimindx. The global work-item + * ID specifies the work-item ID based on the number of + * global work-items specified to execute the kernel. Valid + * values of dimindx are 0 to get_work_dim() - 1. For + * other values of dimindx, get_global_id() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_global_id(uint dimindx); + +/** + * Returns the number of local work-items specified in + * dimension identified by dimindx. This value is given by + * the local_work_size argument to + * clEnqueueNDRangeKernel if local_work_size is not + * NULL; otherwise the OpenCL implementation chooses + * an appropriate local_work_size value which is returned + * by this function. Valid values of dimindx are 0 to + * get_work_dim() - 1. For other values of dimindx, + * get_local_size() returns 1. + * For clEnqueueTask, this always returns 1. + */ +size_t __ovld __cnfn get_local_size(uint dimindx); + +/** + * Returns the unique local work-item ID i.e. a work-item + * within a specific work-group for dimension identified by + * dimindx. Valid values of dimindx are 0 to + * get_work_dim() - 1. For other values of dimindx, + * get_local_id() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_local_id(uint dimindx); + +/** + * Returns the number of work-groups that will execute a + * kernel for dimension identified by dimindx. + * Valid values of dimindx are 0 to get_work_dim() - 1. + * For other values of dimindx, get_num_groups() returns 1. + * For clEnqueueTask, this always returns 1. + */ +size_t __ovld __cnfn get_num_groups(uint dimindx); + +/** + * get_group_id returns the work-group ID which is a + * number from 0 .. get_num_groups(dimindx) - 1. + * Valid values of dimindx are 0 to get_work_dim() - 1. + * For other values, get_group_id() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_group_id(uint dimindx); + +/** + * get_global_offset returns the offset values specified in + * global_work_offset argument to + * clEnqueueNDRangeKernel. + * Valid values of dimindx are 0 to get_work_dim() - 1. + * For other values, get_global_offset() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_global_offset(uint dimindx); + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +size_t __ovld get_enqueued_local_size(uint dimindx); +size_t __ovld get_global_linear_id(void); +size_t __ovld get_local_linear_id(void); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions + +/** + * Arc cosine function. 
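
These work-item queries are the standard way to map an NDRange onto memory. A sketch flattening an assumed 2-D launch into a row-major index by hand (names are illustrative; on OpenCL 2.0 the get_global_linear_id() declared above performs a similar flattening directly):

// Write each work-item's row-major linear index, built from the
// per-dimension queries declared above.
__kernel void linear_index(__global int *out) {
  size_t x = get_global_id(0);
  size_t y = get_global_id(1);
  size_t w = get_global_size(0);
  out[y * w + x] = (int)(y * w + x);
}
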
+ */ +float __ovld __cnfn acos(float); +float2 __ovld __cnfn acos(float2); +float3 __ovld __cnfn acos(float3); +float4 __ovld __cnfn acos(float4); +float8 __ovld __cnfn acos(float8); +float16 __ovld __cnfn acos(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn acos(double); +double2 __ovld __cnfn acos(double2); +double3 __ovld __cnfn acos(double3); +double4 __ovld __cnfn acos(double4); +double8 __ovld __cnfn acos(double8); +double16 __ovld __cnfn acos(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn acos(half); +half2 __ovld __cnfn acos(half2); +half3 __ovld __cnfn acos(half3); +half4 __ovld __cnfn acos(half4); +half8 __ovld __cnfn acos(half8); +half16 __ovld __cnfn acos(half16); +#endif //cl_khr_fp16 + +/** + * Inverse hyperbolic cosine. + */ +float __ovld __cnfn acosh(float); +float2 __ovld __cnfn acosh(float2); +float3 __ovld __cnfn acosh(float3); +float4 __ovld __cnfn acosh(float4); +float8 __ovld __cnfn acosh(float8); +float16 __ovld __cnfn acosh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn acosh(double); +double2 __ovld __cnfn acosh(double2); +double3 __ovld __cnfn acosh(double3); +double4 __ovld __cnfn acosh(double4); +double8 __ovld __cnfn acosh(double8); +double16 __ovld __cnfn acosh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn acosh(half); +half2 __ovld __cnfn acosh(half2); +half3 __ovld __cnfn acosh(half3); +half4 __ovld __cnfn acosh(half4); +half8 __ovld __cnfn acosh(half8); +half16 __ovld __cnfn acosh(half16); +#endif //cl_khr_fp16 + +/** + * Compute acos (x) / PI. + */ +float __ovld __cnfn acospi(float x); +float2 __ovld __cnfn acospi(float2 x); +float3 __ovld __cnfn acospi(float3 x); +float4 __ovld __cnfn acospi(float4 x); +float8 __ovld __cnfn acospi(float8 x); +float16 __ovld __cnfn acospi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn acospi(double x); +double2 __ovld __cnfn acospi(double2 x); +double3 __ovld __cnfn acospi(double3 x); +double4 __ovld __cnfn acospi(double4 x); +double8 __ovld __cnfn acospi(double8 x); +double16 __ovld __cnfn acospi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn acospi(half x); +half2 __ovld __cnfn acospi(half2 x); +half3 __ovld __cnfn acospi(half3 x); +half4 __ovld __cnfn acospi(half4 x); +half8 __ovld __cnfn acospi(half8 x); +half16 __ovld __cnfn acospi(half16 x); +#endif //cl_khr_fp16 + +/** + * Arc sine function. + */ +float __ovld __cnfn asin(float); +float2 __ovld __cnfn asin(float2); +float3 __ovld __cnfn asin(float3); +float4 __ovld __cnfn asin(float4); +float8 __ovld __cnfn asin(float8); +float16 __ovld __cnfn asin(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn asin(double); +double2 __ovld __cnfn asin(double2); +double3 __ovld __cnfn asin(double3); +double4 __ovld __cnfn asin(double4); +double8 __ovld __cnfn asin(double8); +double16 __ovld __cnfn asin(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn asin(half); +half2 __ovld __cnfn asin(half2); +half3 __ovld __cnfn asin(half3); +half4 __ovld __cnfn asin(half4); +half8 __ovld __cnfn asin(half8); +half16 __ovld __cnfn asin(half16); +#endif //cl_khr_fp16 + +/** + * Inverse hyperbolic sine. 
+ */ +float __ovld __cnfn asinh(float); +float2 __ovld __cnfn asinh(float2); +float3 __ovld __cnfn asinh(float3); +float4 __ovld __cnfn asinh(float4); +float8 __ovld __cnfn asinh(float8); +float16 __ovld __cnfn asinh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn asinh(double); +double2 __ovld __cnfn asinh(double2); +double3 __ovld __cnfn asinh(double3); +double4 __ovld __cnfn asinh(double4); +double8 __ovld __cnfn asinh(double8); +double16 __ovld __cnfn asinh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn asinh(half); +half2 __ovld __cnfn asinh(half2); +half3 __ovld __cnfn asinh(half3); +half4 __ovld __cnfn asinh(half4); +half8 __ovld __cnfn asinh(half8); +half16 __ovld __cnfn asinh(half16); +#endif //cl_khr_fp16 + +/** + * Compute asin (x) / PI. + */ +float __ovld __cnfn asinpi(float x); +float2 __ovld __cnfn asinpi(float2 x); +float3 __ovld __cnfn asinpi(float3 x); +float4 __ovld __cnfn asinpi(float4 x); +float8 __ovld __cnfn asinpi(float8 x); +float16 __ovld __cnfn asinpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn asinpi(double x); +double2 __ovld __cnfn asinpi(double2 x); +double3 __ovld __cnfn asinpi(double3 x); +double4 __ovld __cnfn asinpi(double4 x); +double8 __ovld __cnfn asinpi(double8 x); +double16 __ovld __cnfn asinpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn asinpi(half x); +half2 __ovld __cnfn asinpi(half2 x); +half3 __ovld __cnfn asinpi(half3 x); +half4 __ovld __cnfn asinpi(half4 x); +half8 __ovld __cnfn asinpi(half8 x); +half16 __ovld __cnfn asinpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Arc tangent function. + */ +float __ovld __cnfn atan(float y_over_x); +float2 __ovld __cnfn atan(float2 y_over_x); +float3 __ovld __cnfn atan(float3 y_over_x); +float4 __ovld __cnfn atan(float4 y_over_x); +float8 __ovld __cnfn atan(float8 y_over_x); +float16 __ovld __cnfn atan(float16 y_over_x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atan(double y_over_x); +double2 __ovld __cnfn atan(double2 y_over_x); +double3 __ovld __cnfn atan(double3 y_over_x); +double4 __ovld __cnfn atan(double4 y_over_x); +double8 __ovld __cnfn atan(double8 y_over_x); +double16 __ovld __cnfn atan(double16 y_over_x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atan(half y_over_x); +half2 __ovld __cnfn atan(half2 y_over_x); +half3 __ovld __cnfn atan(half3 y_over_x); +half4 __ovld __cnfn atan(half4 y_over_x); +half8 __ovld __cnfn atan(half8 y_over_x); +half16 __ovld __cnfn atan(half16 y_over_x); +#endif //cl_khr_fp16 + +/** + * Arc tangent of y / x. 
+ */ +float __ovld __cnfn atan2(float y, float x); +float2 __ovld __cnfn atan2(float2 y, float2 x); +float3 __ovld __cnfn atan2(float3 y, float3 x); +float4 __ovld __cnfn atan2(float4 y, float4 x); +float8 __ovld __cnfn atan2(float8 y, float8 x); +float16 __ovld __cnfn atan2(float16 y, float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atan2(double y, double x); +double2 __ovld __cnfn atan2(double2 y, double2 x); +double3 __ovld __cnfn atan2(double3 y, double3 x); +double4 __ovld __cnfn atan2(double4 y, double4 x); +double8 __ovld __cnfn atan2(double8 y, double8 x); +double16 __ovld __cnfn atan2(double16 y, double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atan2(half y, half x); +half2 __ovld __cnfn atan2(half2 y, half2 x); +half3 __ovld __cnfn atan2(half3 y, half3 x); +half4 __ovld __cnfn atan2(half4 y, half4 x); +half8 __ovld __cnfn atan2(half8 y, half8 x); +half16 __ovld __cnfn atan2(half16 y, half16 x); +#endif //cl_khr_fp16 + +/** + * Hyperbolic arc tangent. + */ +float __ovld __cnfn atanh(float); +float2 __ovld __cnfn atanh(float2); +float3 __ovld __cnfn atanh(float3); +float4 __ovld __cnfn atanh(float4); +float8 __ovld __cnfn atanh(float8); +float16 __ovld __cnfn atanh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn atanh(double); +double2 __ovld __cnfn atanh(double2); +double3 __ovld __cnfn atanh(double3); +double4 __ovld __cnfn atanh(double4); +double8 __ovld __cnfn atanh(double8); +double16 __ovld __cnfn atanh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atanh(half); +half2 __ovld __cnfn atanh(half2); +half3 __ovld __cnfn atanh(half3); +half4 __ovld __cnfn atanh(half4); +half8 __ovld __cnfn atanh(half8); +half16 __ovld __cnfn atanh(half16); +#endif //cl_khr_fp16 + +/** + * Compute atan (x) / PI. + */ +float __ovld __cnfn atanpi(float x); +float2 __ovld __cnfn atanpi(float2 x); +float3 __ovld __cnfn atanpi(float3 x); +float4 __ovld __cnfn atanpi(float4 x); +float8 __ovld __cnfn atanpi(float8 x); +float16 __ovld __cnfn atanpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atanpi(double x); +double2 __ovld __cnfn atanpi(double2 x); +double3 __ovld __cnfn atanpi(double3 x); +double4 __ovld __cnfn atanpi(double4 x); +double8 __ovld __cnfn atanpi(double8 x); +double16 __ovld __cnfn atanpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atanpi(half x); +half2 __ovld __cnfn atanpi(half2 x); +half3 __ovld __cnfn atanpi(half3 x); +half4 __ovld __cnfn atanpi(half4 x); +half8 __ovld __cnfn atanpi(half8 x); +half16 __ovld __cnfn atanpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute atan2 (y, x) / PI. 
+ */ +float __ovld __cnfn atan2pi(float y, float x); +float2 __ovld __cnfn atan2pi(float2 y, float2 x); +float3 __ovld __cnfn atan2pi(float3 y, float3 x); +float4 __ovld __cnfn atan2pi(float4 y, float4 x); +float8 __ovld __cnfn atan2pi(float8 y, float8 x); +float16 __ovld __cnfn atan2pi(float16 y, float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atan2pi(double y, double x); +double2 __ovld __cnfn atan2pi(double2 y, double2 x); +double3 __ovld __cnfn atan2pi(double3 y, double3 x); +double4 __ovld __cnfn atan2pi(double4 y, double4 x); +double8 __ovld __cnfn atan2pi(double8 y, double8 x); +double16 __ovld __cnfn atan2pi(double16 y, double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atan2pi(half y, half x); +half2 __ovld __cnfn atan2pi(half2 y, half2 x); +half3 __ovld __cnfn atan2pi(half3 y, half3 x); +half4 __ovld __cnfn atan2pi(half4 y, half4 x); +half8 __ovld __cnfn atan2pi(half8 y, half8 x); +half16 __ovld __cnfn atan2pi(half16 y, half16 x); +#endif //cl_khr_fp16 + +/** + * Compute cube-root. + */ +float __ovld __cnfn cbrt(float); +float2 __ovld __cnfn cbrt(float2); +float3 __ovld __cnfn cbrt(float3); +float4 __ovld __cnfn cbrt(float4); +float8 __ovld __cnfn cbrt(float8); +float16 __ovld __cnfn cbrt(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn cbrt(double); +double2 __ovld __cnfn cbrt(double2); +double3 __ovld __cnfn cbrt(double3); +double4 __ovld __cnfn cbrt(double4); +double8 __ovld __cnfn cbrt(double8); +double16 __ovld __cnfn cbrt(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cbrt(half); +half2 __ovld __cnfn cbrt(half2); +half3 __ovld __cnfn cbrt(half3); +half4 __ovld __cnfn cbrt(half4); +half8 __ovld __cnfn cbrt(half8); +half16 __ovld __cnfn cbrt(half16); +#endif //cl_khr_fp16 + +/** + * Round to integral value using the round to positive + * infinity rounding mode. + */ +float __ovld __cnfn ceil(float); +float2 __ovld __cnfn ceil(float2); +float3 __ovld __cnfn ceil(float3); +float4 __ovld __cnfn ceil(float4); +float8 __ovld __cnfn ceil(float8); +float16 __ovld __cnfn ceil(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn ceil(double); +double2 __ovld __cnfn ceil(double2); +double3 __ovld __cnfn ceil(double3); +double4 __ovld __cnfn ceil(double4); +double8 __ovld __cnfn ceil(double8); +double16 __ovld __cnfn ceil(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn ceil(half); +half2 __ovld __cnfn ceil(half2); +half3 __ovld __cnfn ceil(half3); +half4 __ovld __cnfn ceil(half4); +half8 __ovld __cnfn ceil(half8); +half16 __ovld __cnfn ceil(half16); +#endif //cl_khr_fp16 + +/** + * Returns x with its sign changed to match the sign of y. 
+ */ +float __ovld __cnfn copysign(float x, float y); +float2 __ovld __cnfn copysign(float2 x, float2 y); +float3 __ovld __cnfn copysign(float3 x, float3 y); +float4 __ovld __cnfn copysign(float4 x, float4 y); +float8 __ovld __cnfn copysign(float8 x, float8 y); +float16 __ovld __cnfn copysign(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn copysign(double x, double y); +double2 __ovld __cnfn copysign(double2 x, double2 y); +double3 __ovld __cnfn copysign(double3 x, double3 y); +double4 __ovld __cnfn copysign(double4 x, double4 y); +double8 __ovld __cnfn copysign(double8 x, double8 y); +double16 __ovld __cnfn copysign(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn copysign(half x, half y); +half2 __ovld __cnfn copysign(half2 x, half2 y); +half3 __ovld __cnfn copysign(half3 x, half3 y); +half4 __ovld __cnfn copysign(half4 x, half4 y); +half8 __ovld __cnfn copysign(half8 x, half8 y); +half16 __ovld __cnfn copysign(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute cosine. + */ +float __ovld __cnfn cos(float); +float2 __ovld __cnfn cos(float2); +float3 __ovld __cnfn cos(float3); +float4 __ovld __cnfn cos(float4); +float8 __ovld __cnfn cos(float8); +float16 __ovld __cnfn cos(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn cos(double); +double2 __ovld __cnfn cos(double2); +double3 __ovld __cnfn cos(double3); +double4 __ovld __cnfn cos(double4); +double8 __ovld __cnfn cos(double8); +double16 __ovld __cnfn cos(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cos(half); +half2 __ovld __cnfn cos(half2); +half3 __ovld __cnfn cos(half3); +half4 __ovld __cnfn cos(half4); +half8 __ovld __cnfn cos(half8); +half16 __ovld __cnfn cos(half16); +#endif //cl_khr_fp16 + +/** + * Compute hyperbolic cosine. + */ +float __ovld __cnfn cosh(float); +float2 __ovld __cnfn cosh(float2); +float3 __ovld __cnfn cosh(float3); +float4 __ovld __cnfn cosh(float4); +float8 __ovld __cnfn cosh(float8); +float16 __ovld __cnfn cosh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn cosh(double); +double2 __ovld __cnfn cosh(double2); +double3 __ovld __cnfn cosh(double3); +double4 __ovld __cnfn cosh(double4); +double8 __ovld __cnfn cosh(double8); +double16 __ovld __cnfn cosh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cosh(half); +half2 __ovld __cnfn cosh(half2); +half3 __ovld __cnfn cosh(half3); +half4 __ovld __cnfn cosh(half4); +half8 __ovld __cnfn cosh(half8); +half16 __ovld __cnfn cosh(half16); +#endif //cl_khr_fp16 + +/** + * Compute cos (PI * x). + */ +float __ovld __cnfn cospi(float x); +float2 __ovld __cnfn cospi(float2 x); +float3 __ovld __cnfn cospi(float3 x); +float4 __ovld __cnfn cospi(float4 x); +float8 __ovld __cnfn cospi(float8 x); +float16 __ovld __cnfn cospi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn cospi(double x); +double2 __ovld __cnfn cospi(double2 x); +double3 __ovld __cnfn cospi(double3 x); +double4 __ovld __cnfn cospi(double4 x); +double8 __ovld __cnfn cospi(double8 x); +double16 __ovld __cnfn cospi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cospi(half x); +half2 __ovld __cnfn cospi(half2 x); +half3 __ovld __cnfn cospi(half3 x); +half4 __ovld __cnfn cospi(half4 x); +half8 __ovld __cnfn cospi(half8 x); +half16 __ovld __cnfn cospi(half16 x); +#endif //cl_khr_fp16 + +/** + * Complementary error function. 
+ */ +float __ovld __cnfn erfc(float); +float2 __ovld __cnfn erfc(float2); +float3 __ovld __cnfn erfc(float3); +float4 __ovld __cnfn erfc(float4); +float8 __ovld __cnfn erfc(float8); +float16 __ovld __cnfn erfc(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn erfc(double); +double2 __ovld __cnfn erfc(double2); +double3 __ovld __cnfn erfc(double3); +double4 __ovld __cnfn erfc(double4); +double8 __ovld __cnfn erfc(double8); +double16 __ovld __cnfn erfc(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn erfc(half); +half2 __ovld __cnfn erfc(half2); +half3 __ovld __cnfn erfc(half3); +half4 __ovld __cnfn erfc(half4); +half8 __ovld __cnfn erfc(half8); +half16 __ovld __cnfn erfc(half16); +#endif //cl_khr_fp16 + +/** + * Error function encountered in integrating the + * normal distribution. + */ +float __ovld __cnfn erf(float); +float2 __ovld __cnfn erf(float2); +float3 __ovld __cnfn erf(float3); +float4 __ovld __cnfn erf(float4); +float8 __ovld __cnfn erf(float8); +float16 __ovld __cnfn erf(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn erf(double); +double2 __ovld __cnfn erf(double2); +double3 __ovld __cnfn erf(double3); +double4 __ovld __cnfn erf(double4); +double8 __ovld __cnfn erf(double8); +double16 __ovld __cnfn erf(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn erf(half); +half2 __ovld __cnfn erf(half2); +half3 __ovld __cnfn erf(half3); +half4 __ovld __cnfn erf(half4); +half8 __ovld __cnfn erf(half8); +half16 __ovld __cnfn erf(half16); +#endif //cl_khr_fp16 + +/** + * Compute the base e exponential function of x. + */ +float __ovld __cnfn exp(float x); +float2 __ovld __cnfn exp(float2 x); +float3 __ovld __cnfn exp(float3 x); +float4 __ovld __cnfn exp(float4 x); +float8 __ovld __cnfn exp(float8 x); +float16 __ovld __cnfn exp(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn exp(double x); +double2 __ovld __cnfn exp(double2 x); +double3 __ovld __cnfn exp(double3 x); +double4 __ovld __cnfn exp(double4 x); +double8 __ovld __cnfn exp(double8 x); +double16 __ovld __cnfn exp(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn exp(half x); +half2 __ovld __cnfn exp(half2 x); +half3 __ovld __cnfn exp(half3 x); +half4 __ovld __cnfn exp(half4 x); +half8 __ovld __cnfn exp(half8 x); +half16 __ovld __cnfn exp(half16 x); +#endif //cl_khr_fp16 + +/** + * Exponential base 2 function. + */ +float __ovld __cnfn exp2(float); +float2 __ovld __cnfn exp2(float2); +float3 __ovld __cnfn exp2(float3); +float4 __ovld __cnfn exp2(float4); +float8 __ovld __cnfn exp2(float8); +float16 __ovld __cnfn exp2(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn exp2(double); +double2 __ovld __cnfn exp2(double2); +double3 __ovld __cnfn exp2(double3); +double4 __ovld __cnfn exp2(double4); +double8 __ovld __cnfn exp2(double8); +double16 __ovld __cnfn exp2(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn exp2(half); +half2 __ovld __cnfn exp2(half2); +half3 __ovld __cnfn exp2(half3); +half4 __ovld __cnfn exp2(half4); +half8 __ovld __cnfn exp2(half8); +half16 __ovld __cnfn exp2(half16); +#endif //cl_khr_fp16 + +/** + * Exponential base 10 function. 
+ */ +float __ovld __cnfn exp10(float); +float2 __ovld __cnfn exp10(float2); +float3 __ovld __cnfn exp10(float3); +float4 __ovld __cnfn exp10(float4); +float8 __ovld __cnfn exp10(float8); +float16 __ovld __cnfn exp10(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn exp10(double); +double2 __ovld __cnfn exp10(double2); +double3 __ovld __cnfn exp10(double3); +double4 __ovld __cnfn exp10(double4); +double8 __ovld __cnfn exp10(double8); +double16 __ovld __cnfn exp10(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn exp10(half); +half2 __ovld __cnfn exp10(half2); +half3 __ovld __cnfn exp10(half3); +half4 __ovld __cnfn exp10(half4); +half8 __ovld __cnfn exp10(half8); +half16 __ovld __cnfn exp10(half16); +#endif //cl_khr_fp16 + +/** + * Compute e^x- 1.0. + */ +float __ovld __cnfn expm1(float x); +float2 __ovld __cnfn expm1(float2 x); +float3 __ovld __cnfn expm1(float3 x); +float4 __ovld __cnfn expm1(float4 x); +float8 __ovld __cnfn expm1(float8 x); +float16 __ovld __cnfn expm1(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn expm1(double x); +double2 __ovld __cnfn expm1(double2 x); +double3 __ovld __cnfn expm1(double3 x); +double4 __ovld __cnfn expm1(double4 x); +double8 __ovld __cnfn expm1(double8 x); +double16 __ovld __cnfn expm1(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn expm1(half x); +half2 __ovld __cnfn expm1(half2 x); +half3 __ovld __cnfn expm1(half3 x); +half4 __ovld __cnfn expm1(half4 x); +half8 __ovld __cnfn expm1(half8 x); +half16 __ovld __cnfn expm1(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute absolute value of a floating-point number. + */ +float __ovld __cnfn fabs(float); +float2 __ovld __cnfn fabs(float2); +float3 __ovld __cnfn fabs(float3); +float4 __ovld __cnfn fabs(float4); +float8 __ovld __cnfn fabs(float8); +float16 __ovld __cnfn fabs(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn fabs(double); +double2 __ovld __cnfn fabs(double2); +double3 __ovld __cnfn fabs(double3); +double4 __ovld __cnfn fabs(double4); +double8 __ovld __cnfn fabs(double8); +double16 __ovld __cnfn fabs(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fabs(half); +half2 __ovld __cnfn fabs(half2); +half3 __ovld __cnfn fabs(half3); +half4 __ovld __cnfn fabs(half4); +half8 __ovld __cnfn fabs(half8); +half16 __ovld __cnfn fabs(half16); +#endif //cl_khr_fp16 + +/** + * x - y if x > y, +0 if x is less than or equal to y. + */ +float __ovld __cnfn fdim(float x, float y); +float2 __ovld __cnfn fdim(float2 x, float2 y); +float3 __ovld __cnfn fdim(float3 x, float3 y); +float4 __ovld __cnfn fdim(float4 x, float4 y); +float8 __ovld __cnfn fdim(float8 x, float8 y); +float16 __ovld __cnfn fdim(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fdim(double x, double y); +double2 __ovld __cnfn fdim(double2 x, double2 y); +double3 __ovld __cnfn fdim(double3 x, double3 y); +double4 __ovld __cnfn fdim(double4 x, double4 y); +double8 __ovld __cnfn fdim(double8 x, double8 y); +double16 __ovld __cnfn fdim(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fdim(half x, half y); +half2 __ovld __cnfn fdim(half2 x, half2 y); +half3 __ovld __cnfn fdim(half3 x, half3 y); +half4 __ovld __cnfn fdim(half4 x, half4 y); +half8 __ovld __cnfn fdim(half8 x, half8 y); +half16 __ovld __cnfn fdim(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Round to integral value using the round to -ve + * infinity rounding mode. 
+ */ +float __ovld __cnfn floor(float); +float2 __ovld __cnfn floor(float2); +float3 __ovld __cnfn floor(float3); +float4 __ovld __cnfn floor(float4); +float8 __ovld __cnfn floor(float8); +float16 __ovld __cnfn floor(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn floor(double); +double2 __ovld __cnfn floor(double2); +double3 __ovld __cnfn floor(double3); +double4 __ovld __cnfn floor(double4); +double8 __ovld __cnfn floor(double8); +double16 __ovld __cnfn floor(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn floor(half); +half2 __ovld __cnfn floor(half2); +half3 __ovld __cnfn floor(half3); +half4 __ovld __cnfn floor(half4); +half8 __ovld __cnfn floor(half8); +half16 __ovld __cnfn floor(half16); +#endif //cl_khr_fp16 + +/** + * Returns the correctly rounded floating-point + * representation of the sum of c with the infinitely + * precise product of a and b. Rounding of + * intermediate products shall not occur. Edge case + * behavior is per the IEEE 754-2008 standard. + */ +float __ovld __cnfn fma(float a, float b, float c); +float2 __ovld __cnfn fma(float2 a, float2 b, float2 c); +float3 __ovld __cnfn fma(float3 a, float3 b, float3 c); +float4 __ovld __cnfn fma(float4 a, float4 b, float4 c); +float8 __ovld __cnfn fma(float8 a, float8 b, float8 c); +float16 __ovld __cnfn fma(float16 a, float16 b, float16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn fma(double a, double b, double c); +double2 __ovld __cnfn fma(double2 a, double2 b, double2 c); +double3 __ovld __cnfn fma(double3 a, double3 b, double3 c); +double4 __ovld __cnfn fma(double4 a, double4 b, double4 c); +double8 __ovld __cnfn fma(double8 a, double8 b, double8 c); +double16 __ovld __cnfn fma(double16 a, double16 b, double16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fma(half a, half b, half c); +half2 __ovld __cnfn fma(half2 a, half2 b, half2 c); +half3 __ovld __cnfn fma(half3 a, half3 b, half3 c); +half4 __ovld __cnfn fma(half4 a, half4 b, half4 c); +half8 __ovld __cnfn fma(half8 a, half8 b, half8 c); +half16 __ovld __cnfn fma(half16 a, half16 b, half16 c); +#endif //cl_khr_fp16 + +/** + * Returns y if x < y, otherwise it returns x. If one + * argument is a NaN, fmax() returns the other + * argument. If both arguments are NaNs, fmax() + * returns a NaN. 
+ */ +float __ovld __cnfn fmax(float x, float y); +float2 __ovld __cnfn fmax(float2 x, float2 y); +float3 __ovld __cnfn fmax(float3 x, float3 y); +float4 __ovld __cnfn fmax(float4 x, float4 y); +float8 __ovld __cnfn fmax(float8 x, float8 y); +float16 __ovld __cnfn fmax(float16 x, float16 y); +float2 __ovld __cnfn fmax(float2 x, float y); +float3 __ovld __cnfn fmax(float3 x, float y); +float4 __ovld __cnfn fmax(float4 x, float y); +float8 __ovld __cnfn fmax(float8 x, float y); +float16 __ovld __cnfn fmax(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fmax(double x, double y); +double2 __ovld __cnfn fmax(double2 x, double2 y); +double3 __ovld __cnfn fmax(double3 x, double3 y); +double4 __ovld __cnfn fmax(double4 x, double4 y); +double8 __ovld __cnfn fmax(double8 x, double8 y); +double16 __ovld __cnfn fmax(double16 x, double16 y); +double2 __ovld __cnfn fmax(double2 x, double y); +double3 __ovld __cnfn fmax(double3 x, double y); +double4 __ovld __cnfn fmax(double4 x, double y); +double8 __ovld __cnfn fmax(double8 x, double y); +double16 __ovld __cnfn fmax(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fmax(half x, half y); +half2 __ovld __cnfn fmax(half2 x, half2 y); +half3 __ovld __cnfn fmax(half3 x, half3 y); +half4 __ovld __cnfn fmax(half4 x, half4 y); +half8 __ovld __cnfn fmax(half8 x, half8 y); +half16 __ovld __cnfn fmax(half16 x, half16 y); +half2 __ovld __cnfn fmax(half2 x, half y); +half3 __ovld __cnfn fmax(half3 x, half y); +half4 __ovld __cnfn fmax(half4 x, half y); +half8 __ovld __cnfn fmax(half8 x, half y); +half16 __ovld __cnfn fmax(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Returns y if y < x, otherwise it returns x. If one + * argument is a NaN, fmin() returns the other + * argument. If both arguments are NaNs, fmin() + * returns a NaN. + */ +float __ovld __cnfn fmin(float x, float y); +float2 __ovld __cnfn fmin(float2 x, float2 y); +float3 __ovld __cnfn fmin(float3 x, float3 y); +float4 __ovld __cnfn fmin(float4 x, float4 y); +float8 __ovld __cnfn fmin(float8 x, float8 y); +float16 __ovld __cnfn fmin(float16 x, float16 y); +float2 __ovld __cnfn fmin(float2 x, float y); +float3 __ovld __cnfn fmin(float3 x, float y); +float4 __ovld __cnfn fmin(float4 x, float y); +float8 __ovld __cnfn fmin(float8 x, float y); +float16 __ovld __cnfn fmin(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fmin(double x, double y); +double2 __ovld __cnfn fmin(double2 x, double2 y); +double3 __ovld __cnfn fmin(double3 x, double3 y); +double4 __ovld __cnfn fmin(double4 x, double4 y); +double8 __ovld __cnfn fmin(double8 x, double8 y); +double16 __ovld __cnfn fmin(double16 x, double16 y); +double2 __ovld __cnfn fmin(double2 x, double y); +double3 __ovld __cnfn fmin(double3 x, double y); +double4 __ovld __cnfn fmin(double4 x, double y); +double8 __ovld __cnfn fmin(double8 x, double y); +double16 __ovld __cnfn fmin(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fmin(half x, half y); +half2 __ovld __cnfn fmin(half2 x, half2 y); +half3 __ovld __cnfn fmin(half3 x, half3 y); +half4 __ovld __cnfn fmin(half4 x, half4 y); +half8 __ovld __cnfn fmin(half8 x, half8 y); +half16 __ovld __cnfn fmin(half16 x, half16 y); +half2 __ovld __cnfn fmin(half2 x, half y); +half3 __ovld __cnfn fmin(half3 x, half y); +half4 __ovld __cnfn fmin(half4 x, half y); +half8 __ovld __cnfn fmin(half8 x, half y); +half16 __ovld __cnfn fmin(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Modulus. Returns x - y * trunc (x/y). 
+ */ +float __ovld __cnfn fmod(float x, float y); +float2 __ovld __cnfn fmod(float2 x, float2 y); +float3 __ovld __cnfn fmod(float3 x, float3 y); +float4 __ovld __cnfn fmod(float4 x, float4 y); +float8 __ovld __cnfn fmod(float8 x, float8 y); +float16 __ovld __cnfn fmod(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fmod(double x, double y); +double2 __ovld __cnfn fmod(double2 x, double2 y); +double3 __ovld __cnfn fmod(double3 x, double3 y); +double4 __ovld __cnfn fmod(double4 x, double4 y); +double8 __ovld __cnfn fmod(double8 x, double8 y); +double16 __ovld __cnfn fmod(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fmod(half x, half y); +half2 __ovld __cnfn fmod(half2 x, half2 y); +half3 __ovld __cnfn fmod(half3 x, half3 y); +half4 __ovld __cnfn fmod(half4 x, half4 y); +half8 __ovld __cnfn fmod(half8 x, half8 y); +half16 __ovld __cnfn fmod(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns fmin(x - floor (x), 0x1.fffffep-1f ). + * floor(x) is returned in iptr. + */ +#if defined(__opencl_c_generic_address_space) +float __ovld fract(float x, float *iptr); +float2 __ovld fract(float2 x, float2 *iptr); +float3 __ovld fract(float3 x, float3 *iptr); +float4 __ovld fract(float4 x, float4 *iptr); +float8 __ovld fract(float8 x, float8 *iptr); +float16 __ovld fract(float16 x, float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld fract(double x, double *iptr); +double2 __ovld fract(double2 x, double2 *iptr); +double3 __ovld fract(double3 x, double3 *iptr); +double4 __ovld fract(double4 x, double4 *iptr); +double8 __ovld fract(double8 x, double8 *iptr); +double16 __ovld fract(double16 x, double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld fract(half x, half *iptr); +half2 __ovld fract(half2 x, half2 *iptr); +half3 __ovld fract(half3 x, half3 *iptr); +half4 __ovld fract(half4 x, half4 *iptr); +half8 __ovld fract(half8 x, half8 *iptr); +half16 __ovld fract(half16 x, half16 *iptr); +#endif //cl_khr_fp16 +#else +float __ovld fract(float x, __global float *iptr); +float2 __ovld fract(float2 x, __global float2 *iptr); +float3 __ovld fract(float3 x, __global float3 *iptr); +float4 __ovld fract(float4 x, __global float4 *iptr); +float8 __ovld fract(float8 x, __global float8 *iptr); +float16 __ovld fract(float16 x, __global float16 *iptr); +float __ovld fract(float x, __local float *iptr); +float2 __ovld fract(float2 x, __local float2 *iptr); +float3 __ovld fract(float3 x, __local float3 *iptr); +float4 __ovld fract(float4 x, __local float4 *iptr); +float8 __ovld fract(float8 x, __local float8 *iptr); +float16 __ovld fract(float16 x, __local float16 *iptr); +float __ovld fract(float x, __private float *iptr); +float2 __ovld fract(float2 x, __private float2 *iptr); +float3 __ovld fract(float3 x, __private float3 *iptr); +float4 __ovld fract(float4 x, __private float4 *iptr); +float8 __ovld fract(float8 x, __private float8 *iptr); +float16 __ovld fract(float16 x, __private float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld fract(double x, __global double *iptr); +double2 __ovld fract(double2 x, __global double2 *iptr); +double3 __ovld fract(double3 x, __global double3 *iptr); +double4 __ovld fract(double4 x, __global double4 *iptr); +double8 __ovld fract(double8 x, __global double8 *iptr); +double16 __ovld fract(double16 x, __global double16 *iptr); +double __ovld fract(double x, __local double *iptr); +double2 __ovld fract(double2 x, __local double2 *iptr); +double3 __ovld fract(double3 x, __local double3 *iptr); +double4 
__ovld fract(double4 x, __local double4 *iptr); +double8 __ovld fract(double8 x, __local double8 *iptr); +double16 __ovld fract(double16 x, __local double16 *iptr); +double __ovld fract(double x, __private double *iptr); +double2 __ovld fract(double2 x, __private double2 *iptr); +double3 __ovld fract(double3 x, __private double3 *iptr); +double4 __ovld fract(double4 x, __private double4 *iptr); +double8 __ovld fract(double8 x, __private double8 *iptr); +double16 __ovld fract(double16 x, __private double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld fract(half x, __global half *iptr); +half2 __ovld fract(half2 x, __global half2 *iptr); +half3 __ovld fract(half3 x, __global half3 *iptr); +half4 __ovld fract(half4 x, __global half4 *iptr); +half8 __ovld fract(half8 x, __global half8 *iptr); +half16 __ovld fract(half16 x, __global half16 *iptr); +half __ovld fract(half x, __local half *iptr); +half2 __ovld fract(half2 x, __local half2 *iptr); +half3 __ovld fract(half3 x, __local half3 *iptr); +half4 __ovld fract(half4 x, __local half4 *iptr); +half8 __ovld fract(half8 x, __local half8 *iptr); +half16 __ovld fract(half16 x, __local half16 *iptr); +half __ovld fract(half x, __private half *iptr); +half2 __ovld fract(half2 x, __private half2 *iptr); +half3 __ovld fract(half3 x, __private half3 *iptr); +half4 __ovld fract(half4 x, __private half4 *iptr); +half8 __ovld fract(half8 x, __private half8 *iptr); +half16 __ovld fract(half16 x, __private half16 *iptr); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +/** + * Extract mantissa and exponent from x. For each + * component the mantissa returned is a float with + * magnitude in the interval [1/2, 1) or 0. Each + * component of x equals mantissa returned * 2^exp. 
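
fract returns the fractional part and stores floor(x) through the pointer argument; the address-space-specific overloads above exist for builds without the generic address space. A sketch using a __private temporary (names illustrative):

// Split x into fractional and floor parts; &ip is a __private
// pointer, matching one of the overloads declared above.
__kernel void split_parts(__global const float *x,
                          __global float *frac_out,
                          __global float *floor_out) {
  size_t i = get_global_id(0);
  float ip;
  frac_out[i] = fract(x[i], &ip);
  floor_out[i] = ip;
}
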
+ */ +#if defined(__opencl_c_generic_address_space) +float __ovld frexp(float x, int *exp); +float2 __ovld frexp(float2 x, int2 *exp); +float3 __ovld frexp(float3 x, int3 *exp); +float4 __ovld frexp(float4 x, int4 *exp); +float8 __ovld frexp(float8 x, int8 *exp); +float16 __ovld frexp(float16 x, int16 *exp); +#ifdef cl_khr_fp64 +double __ovld frexp(double x, int *exp); +double2 __ovld frexp(double2 x, int2 *exp); +double3 __ovld frexp(double3 x, int3 *exp); +double4 __ovld frexp(double4 x, int4 *exp); +double8 __ovld frexp(double8 x, int8 *exp); +double16 __ovld frexp(double16 x, int16 *exp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld frexp(half x, int *exp); +half2 __ovld frexp(half2 x, int2 *exp); +half3 __ovld frexp(half3 x, int3 *exp); +half4 __ovld frexp(half4 x, int4 *exp); +half8 __ovld frexp(half8 x, int8 *exp); +half16 __ovld frexp(half16 x, int16 *exp); +#endif //cl_khr_fp16 +#else +float __ovld frexp(float x, __global int *exp); +float2 __ovld frexp(float2 x, __global int2 *exp); +float3 __ovld frexp(float3 x, __global int3 *exp); +float4 __ovld frexp(float4 x, __global int4 *exp); +float8 __ovld frexp(float8 x, __global int8 *exp); +float16 __ovld frexp(float16 x, __global int16 *exp); +float __ovld frexp(float x, __local int *exp); +float2 __ovld frexp(float2 x, __local int2 *exp); +float3 __ovld frexp(float3 x, __local int3 *exp); +float4 __ovld frexp(float4 x, __local int4 *exp); +float8 __ovld frexp(float8 x, __local int8 *exp); +float16 __ovld frexp(float16 x, __local int16 *exp); +float __ovld frexp(float x, __private int *exp); +float2 __ovld frexp(float2 x, __private int2 *exp); +float3 __ovld frexp(float3 x, __private int3 *exp); +float4 __ovld frexp(float4 x, __private int4 *exp); +float8 __ovld frexp(float8 x, __private int8 *exp); +float16 __ovld frexp(float16 x, __private int16 *exp); +#ifdef cl_khr_fp64 +double __ovld frexp(double x, __global int *exp); +double2 __ovld frexp(double2 x, __global int2 *exp); +double3 __ovld frexp(double3 x, __global int3 *exp); +double4 __ovld frexp(double4 x, __global int4 *exp); +double8 __ovld frexp(double8 x, __global int8 *exp); +double16 __ovld frexp(double16 x, __global int16 *exp); +double __ovld frexp(double x, __local int *exp); +double2 __ovld frexp(double2 x, __local int2 *exp); +double3 __ovld frexp(double3 x, __local int3 *exp); +double4 __ovld frexp(double4 x, __local int4 *exp); +double8 __ovld frexp(double8 x, __local int8 *exp); +double16 __ovld frexp(double16 x, __local int16 *exp); +double __ovld frexp(double x, __private int *exp); +double2 __ovld frexp(double2 x, __private int2 *exp); +double3 __ovld frexp(double3 x, __private int3 *exp); +double4 __ovld frexp(double4 x, __private int4 *exp); +double8 __ovld frexp(double8 x, __private int8 *exp); +double16 __ovld frexp(double16 x, __private int16 *exp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld frexp(half x, __global int *exp); +half2 __ovld frexp(half2 x, __global int2 *exp); +half3 __ovld frexp(half3 x, __global int3 *exp); +half4 __ovld frexp(half4 x, __global int4 *exp); +half8 __ovld frexp(half8 x, __global int8 *exp); +half16 __ovld frexp(half16 x, __global int16 *exp); +half __ovld frexp(half x, __local int *exp); +half2 __ovld frexp(half2 x, __local int2 *exp); +half3 __ovld frexp(half3 x, __local int3 *exp); +half4 __ovld frexp(half4 x, __local int4 *exp); +half8 __ovld frexp(half8 x, __local int8 *exp); +half16 __ovld frexp(half16 x, __local int16 *exp); +half __ovld frexp(half x, __private int *exp); +half2 __ovld frexp(half2 
x, __private int2 *exp); +half3 __ovld frexp(half3 x, __private int3 *exp); +half4 __ovld frexp(half4 x, __private int4 *exp); +half8 __ovld frexp(half8 x, __private int8 *exp); +half16 __ovld frexp(half16 x, __private int16 *exp); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +/** + * Compute the value of the square root of x^2 + y^2 + * without undue overflow or underflow. + */ +float __ovld __cnfn hypot(float x, float y); +float2 __ovld __cnfn hypot(float2 x, float2 y); +float3 __ovld __cnfn hypot(float3 x, float3 y); +float4 __ovld __cnfn hypot(float4 x, float4 y); +float8 __ovld __cnfn hypot(float8 x, float8 y); +float16 __ovld __cnfn hypot(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn hypot(double x, double y); +double2 __ovld __cnfn hypot(double2 x, double2 y); +double3 __ovld __cnfn hypot(double3 x, double3 y); +double4 __ovld __cnfn hypot(double4 x, double4 y); +double8 __ovld __cnfn hypot(double8 x, double8 y); +double16 __ovld __cnfn hypot(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn hypot(half x, half y); +half2 __ovld __cnfn hypot(half2 x, half2 y); +half3 __ovld __cnfn hypot(half3 x, half3 y); +half4 __ovld __cnfn hypot(half4 x, half4 y); +half8 __ovld __cnfn hypot(half8 x, half8 y); +half16 __ovld __cnfn hypot(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Return the exponent as an integer value. + */ +int __ovld __cnfn ilogb(float x); +int2 __ovld __cnfn ilogb(float2 x); +int3 __ovld __cnfn ilogb(float3 x); +int4 __ovld __cnfn ilogb(float4 x); +int8 __ovld __cnfn ilogb(float8 x); +int16 __ovld __cnfn ilogb(float16 x); +#ifdef cl_khr_fp64 +int __ovld __cnfn ilogb(double x); +int2 __ovld __cnfn ilogb(double2 x); +int3 __ovld __cnfn ilogb(double3 x); +int4 __ovld __cnfn ilogb(double4 x); +int8 __ovld __cnfn ilogb(double8 x); +int16 __ovld __cnfn ilogb(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn ilogb(half x); +int2 __ovld __cnfn ilogb(half2 x); +int3 __ovld __cnfn ilogb(half3 x); +int4 __ovld __cnfn ilogb(half4 x); +int8 __ovld __cnfn ilogb(half8 x); +int16 __ovld __cnfn ilogb(half16 x); +#endif //cl_khr_fp16 + +/** + * Multiply x by 2 to the power n. 
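
frexp and ilogb both expose the exponent, with an off-by-one convention between them: frexp normalizes the mantissa into [1/2, 1), so for finite nonzero x, ilogb(x) equals frexp's exponent minus one. A sketch:

// For finite nonzero x: frexp gives x == m * 2^e with m in [0.5, 1),
// and ilogb(x) == e - 1.
__kernel void exponents(__global const float *x,
                        __global int *e_frexp,
                        __global int *e_ilogb) {
  size_t i = get_global_id(0);
  int e;
  (void)frexp(x[i], &e);
  e_frexp[i] = e;
  e_ilogb[i] = ilogb(x[i]);
}
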
+ */ +float __ovld __cnfn ldexp(float x, int n); +float2 __ovld __cnfn ldexp(float2 x, int2 n); +float3 __ovld __cnfn ldexp(float3 x, int3 n); +float4 __ovld __cnfn ldexp(float4 x, int4 n); +float8 __ovld __cnfn ldexp(float8 x, int8 n); +float16 __ovld __cnfn ldexp(float16 x, int16 n); +float2 __ovld __cnfn ldexp(float2 x, int n); +float3 __ovld __cnfn ldexp(float3 x, int n); +float4 __ovld __cnfn ldexp(float4 x, int n); +float8 __ovld __cnfn ldexp(float8 x, int n); +float16 __ovld __cnfn ldexp(float16 x, int n); +#ifdef cl_khr_fp64 +double __ovld __cnfn ldexp(double x, int n); +double2 __ovld __cnfn ldexp(double2 x, int2 n); +double3 __ovld __cnfn ldexp(double3 x, int3 n); +double4 __ovld __cnfn ldexp(double4 x, int4 n); +double8 __ovld __cnfn ldexp(double8 x, int8 n); +double16 __ovld __cnfn ldexp(double16 x, int16 n); +double2 __ovld __cnfn ldexp(double2 x, int n); +double3 __ovld __cnfn ldexp(double3 x, int n); +double4 __ovld __cnfn ldexp(double4 x, int n); +double8 __ovld __cnfn ldexp(double8 x, int n); +double16 __ovld __cnfn ldexp(double16 x, int n); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn ldexp(half x, int n); +half2 __ovld __cnfn ldexp(half2 x, int2 n); +half3 __ovld __cnfn ldexp(half3 x, int3 n); +half4 __ovld __cnfn ldexp(half4 x, int4 n); +half8 __ovld __cnfn ldexp(half8 x, int8 n); +half16 __ovld __cnfn ldexp(half16 x, int16 n); +half2 __ovld __cnfn ldexp(half2 x, int n); +half3 __ovld __cnfn ldexp(half3 x, int n); +half4 __ovld __cnfn ldexp(half4 x, int n); +half8 __ovld __cnfn ldexp(half8 x, int n); +half16 __ovld __cnfn ldexp(half16 x, int n); +#endif //cl_khr_fp16 + +/** + * Log gamma function. Returns the natural + * logarithm of the absolute value of the gamma + * function. The sign of the gamma function is + * returned in the signp argument of lgamma_r. 
+ */ +float __ovld __cnfn lgamma(float x); +float2 __ovld __cnfn lgamma(float2 x); +float3 __ovld __cnfn lgamma(float3 x); +float4 __ovld __cnfn lgamma(float4 x); +float8 __ovld __cnfn lgamma(float8 x); +float16 __ovld __cnfn lgamma(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn lgamma(double x); +double2 __ovld __cnfn lgamma(double2 x); +double3 __ovld __cnfn lgamma(double3 x); +double4 __ovld __cnfn lgamma(double4 x); +double8 __ovld __cnfn lgamma(double8 x); +double16 __ovld __cnfn lgamma(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn lgamma(half x); +half2 __ovld __cnfn lgamma(half2 x); +half3 __ovld __cnfn lgamma(half3 x); +half4 __ovld __cnfn lgamma(half4 x); +half8 __ovld __cnfn lgamma(half8 x); +half16 __ovld __cnfn lgamma(half16 x); +#endif //cl_khr_fp16 + +#if defined(__opencl_c_generic_address_space) +float __ovld lgamma_r(float x, int *signp); +float2 __ovld lgamma_r(float2 x, int2 *signp); +float3 __ovld lgamma_r(float3 x, int3 *signp); +float4 __ovld lgamma_r(float4 x, int4 *signp); +float8 __ovld lgamma_r(float8 x, int8 *signp); +float16 __ovld lgamma_r(float16 x, int16 *signp); +#ifdef cl_khr_fp64 +double __ovld lgamma_r(double x, int *signp); +double2 __ovld lgamma_r(double2 x, int2 *signp); +double3 __ovld lgamma_r(double3 x, int3 *signp); +double4 __ovld lgamma_r(double4 x, int4 *signp); +double8 __ovld lgamma_r(double8 x, int8 *signp); +double16 __ovld lgamma_r(double16 x, int16 *signp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld lgamma_r(half x, int *signp); +half2 __ovld lgamma_r(half2 x, int2 *signp); +half3 __ovld lgamma_r(half3 x, int3 *signp); +half4 __ovld lgamma_r(half4 x, int4 *signp); +half8 __ovld lgamma_r(half8 x, int8 *signp); +half16 __ovld lgamma_r(half16 x, int16 *signp); +#endif //cl_khr_fp16 +#else +float __ovld lgamma_r(float x, __global int *signp); +float2 __ovld lgamma_r(float2 x, __global int2 *signp); +float3 __ovld lgamma_r(float3 x, __global int3 *signp); +float4 __ovld lgamma_r(float4 x, __global int4 *signp); +float8 __ovld lgamma_r(float8 x, __global int8 *signp); +float16 __ovld lgamma_r(float16 x, __global int16 *signp); +float __ovld lgamma_r(float x, __local int *signp); +float2 __ovld lgamma_r(float2 x, __local int2 *signp); +float3 __ovld lgamma_r(float3 x, __local int3 *signp); +float4 __ovld lgamma_r(float4 x, __local int4 *signp); +float8 __ovld lgamma_r(float8 x, __local int8 *signp); +float16 __ovld lgamma_r(float16 x, __local int16 *signp); +float __ovld lgamma_r(float x, __private int *signp); +float2 __ovld lgamma_r(float2 x, __private int2 *signp); +float3 __ovld lgamma_r(float3 x, __private int3 *signp); +float4 __ovld lgamma_r(float4 x, __private int4 *signp); +float8 __ovld lgamma_r(float8 x, __private int8 *signp); +float16 __ovld lgamma_r(float16 x, __private int16 *signp); +#ifdef cl_khr_fp64 +double __ovld lgamma_r(double x, __global int *signp); +double2 __ovld lgamma_r(double2 x, __global int2 *signp); +double3 __ovld lgamma_r(double3 x, __global int3 *signp); +double4 __ovld lgamma_r(double4 x, __global int4 *signp); +double8 __ovld lgamma_r(double8 x, __global int8 *signp); +double16 __ovld lgamma_r(double16 x, __global int16 *signp); +double __ovld lgamma_r(double x, __local int *signp); +double2 __ovld lgamma_r(double2 x, __local int2 *signp); +double3 __ovld lgamma_r(double3 x, __local int3 *signp); +double4 __ovld lgamma_r(double4 x, __local int4 *signp); +double8 __ovld lgamma_r(double8 x, __local int8 *signp); +double16 __ovld lgamma_r(double16 x, __local int16 
*signp); +double __ovld lgamma_r(double x, __private int *signp); +double2 __ovld lgamma_r(double2 x, __private int2 *signp); +double3 __ovld lgamma_r(double3 x, __private int3 *signp); +double4 __ovld lgamma_r(double4 x, __private int4 *signp); +double8 __ovld lgamma_r(double8 x, __private int8 *signp); +double16 __ovld lgamma_r(double16 x, __private int16 *signp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld lgamma_r(half x, __global int *signp); +half2 __ovld lgamma_r(half2 x, __global int2 *signp); +half3 __ovld lgamma_r(half3 x, __global int3 *signp); +half4 __ovld lgamma_r(half4 x, __global int4 *signp); +half8 __ovld lgamma_r(half8 x, __global int8 *signp); +half16 __ovld lgamma_r(half16 x, __global int16 *signp); +half __ovld lgamma_r(half x, __local int *signp); +half2 __ovld lgamma_r(half2 x, __local int2 *signp); +half3 __ovld lgamma_r(half3 x, __local int3 *signp); +half4 __ovld lgamma_r(half4 x, __local int4 *signp); +half8 __ovld lgamma_r(half8 x, __local int8 *signp); +half16 __ovld lgamma_r(half16 x, __local int16 *signp); +half __ovld lgamma_r(half x, __private int *signp); +half2 __ovld lgamma_r(half2 x, __private int2 *signp); +half3 __ovld lgamma_r(half3 x, __private int3 *signp); +half4 __ovld lgamma_r(half4 x, __private int4 *signp); +half8 __ovld lgamma_r(half8 x, __private int8 *signp); +half16 __ovld lgamma_r(half16 x, __private int16 *signp); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +/** + * Compute natural logarithm. + */ +float __ovld __cnfn log(float); +float2 __ovld __cnfn log(float2); +float3 __ovld __cnfn log(float3); +float4 __ovld __cnfn log(float4); +float8 __ovld __cnfn log(float8); +float16 __ovld __cnfn log(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn log(double); +double2 __ovld __cnfn log(double2); +double3 __ovld __cnfn log(double3); +double4 __ovld __cnfn log(double4); +double8 __ovld __cnfn log(double8); +double16 __ovld __cnfn log(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log(half); +half2 __ovld __cnfn log(half2); +half3 __ovld __cnfn log(half3); +half4 __ovld __cnfn log(half4); +half8 __ovld __cnfn log(half8); +half16 __ovld __cnfn log(half16); +#endif //cl_khr_fp16 + +/** + * Compute a base 2 logarithm. + */ +float __ovld __cnfn log2(float); +float2 __ovld __cnfn log2(float2); +float3 __ovld __cnfn log2(float3); +float4 __ovld __cnfn log2(float4); +float8 __ovld __cnfn log2(float8); +float16 __ovld __cnfn log2(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn log2(double); +double2 __ovld __cnfn log2(double2); +double3 __ovld __cnfn log2(double3); +double4 __ovld __cnfn log2(double4); +double8 __ovld __cnfn log2(double8); +double16 __ovld __cnfn log2(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log2(half); +half2 __ovld __cnfn log2(half2); +half3 __ovld __cnfn log2(half3); +half4 __ovld __cnfn log2(half4); +half8 __ovld __cnfn log2(half8); +half16 __ovld __cnfn log2(half16); +#endif //cl_khr_fp16 + +/** + * Compute a base 10 logarithm. 
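
lgamma alone returns log|gamma(x)| and drops the sign; lgamma_r recovers it through signp, as the comment above notes. A sketch with a __private sign variable (names illustrative):

// lg = log(|gamma(x)|); s receives the sign of gamma(x).
__kernel void log_gamma(__global const float *x,
                        __global float *lg, __global int *sgn) {
  size_t i = get_global_id(0);
  int s;
  lg[i] = lgamma_r(x[i], &s);
  sgn[i] = s;
}
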
+ */ +float __ovld __cnfn log10(float); +float2 __ovld __cnfn log10(float2); +float3 __ovld __cnfn log10(float3); +float4 __ovld __cnfn log10(float4); +float8 __ovld __cnfn log10(float8); +float16 __ovld __cnfn log10(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn log10(double); +double2 __ovld __cnfn log10(double2); +double3 __ovld __cnfn log10(double3); +double4 __ovld __cnfn log10(double4); +double8 __ovld __cnfn log10(double8); +double16 __ovld __cnfn log10(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log10(half); +half2 __ovld __cnfn log10(half2); +half3 __ovld __cnfn log10(half3); +half4 __ovld __cnfn log10(half4); +half8 __ovld __cnfn log10(half8); +half16 __ovld __cnfn log10(half16); +#endif //cl_khr_fp16 + +/** + * Compute a base e logarithm of (1.0 + x). + */ +float __ovld __cnfn log1p(float x); +float2 __ovld __cnfn log1p(float2 x); +float3 __ovld __cnfn log1p(float3 x); +float4 __ovld __cnfn log1p(float4 x); +float8 __ovld __cnfn log1p(float8 x); +float16 __ovld __cnfn log1p(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn log1p(double x); +double2 __ovld __cnfn log1p(double2 x); +double3 __ovld __cnfn log1p(double3 x); +double4 __ovld __cnfn log1p(double4 x); +double8 __ovld __cnfn log1p(double8 x); +double16 __ovld __cnfn log1p(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log1p(half x); +half2 __ovld __cnfn log1p(half2 x); +half3 __ovld __cnfn log1p(half3 x); +half4 __ovld __cnfn log1p(half4 x); +half8 __ovld __cnfn log1p(half8 x); +half16 __ovld __cnfn log1p(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute the exponent of x, which is the integral + * part of logr | x |. + */ +float __ovld __cnfn logb(float x); +float2 __ovld __cnfn logb(float2 x); +float3 __ovld __cnfn logb(float3 x); +float4 __ovld __cnfn logb(float4 x); +float8 __ovld __cnfn logb(float8 x); +float16 __ovld __cnfn logb(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn logb(double x); +double2 __ovld __cnfn logb(double2 x); +double3 __ovld __cnfn logb(double3 x); +double4 __ovld __cnfn logb(double4 x); +double8 __ovld __cnfn logb(double8 x); +double16 __ovld __cnfn logb(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn logb(half x); +half2 __ovld __cnfn logb(half2 x); +half3 __ovld __cnfn logb(half3 x); +half4 __ovld __cnfn logb(half4 x); +half8 __ovld __cnfn logb(half8 x); +half16 __ovld __cnfn logb(half16 x); +#endif //cl_khr_fp16 + +/** + * mad approximates a * b + c. Whether or how the + * product of a * b is rounded and how supernormal or + * subnormal intermediate products are handled is not + * defined. mad is intended to be used where speed is + * preferred over accuracy. 
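
log1p is the logarithm-side counterpart of expm1: near x == 0 it computes log(1 + x) without the cancellation that forming 1.0f + x would introduce. A sketch:

// For x around 1e-8f, 1.0f + x rounds to 1.0f, making
// log(1.0f + x) == 0.0f; log1p(x) returns approximately x instead.
__kernel void log_return(__global const float *x, __global float *y) {
  size_t i = get_global_id(0);
  y[i] = log1p(x[i]);
}
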
+ */ +float __ovld __cnfn mad(float a, float b, float c); +float2 __ovld __cnfn mad(float2 a, float2 b, float2 c); +float3 __ovld __cnfn mad(float3 a, float3 b, float3 c); +float4 __ovld __cnfn mad(float4 a, float4 b, float4 c); +float8 __ovld __cnfn mad(float8 a, float8 b, float8 c); +float16 __ovld __cnfn mad(float16 a, float16 b, float16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn mad(double a, double b, double c); +double2 __ovld __cnfn mad(double2 a, double2 b, double2 c); +double3 __ovld __cnfn mad(double3 a, double3 b, double3 c); +double4 __ovld __cnfn mad(double4 a, double4 b, double4 c); +double8 __ovld __cnfn mad(double8 a, double8 b, double8 c); +double16 __ovld __cnfn mad(double16 a, double16 b, double16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn mad(half a, half b, half c); +half2 __ovld __cnfn mad(half2 a, half2 b, half2 c); +half3 __ovld __cnfn mad(half3 a, half3 b, half3 c); +half4 __ovld __cnfn mad(half4 a, half4 b, half4 c); +half8 __ovld __cnfn mad(half8 a, half8 b, half8 c); +half16 __ovld __cnfn mad(half16 a, half16 b, half16 c); +#endif //cl_khr_fp16 + +/** + * Returns x if | x | > | y |, y if | y | > | x |, otherwise + * fmax(x, y). + */ +float __ovld __cnfn maxmag(float x, float y); +float2 __ovld __cnfn maxmag(float2 x, float2 y); +float3 __ovld __cnfn maxmag(float3 x, float3 y); +float4 __ovld __cnfn maxmag(float4 x, float4 y); +float8 __ovld __cnfn maxmag(float8 x, float8 y); +float16 __ovld __cnfn maxmag(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn maxmag(double x, double y); +double2 __ovld __cnfn maxmag(double2 x, double2 y); +double3 __ovld __cnfn maxmag(double3 x, double3 y); +double4 __ovld __cnfn maxmag(double4 x, double4 y); +double8 __ovld __cnfn maxmag(double8 x, double8 y); +double16 __ovld __cnfn maxmag(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn maxmag(half x, half y); +half2 __ovld __cnfn maxmag(half2 x, half2 y); +half3 __ovld __cnfn maxmag(half3 x, half3 y); +half4 __ovld __cnfn maxmag(half4 x, half4 y); +half8 __ovld __cnfn maxmag(half8 x, half8 y); +half16 __ovld __cnfn maxmag(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns x if | x | < | y |, y if | y | < | x |, otherwise + * fmin(x, y). + */ +float __ovld __cnfn minmag(float x, float y); +float2 __ovld __cnfn minmag(float2 x, float2 y); +float3 __ovld __cnfn minmag(float3 x, float3 y); +float4 __ovld __cnfn minmag(float4 x, float4 y); +float8 __ovld __cnfn minmag(float8 x, float8 y); +float16 __ovld __cnfn minmag(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn minmag(double x, double y); +double2 __ovld __cnfn minmag(double2 x, double2 y); +double3 __ovld __cnfn minmag(double3 x, double3 y); +double4 __ovld __cnfn minmag(double4 x, double4 y); +double8 __ovld __cnfn minmag(double8 x, double8 y); +double16 __ovld __cnfn minmag(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn minmag(half x, half y); +half2 __ovld __cnfn minmag(half2 x, half2 y); +half3 __ovld __cnfn minmag(half3 x, half3 y); +half4 __ovld __cnfn minmag(half4 x, half4 y); +half8 __ovld __cnfn minmag(half8 x, half8 y); +half16 __ovld __cnfn minmag(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Decompose a floating-point number. The modf + * function breaks the argument x into integral and + * fractional parts, each of which has the same sign as + * the argument. It stores the integral part in the object + * pointed to by iptr. 
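+ *
+ * Worked example (illustrative, given float ip): modf(2.75f, &ip) returns
+ * 0.75f and stores 2.0f in ip; modf(-2.75f, &ip) returns -0.75f and stores
+ * -2.0f, since both parts take the sign of the argument.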
+ */ +#if defined(__opencl_c_generic_address_space) +float __ovld modf(float x, float *iptr); +float2 __ovld modf(float2 x, float2 *iptr); +float3 __ovld modf(float3 x, float3 *iptr); +float4 __ovld modf(float4 x, float4 *iptr); +float8 __ovld modf(float8 x, float8 *iptr); +float16 __ovld modf(float16 x, float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld modf(double x, double *iptr); +double2 __ovld modf(double2 x, double2 *iptr); +double3 __ovld modf(double3 x, double3 *iptr); +double4 __ovld modf(double4 x, double4 *iptr); +double8 __ovld modf(double8 x, double8 *iptr); +double16 __ovld modf(double16 x, double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld modf(half x, half *iptr); +half2 __ovld modf(half2 x, half2 *iptr); +half3 __ovld modf(half3 x, half3 *iptr); +half4 __ovld modf(half4 x, half4 *iptr); +half8 __ovld modf(half8 x, half8 *iptr); +half16 __ovld modf(half16 x, half16 *iptr); +#endif //cl_khr_fp16 +#else +float __ovld modf(float x, __global float *iptr); +float2 __ovld modf(float2 x, __global float2 *iptr); +float3 __ovld modf(float3 x, __global float3 *iptr); +float4 __ovld modf(float4 x, __global float4 *iptr); +float8 __ovld modf(float8 x, __global float8 *iptr); +float16 __ovld modf(float16 x, __global float16 *iptr); +float __ovld modf(float x, __local float *iptr); +float2 __ovld modf(float2 x, __local float2 *iptr); +float3 __ovld modf(float3 x, __local float3 *iptr); +float4 __ovld modf(float4 x, __local float4 *iptr); +float8 __ovld modf(float8 x, __local float8 *iptr); +float16 __ovld modf(float16 x, __local float16 *iptr); +float __ovld modf(float x, __private float *iptr); +float2 __ovld modf(float2 x, __private float2 *iptr); +float3 __ovld modf(float3 x, __private float3 *iptr); +float4 __ovld modf(float4 x, __private float4 *iptr); +float8 __ovld modf(float8 x, __private float8 *iptr); +float16 __ovld modf(float16 x, __private float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld modf(double x, __global double *iptr); +double2 __ovld modf(double2 x, __global double2 *iptr); +double3 __ovld modf(double3 x, __global double3 *iptr); +double4 __ovld modf(double4 x, __global double4 *iptr); +double8 __ovld modf(double8 x, __global double8 *iptr); +double16 __ovld modf(double16 x, __global double16 *iptr); +double __ovld modf(double x, __local double *iptr); +double2 __ovld modf(double2 x, __local double2 *iptr); +double3 __ovld modf(double3 x, __local double3 *iptr); +double4 __ovld modf(double4 x, __local double4 *iptr); +double8 __ovld modf(double8 x, __local double8 *iptr); +double16 __ovld modf(double16 x, __local double16 *iptr); +double __ovld modf(double x, __private double *iptr); +double2 __ovld modf(double2 x, __private double2 *iptr); +double3 __ovld modf(double3 x, __private double3 *iptr); +double4 __ovld modf(double4 x, __private double4 *iptr); +double8 __ovld modf(double8 x, __private double8 *iptr); +double16 __ovld modf(double16 x, __private double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld modf(half x, __global half *iptr); +half2 __ovld modf(half2 x, __global half2 *iptr); +half3 __ovld modf(half3 x, __global half3 *iptr); +half4 __ovld modf(half4 x, __global half4 *iptr); +half8 __ovld modf(half8 x, __global half8 *iptr); +half16 __ovld modf(half16 x, __global half16 *iptr); +half __ovld modf(half x, __local half *iptr); +half2 __ovld modf(half2 x, __local half2 *iptr); +half3 __ovld modf(half3 x, __local half3 *iptr); +half4 __ovld modf(half4 x, __local half4 *iptr); +half8 __ovld modf(half8 x, __local 
half8 *iptr); +half16 __ovld modf(half16 x, __local half16 *iptr); +half __ovld modf(half x, __private half *iptr); +half2 __ovld modf(half2 x, __private half2 *iptr); +half3 __ovld modf(half3 x, __private half3 *iptr); +half4 __ovld modf(half4 x, __private half4 *iptr); +half8 __ovld modf(half8 x, __private half8 *iptr); +half16 __ovld modf(half16 x, __private half16 *iptr); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +/** + * Returns a quiet NaN. The nancode may be placed + * in the significand of the resulting NaN. + */ +float __ovld __cnfn nan(uint nancode); +float2 __ovld __cnfn nan(uint2 nancode); +float3 __ovld __cnfn nan(uint3 nancode); +float4 __ovld __cnfn nan(uint4 nancode); +float8 __ovld __cnfn nan(uint8 nancode); +float16 __ovld __cnfn nan(uint16 nancode); +#ifdef cl_khr_fp64 +double __ovld __cnfn nan(ulong nancode); +double2 __ovld __cnfn nan(ulong2 nancode); +double3 __ovld __cnfn nan(ulong3 nancode); +double4 __ovld __cnfn nan(ulong4 nancode); +double8 __ovld __cnfn nan(ulong8 nancode); +double16 __ovld __cnfn nan(ulong16 nancode); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn nan(ushort nancode); +half2 __ovld __cnfn nan(ushort2 nancode); +half3 __ovld __cnfn nan(ushort3 nancode); +half4 __ovld __cnfn nan(ushort4 nancode); +half8 __ovld __cnfn nan(ushort8 nancode); +half16 __ovld __cnfn nan(ushort16 nancode); +#endif //cl_khr_fp16 + +/** + * Computes the next representable single-precision + * floating-point value following x in the direction of + * y. Thus, if y is less than x, nextafter() returns the + * largest representable floating-point number less + * than x. + */ +float __ovld __cnfn nextafter(float x, float y); +float2 __ovld __cnfn nextafter(float2 x, float2 y); +float3 __ovld __cnfn nextafter(float3 x, float3 y); +float4 __ovld __cnfn nextafter(float4 x, float4 y); +float8 __ovld __cnfn nextafter(float8 x, float8 y); +float16 __ovld __cnfn nextafter(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn nextafter(double x, double y); +double2 __ovld __cnfn nextafter(double2 x, double2 y); +double3 __ovld __cnfn nextafter(double3 x, double3 y); +double4 __ovld __cnfn nextafter(double4 x, double4 y); +double8 __ovld __cnfn nextafter(double8 x, double8 y); +double16 __ovld __cnfn nextafter(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn nextafter(half x, half y); +half2 __ovld __cnfn nextafter(half2 x, half2 y); +half3 __ovld __cnfn nextafter(half3 x, half3 y); +half4 __ovld __cnfn nextafter(half4 x, half4 y); +half8 __ovld __cnfn nextafter(half8 x, half8 y); +half16 __ovld __cnfn nextafter(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute x to the power y. 
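+ *
+ * Note (illustrative): pow places no constraint on its arguments; pown
+ * (integer exponent) and powr (x >= 0), declared below, exist so that an
+ * implementation can choose a cheaper algorithm when the arguments are
+ * known to be constrained.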
+ */ +float __ovld __cnfn pow(float x, float y); +float2 __ovld __cnfn pow(float2 x, float2 y); +float3 __ovld __cnfn pow(float3 x, float3 y); +float4 __ovld __cnfn pow(float4 x, float4 y); +float8 __ovld __cnfn pow(float8 x, float8 y); +float16 __ovld __cnfn pow(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn pow(double x, double y); +double2 __ovld __cnfn pow(double2 x, double2 y); +double3 __ovld __cnfn pow(double3 x, double3 y); +double4 __ovld __cnfn pow(double4 x, double4 y); +double8 __ovld __cnfn pow(double8 x, double8 y); +double16 __ovld __cnfn pow(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn pow(half x, half y); +half2 __ovld __cnfn pow(half2 x, half2 y); +half3 __ovld __cnfn pow(half3 x, half3 y); +half4 __ovld __cnfn pow(half4 x, half4 y); +half8 __ovld __cnfn pow(half8 x, half8 y); +half16 __ovld __cnfn pow(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute x to the power y, where y is an integer. + */ +float __ovld __cnfn pown(float x, int y); +float2 __ovld __cnfn pown(float2 x, int2 y); +float3 __ovld __cnfn pown(float3 x, int3 y); +float4 __ovld __cnfn pown(float4 x, int4 y); +float8 __ovld __cnfn pown(float8 x, int8 y); +float16 __ovld __cnfn pown(float16 x, int16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn pown(double x, int y); +double2 __ovld __cnfn pown(double2 x, int2 y); +double3 __ovld __cnfn pown(double3 x, int3 y); +double4 __ovld __cnfn pown(double4 x, int4 y); +double8 __ovld __cnfn pown(double8 x, int8 y); +double16 __ovld __cnfn pown(double16 x, int16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn pown(half x, int y); +half2 __ovld __cnfn pown(half2 x, int2 y); +half3 __ovld __cnfn pown(half3 x, int3 y); +half4 __ovld __cnfn pown(half4 x, int4 y); +half8 __ovld __cnfn pown(half8 x, int8 y); +half16 __ovld __cnfn pown(half16 x, int16 y); +#endif //cl_khr_fp16 + +/** + * Compute x to the power y, where x is >= 0. + */ +float __ovld __cnfn powr(float x, float y); +float2 __ovld __cnfn powr(float2 x, float2 y); +float3 __ovld __cnfn powr(float3 x, float3 y); +float4 __ovld __cnfn powr(float4 x, float4 y); +float8 __ovld __cnfn powr(float8 x, float8 y); +float16 __ovld __cnfn powr(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn powr(double x, double y); +double2 __ovld __cnfn powr(double2 x, double2 y); +double3 __ovld __cnfn powr(double3 x, double3 y); +double4 __ovld __cnfn powr(double4 x, double4 y); +double8 __ovld __cnfn powr(double8 x, double8 y); +double16 __ovld __cnfn powr(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn powr(half x, half y); +half2 __ovld __cnfn powr(half2 x, half2 y); +half3 __ovld __cnfn powr(half3 x, half3 y); +half4 __ovld __cnfn powr(half4 x, half4 y); +half8 __ovld __cnfn powr(half8 x, half8 y); +half16 __ovld __cnfn powr(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute the value r such that r = x - n*y, where n + * is the integer nearest the exact value of x/y. If there + * are two integers closest to x/y, n shall be the even + * one. If r is zero, it is given the same sign as x. 
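+ *
+ * Worked example (illustrative): remainder(5.0f, 2.0f) = 1.0f with n = 2;
+ * remainder(7.0f, 2.0f) = -1.0f, because 7/2 = 3.5 rounds to the even n = 4.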
+ */ +float __ovld __cnfn remainder(float x, float y); +float2 __ovld __cnfn remainder(float2 x, float2 y); +float3 __ovld __cnfn remainder(float3 x, float3 y); +float4 __ovld __cnfn remainder(float4 x, float4 y); +float8 __ovld __cnfn remainder(float8 x, float8 y); +float16 __ovld __cnfn remainder(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn remainder(double x, double y); +double2 __ovld __cnfn remainder(double2 x, double2 y); +double3 __ovld __cnfn remainder(double3 x, double3 y); +double4 __ovld __cnfn remainder(double4 x, double4 y); +double8 __ovld __cnfn remainder(double8 x, double8 y); +double16 __ovld __cnfn remainder(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn remainder(half x, half y); +half2 __ovld __cnfn remainder(half2 x, half2 y); +half3 __ovld __cnfn remainder(half3 x, half3 y); +half4 __ovld __cnfn remainder(half4 x, half4 y); +half8 __ovld __cnfn remainder(half8 x, half8 y); +half16 __ovld __cnfn remainder(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * The remquo function computes the value r such + * that r = x - n*y, where n is the integer nearest the + * exact value of x/y. If there are two integers closest + * to x/y, n shall be the even one. If r is zero, it is + * given the same sign as x. This is the same value + * that is returned by the remainder function. + * remquo also calculates the lower seven bits of the + * integral quotient x/y, and gives that value the same + * sign as x/y. It stores this signed value in the object + * pointed to by quo. + */ +#if defined(__opencl_c_generic_address_space) +float __ovld remquo(float x, float y, int *quo); +float2 __ovld remquo(float2 x, float2 y, int2 *quo); +float3 __ovld remquo(float3 x, float3 y, int3 *quo); +float4 __ovld remquo(float4 x, float4 y, int4 *quo); +float8 __ovld remquo(float8 x, float8 y, int8 *quo); +float16 __ovld remquo(float16 x, float16 y, int16 *quo); +#ifdef cl_khr_fp64 +double __ovld remquo(double x, double y, int *quo); +double2 __ovld remquo(double2 x, double2 y, int2 *quo); +double3 __ovld remquo(double3 x, double3 y, int3 *quo); +double4 __ovld remquo(double4 x, double4 y, int4 *quo); +double8 __ovld remquo(double8 x, double8 y, int8 *quo); +double16 __ovld remquo(double16 x, double16 y, int16 *quo); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld remquo(half x, half y, int *quo); +half2 __ovld remquo(half2 x, half2 y, int2 *quo); +half3 __ovld remquo(half3 x, half3 y, int3 *quo); +half4 __ovld remquo(half4 x, half4 y, int4 *quo); +half8 __ovld remquo(half8 x, half8 y, int8 *quo); +half16 __ovld remquo(half16 x, half16 y, int16 *quo); + +#endif //cl_khr_fp16 +#else +float __ovld remquo(float x, float y, __global int *quo); +float2 __ovld remquo(float2 x, float2 y, __global int2 *quo); +float3 __ovld remquo(float3 x, float3 y, __global int3 *quo); +float4 __ovld remquo(float4 x, float4 y, __global int4 *quo); +float8 __ovld remquo(float8 x, float8 y, __global int8 *quo); +float16 __ovld remquo(float16 x, float16 y, __global int16 *quo); +float __ovld remquo(float x, float y, __local int *quo); +float2 __ovld remquo(float2 x, float2 y, __local int2 *quo); +float3 __ovld remquo(float3 x, float3 y, __local int3 *quo); +float4 __ovld remquo(float4 x, float4 y, __local int4 *quo); +float8 __ovld remquo(float8 x, float8 y, __local int8 *quo); +float16 __ovld remquo(float16 x, float16 y, __local int16 *quo); +float __ovld remquo(float x, float y, __private int *quo); +float2 __ovld remquo(float2 x, float2 y, __private int2 
*quo); +float3 __ovld remquo(float3 x, float3 y, __private int3 *quo); +float4 __ovld remquo(float4 x, float4 y, __private int4 *quo); +float8 __ovld remquo(float8 x, float8 y, __private int8 *quo); +float16 __ovld remquo(float16 x, float16 y, __private int16 *quo); +#ifdef cl_khr_fp64 +double __ovld remquo(double x, double y, __global int *quo); +double2 __ovld remquo(double2 x, double2 y, __global int2 *quo); +double3 __ovld remquo(double3 x, double3 y, __global int3 *quo); +double4 __ovld remquo(double4 x, double4 y, __global int4 *quo); +double8 __ovld remquo(double8 x, double8 y, __global int8 *quo); +double16 __ovld remquo(double16 x, double16 y, __global int16 *quo); +double __ovld remquo(double x, double y, __local int *quo); +double2 __ovld remquo(double2 x, double2 y, __local int2 *quo); +double3 __ovld remquo(double3 x, double3 y, __local int3 *quo); +double4 __ovld remquo(double4 x, double4 y, __local int4 *quo); +double8 __ovld remquo(double8 x, double8 y, __local int8 *quo); +double16 __ovld remquo(double16 x, double16 y, __local int16 *quo); +double __ovld remquo(double x, double y, __private int *quo); +double2 __ovld remquo(double2 x, double2 y, __private int2 *quo); +double3 __ovld remquo(double3 x, double3 y, __private int3 *quo); +double4 __ovld remquo(double4 x, double4 y, __private int4 *quo); +double8 __ovld remquo(double8 x, double8 y, __private int8 *quo); +double16 __ovld remquo(double16 x, double16 y, __private int16 *quo); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld remquo(half x, half y, __global int *quo); +half2 __ovld remquo(half2 x, half2 y, __global int2 *quo); +half3 __ovld remquo(half3 x, half3 y, __global int3 *quo); +half4 __ovld remquo(half4 x, half4 y, __global int4 *quo); +half8 __ovld remquo(half8 x, half8 y, __global int8 *quo); +half16 __ovld remquo(half16 x, half16 y, __global int16 *quo); +half __ovld remquo(half x, half y, __local int *quo); +half2 __ovld remquo(half2 x, half2 y, __local int2 *quo); +half3 __ovld remquo(half3 x, half3 y, __local int3 *quo); +half4 __ovld remquo(half4 x, half4 y, __local int4 *quo); +half8 __ovld remquo(half8 x, half8 y, __local int8 *quo); +half16 __ovld remquo(half16 x, half16 y, __local int16 *quo); +half __ovld remquo(half x, half y, __private int *quo); +half2 __ovld remquo(half2 x, half2 y, __private int2 *quo); +half3 __ovld remquo(half3 x, half3 y, __private int3 *quo); +half4 __ovld remquo(half4 x, half4 y, __private int4 *quo); +half8 __ovld remquo(half8 x, half8 y, __private int8 *quo); +half16 __ovld remquo(half16 x, half16 y, __private int16 *quo); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) +/** + * Round to integral value (using round to nearest + * even rounding mode) in floating-point format. + * Refer to section 7.1 for description of rounding + * modes. 
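+ *
+ * Worked example (illustrative): rint(2.5f) = 2.0f and rint(3.5f) = 4.0f;
+ * ties go to the nearest even integer, whereas round(2.5f) = 3.0f.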
+ */ +float __ovld __cnfn rint(float); +float2 __ovld __cnfn rint(float2); +float3 __ovld __cnfn rint(float3); +float4 __ovld __cnfn rint(float4); +float8 __ovld __cnfn rint(float8); +float16 __ovld __cnfn rint(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn rint(double); +double2 __ovld __cnfn rint(double2); +double3 __ovld __cnfn rint(double3); +double4 __ovld __cnfn rint(double4); +double8 __ovld __cnfn rint(double8); +double16 __ovld __cnfn rint(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn rint(half); +half2 __ovld __cnfn rint(half2); +half3 __ovld __cnfn rint(half3); +half4 __ovld __cnfn rint(half4); +half8 __ovld __cnfn rint(half8); +half16 __ovld __cnfn rint(half16); +#endif //cl_khr_fp16 + +/** + * Compute x to the power 1/y. + */ +float __ovld __cnfn rootn(float x, int y); +float2 __ovld __cnfn rootn(float2 x, int2 y); +float3 __ovld __cnfn rootn(float3 x, int3 y); +float4 __ovld __cnfn rootn(float4 x, int4 y); +float8 __ovld __cnfn rootn(float8 x, int8 y); +float16 __ovld __cnfn rootn(float16 x, int16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn rootn(double x, int y); +double2 __ovld __cnfn rootn(double2 x, int2 y); +double3 __ovld __cnfn rootn(double3 x, int3 y); +double4 __ovld __cnfn rootn(double4 x, int4 y); +double8 __ovld __cnfn rootn(double8 x, int8 y); +double16 __ovld __cnfn rootn(double16 x, int16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn rootn(half x, int y); +half2 __ovld __cnfn rootn(half2 x, int2 y); +half3 __ovld __cnfn rootn(half3 x, int3 y); +half4 __ovld __cnfn rootn(half4 x, int4 y); +half8 __ovld __cnfn rootn(half8 x, int8 y); +half16 __ovld __cnfn rootn(half16 x, int16 y); +#endif //cl_khr_fp16 + +/** + * Return the integral value nearest to x rounding + * halfway cases away from zero, regardless of the + * current rounding direction. + */ +float __ovld __cnfn round(float x); +float2 __ovld __cnfn round(float2 x); +float3 __ovld __cnfn round(float3 x); +float4 __ovld __cnfn round(float4 x); +float8 __ovld __cnfn round(float8 x); +float16 __ovld __cnfn round(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn round(double x); +double2 __ovld __cnfn round(double2 x); +double3 __ovld __cnfn round(double3 x); +double4 __ovld __cnfn round(double4 x); +double8 __ovld __cnfn round(double8 x); +double16 __ovld __cnfn round(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn round(half x); +half2 __ovld __cnfn round(half2 x); +half3 __ovld __cnfn round(half3 x); +half4 __ovld __cnfn round(half4 x); +half8 __ovld __cnfn round(half8 x); +half16 __ovld __cnfn round(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute inverse square root. + */ +float __ovld __cnfn rsqrt(float); +float2 __ovld __cnfn rsqrt(float2); +float3 __ovld __cnfn rsqrt(float3); +float4 __ovld __cnfn rsqrt(float4); +float8 __ovld __cnfn rsqrt(float8); +float16 __ovld __cnfn rsqrt(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn rsqrt(double); +double2 __ovld __cnfn rsqrt(double2); +double3 __ovld __cnfn rsqrt(double3); +double4 __ovld __cnfn rsqrt(double4); +double8 __ovld __cnfn rsqrt(double8); +double16 __ovld __cnfn rsqrt(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn rsqrt(half); +half2 __ovld __cnfn rsqrt(half2); +half3 __ovld __cnfn rsqrt(half3); +half4 __ovld __cnfn rsqrt(half4); +half8 __ovld __cnfn rsqrt(half8); +half16 __ovld __cnfn rsqrt(half16); +#endif //cl_khr_fp16 + +/** + * Compute sine. 
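+ *
+ * Note (illustrative): unlike half_sin and native_sin declared further
+ * below, full-precision sin places no restriction on the argument range.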
+ */ +float __ovld __cnfn sin(float); +float2 __ovld __cnfn sin(float2); +float3 __ovld __cnfn sin(float3); +float4 __ovld __cnfn sin(float4); +float8 __ovld __cnfn sin(float8); +float16 __ovld __cnfn sin(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn sin(double); +double2 __ovld __cnfn sin(double2); +double3 __ovld __cnfn sin(double3); +double4 __ovld __cnfn sin(double4); +double8 __ovld __cnfn sin(double8); +double16 __ovld __cnfn sin(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sin(half); +half2 __ovld __cnfn sin(half2); +half3 __ovld __cnfn sin(half3); +half4 __ovld __cnfn sin(half4); +half8 __ovld __cnfn sin(half8); +half16 __ovld __cnfn sin(half16); +#endif //cl_khr_fp16 + +/** + * Compute sine and cosine of x. The computed sine + * is the return value and computed cosine is returned + * in cosval. + */ +#if defined(__opencl_c_generic_address_space) +float __ovld sincos(float x, float *cosval); +float2 __ovld sincos(float2 x, float2 *cosval); +float3 __ovld sincos(float3 x, float3 *cosval); +float4 __ovld sincos(float4 x, float4 *cosval); +float8 __ovld sincos(float8 x, float8 *cosval); +float16 __ovld sincos(float16 x, float16 *cosval); +#ifdef cl_khr_fp64 +double __ovld sincos(double x, double *cosval); +double2 __ovld sincos(double2 x, double2 *cosval); +double3 __ovld sincos(double3 x, double3 *cosval); +double4 __ovld sincos(double4 x, double4 *cosval); +double8 __ovld sincos(double8 x, double8 *cosval); +double16 __ovld sincos(double16 x, double16 *cosval); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld sincos(half x, half *cosval); +half2 __ovld sincos(half2 x, half2 *cosval); +half3 __ovld sincos(half3 x, half3 *cosval); +half4 __ovld sincos(half4 x, half4 *cosval); +half8 __ovld sincos(half8 x, half8 *cosval); +half16 __ovld sincos(half16 x, half16 *cosval); +#endif //cl_khr_fp16 +#else +float __ovld sincos(float x, __global float *cosval); +float2 __ovld sincos(float2 x, __global float2 *cosval); +float3 __ovld sincos(float3 x, __global float3 *cosval); +float4 __ovld sincos(float4 x, __global float4 *cosval); +float8 __ovld sincos(float8 x, __global float8 *cosval); +float16 __ovld sincos(float16 x, __global float16 *cosval); +float __ovld sincos(float x, __local float *cosval); +float2 __ovld sincos(float2 x, __local float2 *cosval); +float3 __ovld sincos(float3 x, __local float3 *cosval); +float4 __ovld sincos(float4 x, __local float4 *cosval); +float8 __ovld sincos(float8 x, __local float8 *cosval); +float16 __ovld sincos(float16 x, __local float16 *cosval); +float __ovld sincos(float x, __private float *cosval); +float2 __ovld sincos(float2 x, __private float2 *cosval); +float3 __ovld sincos(float3 x, __private float3 *cosval); +float4 __ovld sincos(float4 x, __private float4 *cosval); +float8 __ovld sincos(float8 x, __private float8 *cosval); +float16 __ovld sincos(float16 x, __private float16 *cosval); +#ifdef cl_khr_fp64 +double __ovld sincos(double x, __global double *cosval); +double2 __ovld sincos(double2 x, __global double2 *cosval); +double3 __ovld sincos(double3 x, __global double3 *cosval); +double4 __ovld sincos(double4 x, __global double4 *cosval); +double8 __ovld sincos(double8 x, __global double8 *cosval); +double16 __ovld sincos(double16 x, __global double16 *cosval); +double __ovld sincos(double x, __local double *cosval); +double2 __ovld sincos(double2 x, __local double2 *cosval); +double3 __ovld sincos(double3 x, __local double3 *cosval); +double4 __ovld sincos(double4 x, __local double4 *cosval); +double8 
__ovld sincos(double8 x, __local double8 *cosval); +double16 __ovld sincos(double16 x, __local double16 *cosval); +double __ovld sincos(double x, __private double *cosval); +double2 __ovld sincos(double2 x, __private double2 *cosval); +double3 __ovld sincos(double3 x, __private double3 *cosval); +double4 __ovld sincos(double4 x, __private double4 *cosval); +double8 __ovld sincos(double8 x, __private double8 *cosval); +double16 __ovld sincos(double16 x, __private double16 *cosval); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld sincos(half x, __global half *cosval); +half2 __ovld sincos(half2 x, __global half2 *cosval); +half3 __ovld sincos(half3 x, __global half3 *cosval); +half4 __ovld sincos(half4 x, __global half4 *cosval); +half8 __ovld sincos(half8 x, __global half8 *cosval); +half16 __ovld sincos(half16 x, __global half16 *cosval); +half __ovld sincos(half x, __local half *cosval); +half2 __ovld sincos(half2 x, __local half2 *cosval); +half3 __ovld sincos(half3 x, __local half3 *cosval); +half4 __ovld sincos(half4 x, __local half4 *cosval); +half8 __ovld sincos(half8 x, __local half8 *cosval); +half16 __ovld sincos(half16 x, __local half16 *cosval); +half __ovld sincos(half x, __private half *cosval); +half2 __ovld sincos(half2 x, __private half2 *cosval); +half3 __ovld sincos(half3 x, __private half3 *cosval); +half4 __ovld sincos(half4 x, __private half4 *cosval); +half8 __ovld sincos(half8 x, __private half8 *cosval); +half16 __ovld sincos(half16 x, __private half16 *cosval); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +/** + * Compute hyperbolic sine. + */ +float __ovld __cnfn sinh(float); +float2 __ovld __cnfn sinh(float2); +float3 __ovld __cnfn sinh(float3); +float4 __ovld __cnfn sinh(float4); +float8 __ovld __cnfn sinh(float8); +float16 __ovld __cnfn sinh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn sinh(double); +double2 __ovld __cnfn sinh(double2); +double3 __ovld __cnfn sinh(double3); +double4 __ovld __cnfn sinh(double4); +double8 __ovld __cnfn sinh(double8); +double16 __ovld __cnfn sinh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sinh(half); +half2 __ovld __cnfn sinh(half2); +half3 __ovld __cnfn sinh(half3); +half4 __ovld __cnfn sinh(half4); +half8 __ovld __cnfn sinh(half8); +half16 __ovld __cnfn sinh(half16); +#endif //cl_khr_fp16 + +/** + * Compute sin (PI * x). + */ +float __ovld __cnfn sinpi(float x); +float2 __ovld __cnfn sinpi(float2 x); +float3 __ovld __cnfn sinpi(float3 x); +float4 __ovld __cnfn sinpi(float4 x); +float8 __ovld __cnfn sinpi(float8 x); +float16 __ovld __cnfn sinpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn sinpi(double x); +double2 __ovld __cnfn sinpi(double2 x); +double3 __ovld __cnfn sinpi(double3 x); +double4 __ovld __cnfn sinpi(double4 x); +double8 __ovld __cnfn sinpi(double8 x); +double16 __ovld __cnfn sinpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sinpi(half x); +half2 __ovld __cnfn sinpi(half2 x); +half3 __ovld __cnfn sinpi(half3 x); +half4 __ovld __cnfn sinpi(half4 x); +half8 __ovld __cnfn sinpi(half8 x); +half16 __ovld __cnfn sinpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute square root. 
+ */ +float __ovld __cnfn sqrt(float); +float2 __ovld __cnfn sqrt(float2); +float3 __ovld __cnfn sqrt(float3); +float4 __ovld __cnfn sqrt(float4); +float8 __ovld __cnfn sqrt(float8); +float16 __ovld __cnfn sqrt(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn sqrt(double); +double2 __ovld __cnfn sqrt(double2); +double3 __ovld __cnfn sqrt(double3); +double4 __ovld __cnfn sqrt(double4); +double8 __ovld __cnfn sqrt(double8); +double16 __ovld __cnfn sqrt(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sqrt(half); +half2 __ovld __cnfn sqrt(half2); +half3 __ovld __cnfn sqrt(half3); +half4 __ovld __cnfn sqrt(half4); +half8 __ovld __cnfn sqrt(half8); +half16 __ovld __cnfn sqrt(half16); +#endif //cl_khr_fp16 + +/** + * Compute tangent. + */ +float __ovld __cnfn tan(float); +float2 __ovld __cnfn tan(float2); +float3 __ovld __cnfn tan(float3); +float4 __ovld __cnfn tan(float4); +float8 __ovld __cnfn tan(float8); +float16 __ovld __cnfn tan(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn tan(double); +double2 __ovld __cnfn tan(double2); +double3 __ovld __cnfn tan(double3); +double4 __ovld __cnfn tan(double4); +double8 __ovld __cnfn tan(double8); +double16 __ovld __cnfn tan(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tan(half); +half2 __ovld __cnfn tan(half2); +half3 __ovld __cnfn tan(half3); +half4 __ovld __cnfn tan(half4); +half8 __ovld __cnfn tan(half8); +half16 __ovld __cnfn tan(half16); +#endif //cl_khr_fp16 + +/** + * Compute hyperbolic tangent. + */ +float __ovld __cnfn tanh(float); +float2 __ovld __cnfn tanh(float2); +float3 __ovld __cnfn tanh(float3); +float4 __ovld __cnfn tanh(float4); +float8 __ovld __cnfn tanh(float8); +float16 __ovld __cnfn tanh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn tanh(double); +double2 __ovld __cnfn tanh(double2); +double3 __ovld __cnfn tanh(double3); +double4 __ovld __cnfn tanh(double4); +double8 __ovld __cnfn tanh(double8); +double16 __ovld __cnfn tanh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tanh(half); +half2 __ovld __cnfn tanh(half2); +half3 __ovld __cnfn tanh(half3); +half4 __ovld __cnfn tanh(half4); +half8 __ovld __cnfn tanh(half8); +half16 __ovld __cnfn tanh(half16); +#endif //cl_khr_fp16 + +/** + * Compute tan (PI * x). + */ +float __ovld __cnfn tanpi(float x); +float2 __ovld __cnfn tanpi(float2 x); +float3 __ovld __cnfn tanpi(float3 x); +float4 __ovld __cnfn tanpi(float4 x); +float8 __ovld __cnfn tanpi(float8 x); +float16 __ovld __cnfn tanpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn tanpi(double x); +double2 __ovld __cnfn tanpi(double2 x); +double3 __ovld __cnfn tanpi(double3 x); +double4 __ovld __cnfn tanpi(double4 x); +double8 __ovld __cnfn tanpi(double8 x); +double16 __ovld __cnfn tanpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tanpi(half x); +half2 __ovld __cnfn tanpi(half2 x); +half3 __ovld __cnfn tanpi(half3 x); +half4 __ovld __cnfn tanpi(half4 x); +half8 __ovld __cnfn tanpi(half8 x); +half16 __ovld __cnfn tanpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute the gamma function. 
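+ *
+ * Note (illustrative): tgamma extends the factorial, tgamma(n + 1) = n!
+ * for non-negative integral n, e.g. tgamma(5.0f) = 24.0f.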
+ */
+float __ovld __cnfn tgamma(float);
+float2 __ovld __cnfn tgamma(float2);
+float3 __ovld __cnfn tgamma(float3);
+float4 __ovld __cnfn tgamma(float4);
+float8 __ovld __cnfn tgamma(float8);
+float16 __ovld __cnfn tgamma(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn tgamma(double);
+double2 __ovld __cnfn tgamma(double2);
+double3 __ovld __cnfn tgamma(double3);
+double4 __ovld __cnfn tgamma(double4);
+double8 __ovld __cnfn tgamma(double8);
+double16 __ovld __cnfn tgamma(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn tgamma(half);
+half2 __ovld __cnfn tgamma(half2);
+half3 __ovld __cnfn tgamma(half3);
+half4 __ovld __cnfn tgamma(half4);
+half8 __ovld __cnfn tgamma(half8);
+half16 __ovld __cnfn tgamma(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Round to integral value using the round to zero
+ * rounding mode.
+ */
+float __ovld __cnfn trunc(float);
+float2 __ovld __cnfn trunc(float2);
+float3 __ovld __cnfn trunc(float3);
+float4 __ovld __cnfn trunc(float4);
+float8 __ovld __cnfn trunc(float8);
+float16 __ovld __cnfn trunc(float16);
+#ifdef cl_khr_fp64
+double __ovld __cnfn trunc(double);
+double2 __ovld __cnfn trunc(double2);
+double3 __ovld __cnfn trunc(double3);
+double4 __ovld __cnfn trunc(double4);
+double8 __ovld __cnfn trunc(double8);
+double16 __ovld __cnfn trunc(double16);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn trunc(half);
+half2 __ovld __cnfn trunc(half2);
+half3 __ovld __cnfn trunc(half3);
+half4 __ovld __cnfn trunc(half4);
+half8 __ovld __cnfn trunc(half8);
+half16 __ovld __cnfn trunc(half16);
+#endif //cl_khr_fp16
+
+/**
+ * Compute cosine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_cos(float x);
+float2 __ovld __cnfn half_cos(float2 x);
+float3 __ovld __cnfn half_cos(float3 x);
+float4 __ovld __cnfn half_cos(float4 x);
+float8 __ovld __cnfn half_cos(float8 x);
+float16 __ovld __cnfn half_cos(float16 x);
+
+/**
+ * Compute x / y.
+ */
+float __ovld __cnfn half_divide(float x, float y);
+float2 __ovld __cnfn half_divide(float2 x, float2 y);
+float3 __ovld __cnfn half_divide(float3 x, float3 y);
+float4 __ovld __cnfn half_divide(float4 x, float4 y);
+float8 __ovld __cnfn half_divide(float8 x, float8 y);
+float16 __ovld __cnfn half_divide(float16 x, float16 y);
+
+/**
+ * Compute the base-e exponential of x.
+ */
+float __ovld __cnfn half_exp(float x);
+float2 __ovld __cnfn half_exp(float2 x);
+float3 __ovld __cnfn half_exp(float3 x);
+float4 __ovld __cnfn half_exp(float4 x);
+float8 __ovld __cnfn half_exp(float8 x);
+float16 __ovld __cnfn half_exp(float16 x);
+
+/**
+ * Compute the base-2 exponential of x.
+ */
+float __ovld __cnfn half_exp2(float x);
+float2 __ovld __cnfn half_exp2(float2 x);
+float3 __ovld __cnfn half_exp2(float3 x);
+float4 __ovld __cnfn half_exp2(float4 x);
+float8 __ovld __cnfn half_exp2(float8 x);
+float16 __ovld __cnfn half_exp2(float16 x);
+
+/**
+ * Compute the base-10 exponential of x.
+ */
+float __ovld __cnfn half_exp10(float x);
+float2 __ovld __cnfn half_exp10(float2 x);
+float3 __ovld __cnfn half_exp10(float3 x);
+float4 __ovld __cnfn half_exp10(float4 x);
+float8 __ovld __cnfn half_exp10(float8 x);
+float16 __ovld __cnfn half_exp10(float16 x);
+
+/**
+ * Compute natural logarithm.
+ */
+float __ovld __cnfn half_log(float x);
+float2 __ovld __cnfn half_log(float2 x);
+float3 __ovld __cnfn half_log(float3 x);
+float4 __ovld __cnfn half_log(float4 x);
+float8 __ovld __cnfn half_log(float8 x);
+float16 __ovld __cnfn half_log(float16 x);
+
+/**
+ * Compute a base 2 logarithm.
+ */
+float __ovld __cnfn half_log2(float x);
+float2 __ovld __cnfn half_log2(float2 x);
+float3 __ovld __cnfn half_log2(float3 x);
+float4 __ovld __cnfn half_log2(float4 x);
+float8 __ovld __cnfn half_log2(float8 x);
+float16 __ovld __cnfn half_log2(float16 x);
+
+/**
+ * Compute a base 10 logarithm.
+ */
+float __ovld __cnfn half_log10(float x);
+float2 __ovld __cnfn half_log10(float2 x);
+float3 __ovld __cnfn half_log10(float3 x);
+float4 __ovld __cnfn half_log10(float4 x);
+float8 __ovld __cnfn half_log10(float8 x);
+float16 __ovld __cnfn half_log10(float16 x);
+
+/**
+ * Compute x to the power y, where x is >= 0.
+ */
+float __ovld __cnfn half_powr(float x, float y);
+float2 __ovld __cnfn half_powr(float2 x, float2 y);
+float3 __ovld __cnfn half_powr(float3 x, float3 y);
+float4 __ovld __cnfn half_powr(float4 x, float4 y);
+float8 __ovld __cnfn half_powr(float8 x, float8 y);
+float16 __ovld __cnfn half_powr(float16 x, float16 y);
+
+/**
+ * Compute reciprocal.
+ */
+float __ovld __cnfn half_recip(float x);
+float2 __ovld __cnfn half_recip(float2 x);
+float3 __ovld __cnfn half_recip(float3 x);
+float4 __ovld __cnfn half_recip(float4 x);
+float8 __ovld __cnfn half_recip(float8 x);
+float16 __ovld __cnfn half_recip(float16 x);
+
+/**
+ * Compute inverse square root.
+ */
+float __ovld __cnfn half_rsqrt(float x);
+float2 __ovld __cnfn half_rsqrt(float2 x);
+float3 __ovld __cnfn half_rsqrt(float3 x);
+float4 __ovld __cnfn half_rsqrt(float4 x);
+float8 __ovld __cnfn half_rsqrt(float8 x);
+float16 __ovld __cnfn half_rsqrt(float16 x);
+
+/**
+ * Compute sine. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_sin(float x);
+float2 __ovld __cnfn half_sin(float2 x);
+float3 __ovld __cnfn half_sin(float3 x);
+float4 __ovld __cnfn half_sin(float4 x);
+float8 __ovld __cnfn half_sin(float8 x);
+float16 __ovld __cnfn half_sin(float16 x);
+
+/**
+ * Compute square root.
+ */
+float __ovld __cnfn half_sqrt(float x);
+float2 __ovld __cnfn half_sqrt(float2 x);
+float3 __ovld __cnfn half_sqrt(float3 x);
+float4 __ovld __cnfn half_sqrt(float4 x);
+float8 __ovld __cnfn half_sqrt(float8 x);
+float16 __ovld __cnfn half_sqrt(float16 x);
+
+/**
+ * Compute tangent. x must be in the range -2^16 ... +2^16.
+ */
+float __ovld __cnfn half_tan(float x);
+float2 __ovld __cnfn half_tan(float2 x);
+float3 __ovld __cnfn half_tan(float3 x);
+float4 __ovld __cnfn half_tan(float4 x);
+float8 __ovld __cnfn half_tan(float8 x);
+float16 __ovld __cnfn half_tan(float16 x);
+
+/**
+ * Compute cosine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_cos(float x);
+float2 __ovld __cnfn native_cos(float2 x);
+float3 __ovld __cnfn native_cos(float3 x);
+float4 __ovld __cnfn native_cos(float4 x);
+float8 __ovld __cnfn native_cos(float8 x);
+float16 __ovld __cnfn native_cos(float16 x);
+
+/**
+ * Compute x / y over an implementation-defined range.
+ * The maximum error is implementation-defined.
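+ *
+ * Note (illustrative): the native_* built-ins trade accuracy for speed and
+ * may map directly to hardware instructions; compiling with
+ * -cl-fast-relaxed-math typically permits such substitutions globally.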
+ */
+float __ovld __cnfn native_divide(float x, float y);
+float2 __ovld __cnfn native_divide(float2 x, float2 y);
+float3 __ovld __cnfn native_divide(float3 x, float3 y);
+float4 __ovld __cnfn native_divide(float4 x, float4 y);
+float8 __ovld __cnfn native_divide(float8 x, float8 y);
+float16 __ovld __cnfn native_divide(float16 x, float16 y);
+
+/**
+ * Compute the base-e exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp(float x);
+float2 __ovld __cnfn native_exp(float2 x);
+float3 __ovld __cnfn native_exp(float3 x);
+float4 __ovld __cnfn native_exp(float4 x);
+float8 __ovld __cnfn native_exp(float8 x);
+float16 __ovld __cnfn native_exp(float16 x);
+
+/**
+ * Compute the base-2 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp2(float x);
+float2 __ovld __cnfn native_exp2(float2 x);
+float3 __ovld __cnfn native_exp2(float3 x);
+float4 __ovld __cnfn native_exp2(float4 x);
+float8 __ovld __cnfn native_exp2(float8 x);
+float16 __ovld __cnfn native_exp2(float16 x);
+
+/**
+ * Compute the base-10 exponential of x over an
+ * implementation-defined range. The maximum error is
+ * implementation-defined.
+ */
+float __ovld __cnfn native_exp10(float x);
+float2 __ovld __cnfn native_exp10(float2 x);
+float3 __ovld __cnfn native_exp10(float3 x);
+float4 __ovld __cnfn native_exp10(float4 x);
+float8 __ovld __cnfn native_exp10(float8 x);
+float16 __ovld __cnfn native_exp10(float16 x);
+
+/**
+ * Compute natural logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log(float x);
+float2 __ovld __cnfn native_log(float2 x);
+float3 __ovld __cnfn native_log(float3 x);
+float4 __ovld __cnfn native_log(float4 x);
+float8 __ovld __cnfn native_log(float8 x);
+float16 __ovld __cnfn native_log(float16 x);
+
+/**
+ * Compute a base 2 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log2(float x);
+float2 __ovld __cnfn native_log2(float2 x);
+float3 __ovld __cnfn native_log2(float3 x);
+float4 __ovld __cnfn native_log2(float4 x);
+float8 __ovld __cnfn native_log2(float8 x);
+float16 __ovld __cnfn native_log2(float16 x);
+
+/**
+ * Compute a base 10 logarithm over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_log10(float x);
+float2 __ovld __cnfn native_log10(float2 x);
+float3 __ovld __cnfn native_log10(float3 x);
+float4 __ovld __cnfn native_log10(float4 x);
+float8 __ovld __cnfn native_log10(float8 x);
+float16 __ovld __cnfn native_log10(float16 x);
+
+/**
+ * Compute x to the power y, where x is >= 0. The ranges of
+ * x and y are implementation-defined. The maximum error
+ * is implementation-defined.
+ */
+float __ovld __cnfn native_powr(float x, float y);
+float2 __ovld __cnfn native_powr(float2 x, float2 y);
+float3 __ovld __cnfn native_powr(float3 x, float3 y);
+float4 __ovld __cnfn native_powr(float4 x, float4 y);
+float8 __ovld __cnfn native_powr(float8 x, float8 y);
+float16 __ovld __cnfn native_powr(float16 x, float16 y);
+
+/**
+ * Compute reciprocal over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_recip(float x);
+float2 __ovld __cnfn native_recip(float2 x);
+float3 __ovld __cnfn native_recip(float3 x);
+float4 __ovld __cnfn native_recip(float4 x);
+float8 __ovld __cnfn native_recip(float8 x);
+float16 __ovld __cnfn native_recip(float16 x);
+
+/**
+ * Compute inverse square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_rsqrt(float x);
+float2 __ovld __cnfn native_rsqrt(float2 x);
+float3 __ovld __cnfn native_rsqrt(float3 x);
+float4 __ovld __cnfn native_rsqrt(float4 x);
+float8 __ovld __cnfn native_rsqrt(float8 x);
+float16 __ovld __cnfn native_rsqrt(float16 x);
+
+/**
+ * Compute sine over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sin(float x);
+float2 __ovld __cnfn native_sin(float2 x);
+float3 __ovld __cnfn native_sin(float3 x);
+float4 __ovld __cnfn native_sin(float4 x);
+float8 __ovld __cnfn native_sin(float8 x);
+float16 __ovld __cnfn native_sin(float16 x);
+
+/**
+ * Compute square root over an implementation-defined
+ * range. The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_sqrt(float x);
+float2 __ovld __cnfn native_sqrt(float2 x);
+float3 __ovld __cnfn native_sqrt(float3 x);
+float4 __ovld __cnfn native_sqrt(float4 x);
+float8 __ovld __cnfn native_sqrt(float8 x);
+float16 __ovld __cnfn native_sqrt(float16 x);
+
+/**
+ * Compute tangent over an implementation-defined range.
+ * The maximum error is implementation-defined.
+ */
+float __ovld __cnfn native_tan(float x);
+float2 __ovld __cnfn native_tan(float2 x);
+float3 __ovld __cnfn native_tan(float3 x);
+float4 __ovld __cnfn native_tan(float4 x);
+float8 __ovld __cnfn native_tan(float8 x);
+float16 __ovld __cnfn native_tan(float16 x);
+
+// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
+
+/**
+ * Returns | x |.
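+ *
+ * Note (illustrative): the return type is always unsigned, so abs is well
+ * defined even for the most negative value, e.g. abs((int)INT_MIN) = 2147483648u.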
+ */ +uchar __ovld __cnfn abs(char x); +uchar __ovld __cnfn abs(uchar x); +uchar2 __ovld __cnfn abs(char2 x); +uchar2 __ovld __cnfn abs(uchar2 x); +uchar3 __ovld __cnfn abs(char3 x); +uchar3 __ovld __cnfn abs(uchar3 x); +uchar4 __ovld __cnfn abs(char4 x); +uchar4 __ovld __cnfn abs(uchar4 x); +uchar8 __ovld __cnfn abs(char8 x); +uchar8 __ovld __cnfn abs(uchar8 x); +uchar16 __ovld __cnfn abs(char16 x); +uchar16 __ovld __cnfn abs(uchar16 x); +ushort __ovld __cnfn abs(short x); +ushort __ovld __cnfn abs(ushort x); +ushort2 __ovld __cnfn abs(short2 x); +ushort2 __ovld __cnfn abs(ushort2 x); +ushort3 __ovld __cnfn abs(short3 x); +ushort3 __ovld __cnfn abs(ushort3 x); +ushort4 __ovld __cnfn abs(short4 x); +ushort4 __ovld __cnfn abs(ushort4 x); +ushort8 __ovld __cnfn abs(short8 x); +ushort8 __ovld __cnfn abs(ushort8 x); +ushort16 __ovld __cnfn abs(short16 x); +ushort16 __ovld __cnfn abs(ushort16 x); +uint __ovld __cnfn abs(int x); +uint __ovld __cnfn abs(uint x); +uint2 __ovld __cnfn abs(int2 x); +uint2 __ovld __cnfn abs(uint2 x); +uint3 __ovld __cnfn abs(int3 x); +uint3 __ovld __cnfn abs(uint3 x); +uint4 __ovld __cnfn abs(int4 x); +uint4 __ovld __cnfn abs(uint4 x); +uint8 __ovld __cnfn abs(int8 x); +uint8 __ovld __cnfn abs(uint8 x); +uint16 __ovld __cnfn abs(int16 x); +uint16 __ovld __cnfn abs(uint16 x); +ulong __ovld __cnfn abs(long x); +ulong __ovld __cnfn abs(ulong x); +ulong2 __ovld __cnfn abs(long2 x); +ulong2 __ovld __cnfn abs(ulong2 x); +ulong3 __ovld __cnfn abs(long3 x); +ulong3 __ovld __cnfn abs(ulong3 x); +ulong4 __ovld __cnfn abs(long4 x); +ulong4 __ovld __cnfn abs(ulong4 x); +ulong8 __ovld __cnfn abs(long8 x); +ulong8 __ovld __cnfn abs(ulong8 x); +ulong16 __ovld __cnfn abs(long16 x); +ulong16 __ovld __cnfn abs(ulong16 x); + +/** + * Returns | x - y | without modulo overflow. 
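+ *
+ * Worked example (illustrative): abs_diff((uchar)10, (uchar)250) = 240,
+ * whereas plain (uchar)(10 - 250) would wrap to 16.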
+ */ +uchar __ovld __cnfn abs_diff(char x, char y); +uchar __ovld __cnfn abs_diff(uchar x, uchar y); +uchar2 __ovld __cnfn abs_diff(char2 x, char2 y); +uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y); +uchar3 __ovld __cnfn abs_diff(char3 x, char3 y); +uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y); +uchar4 __ovld __cnfn abs_diff(char4 x, char4 y); +uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y); +uchar8 __ovld __cnfn abs_diff(char8 x, char8 y); +uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y); +uchar16 __ovld __cnfn abs_diff(char16 x, char16 y); +uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y); +ushort __ovld __cnfn abs_diff(short x, short y); +ushort __ovld __cnfn abs_diff(ushort x, ushort y); +ushort2 __ovld __cnfn abs_diff(short2 x, short2 y); +ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y); +ushort3 __ovld __cnfn abs_diff(short3 x, short3 y); +ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y); +ushort4 __ovld __cnfn abs_diff(short4 x, short4 y); +ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y); +ushort8 __ovld __cnfn abs_diff(short8 x, short8 y); +ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y); +ushort16 __ovld __cnfn abs_diff(short16 x, short16 y); +ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y); +uint __ovld __cnfn abs_diff(int x, int y); +uint __ovld __cnfn abs_diff(uint x, uint y); +uint2 __ovld __cnfn abs_diff(int2 x, int2 y); +uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y); +uint3 __ovld __cnfn abs_diff(int3 x, int3 y); +uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y); +uint4 __ovld __cnfn abs_diff(int4 x, int4 y); +uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y); +uint8 __ovld __cnfn abs_diff(int8 x, int8 y); +uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y); +uint16 __ovld __cnfn abs_diff(int16 x, int16 y); +uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y); +ulong __ovld __cnfn abs_diff(long x, long y); +ulong __ovld __cnfn abs_diff(ulong x, ulong y); +ulong2 __ovld __cnfn abs_diff(long2 x, long2 y); +ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y); +ulong3 __ovld __cnfn abs_diff(long3 x, long3 y); +ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y); +ulong4 __ovld __cnfn abs_diff(long4 x, long4 y); +ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y); +ulong8 __ovld __cnfn abs_diff(long8 x, long8 y); +ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y); +ulong16 __ovld __cnfn abs_diff(long16 x, long16 y); +ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y); + +/** + * Returns x + y and saturates the result. 
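+ *
+ * Worked example (illustrative): add_sat((uchar)200, (uchar)100) = 255
+ * (clamped to UCHAR_MAX) instead of wrapping to 44.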
+ */ +char __ovld __cnfn add_sat(char x, char y); +uchar __ovld __cnfn add_sat(uchar x, uchar y); +char2 __ovld __cnfn add_sat(char2 x, char2 y); +uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y); +char3 __ovld __cnfn add_sat(char3 x, char3 y); +uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y); +char4 __ovld __cnfn add_sat(char4 x, char4 y); +uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y); +char8 __ovld __cnfn add_sat(char8 x, char8 y); +uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y); +char16 __ovld __cnfn add_sat(char16 x, char16 y); +uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y); +short __ovld __cnfn add_sat(short x, short y); +ushort __ovld __cnfn add_sat(ushort x, ushort y); +short2 __ovld __cnfn add_sat(short2 x, short2 y); +ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y); +short3 __ovld __cnfn add_sat(short3 x, short3 y); +ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y); +short4 __ovld __cnfn add_sat(short4 x, short4 y); +ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y); +short8 __ovld __cnfn add_sat(short8 x, short8 y); +ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y); +short16 __ovld __cnfn add_sat(short16 x, short16 y); +ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y); +int __ovld __cnfn add_sat(int x, int y); +uint __ovld __cnfn add_sat(uint x, uint y); +int2 __ovld __cnfn add_sat(int2 x, int2 y); +uint2 __ovld __cnfn add_sat(uint2 x, uint2 y); +int3 __ovld __cnfn add_sat(int3 x, int3 y); +uint3 __ovld __cnfn add_sat(uint3 x, uint3 y); +int4 __ovld __cnfn add_sat(int4 x, int4 y); +uint4 __ovld __cnfn add_sat(uint4 x, uint4 y); +int8 __ovld __cnfn add_sat(int8 x, int8 y); +uint8 __ovld __cnfn add_sat(uint8 x, uint8 y); +int16 __ovld __cnfn add_sat(int16 x, int16 y); +uint16 __ovld __cnfn add_sat(uint16 x, uint16 y); +long __ovld __cnfn add_sat(long x, long y); +ulong __ovld __cnfn add_sat(ulong x, ulong y); +long2 __ovld __cnfn add_sat(long2 x, long2 y); +ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y); +long3 __ovld __cnfn add_sat(long3 x, long3 y); +ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y); +long4 __ovld __cnfn add_sat(long4 x, long4 y); +ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y); +long8 __ovld __cnfn add_sat(long8 x, long8 y); +ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y); +long16 __ovld __cnfn add_sat(long16 x, long16 y); +ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y); + +/** + * Returns (x + y) >> 1. The intermediate sum does + * not modulo overflow. 
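+ *
+ * Worked example (illustrative): hadd((uchar)200, (uchar)100) = 150, the
+ * exact average rounded down, even though 200 + 100 does not fit in a uchar.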
+ */ +char __ovld __cnfn hadd(char x, char y); +uchar __ovld __cnfn hadd(uchar x, uchar y); +char2 __ovld __cnfn hadd(char2 x, char2 y); +uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y); +char3 __ovld __cnfn hadd(char3 x, char3 y); +uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y); +char4 __ovld __cnfn hadd(char4 x, char4 y); +uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y); +char8 __ovld __cnfn hadd(char8 x, char8 y); +uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y); +char16 __ovld __cnfn hadd(char16 x, char16 y); +uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y); +short __ovld __cnfn hadd(short x, short y); +ushort __ovld __cnfn hadd(ushort x, ushort y); +short2 __ovld __cnfn hadd(short2 x, short2 y); +ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y); +short3 __ovld __cnfn hadd(short3 x, short3 y); +ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y); +short4 __ovld __cnfn hadd(short4 x, short4 y); +ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y); +short8 __ovld __cnfn hadd(short8 x, short8 y); +ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y); +short16 __ovld __cnfn hadd(short16 x, short16 y); +ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y); +int __ovld __cnfn hadd(int x, int y); +uint __ovld __cnfn hadd(uint x, uint y); +int2 __ovld __cnfn hadd(int2 x, int2 y); +uint2 __ovld __cnfn hadd(uint2 x, uint2 y); +int3 __ovld __cnfn hadd(int3 x, int3 y); +uint3 __ovld __cnfn hadd(uint3 x, uint3 y); +int4 __ovld __cnfn hadd(int4 x, int4 y); +uint4 __ovld __cnfn hadd(uint4 x, uint4 y); +int8 __ovld __cnfn hadd(int8 x, int8 y); +uint8 __ovld __cnfn hadd(uint8 x, uint8 y); +int16 __ovld __cnfn hadd(int16 x, int16 y); +uint16 __ovld __cnfn hadd(uint16 x, uint16 y); +long __ovld __cnfn hadd(long x, long y); +ulong __ovld __cnfn hadd(ulong x, ulong y); +long2 __ovld __cnfn hadd(long2 x, long2 y); +ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y); +long3 __ovld __cnfn hadd(long3 x, long3 y); +ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y); +long4 __ovld __cnfn hadd(long4 x, long4 y); +ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y); +long8 __ovld __cnfn hadd(long8 x, long8 y); +ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y); +long16 __ovld __cnfn hadd(long16 x, long16 y); +ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y); + +/** + * Returns (x + y + 1) >> 1. The intermediate sum + * does not modulo overflow. 
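+ *
+ * Worked example (illustrative): rhadd((uchar)1, (uchar)2) = 2 (average
+ * rounded up), whereas hadd((uchar)1, (uchar)2) = 1 (rounded down).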
+ */ +char __ovld __cnfn rhadd(char x, char y); +uchar __ovld __cnfn rhadd(uchar x, uchar y); +char2 __ovld __cnfn rhadd(char2 x, char2 y); +uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y); +char3 __ovld __cnfn rhadd(char3 x, char3 y); +uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y); +char4 __ovld __cnfn rhadd(char4 x, char4 y); +uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y); +char8 __ovld __cnfn rhadd(char8 x, char8 y); +uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y); +char16 __ovld __cnfn rhadd(char16 x, char16 y); +uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y); +short __ovld __cnfn rhadd(short x, short y); +ushort __ovld __cnfn rhadd(ushort x, ushort y); +short2 __ovld __cnfn rhadd(short2 x, short2 y); +ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y); +short3 __ovld __cnfn rhadd(short3 x, short3 y); +ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y); +short4 __ovld __cnfn rhadd(short4 x, short4 y); +ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y); +short8 __ovld __cnfn rhadd(short8 x, short8 y); +ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y); +short16 __ovld __cnfn rhadd(short16 x, short16 y); +ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y); +int __ovld __cnfn rhadd(int x, int y); +uint __ovld __cnfn rhadd(uint x, uint y); +int2 __ovld __cnfn rhadd(int2 x, int2 y); +uint2 __ovld __cnfn rhadd(uint2 x, uint2 y); +int3 __ovld __cnfn rhadd(int3 x, int3 y); +uint3 __ovld __cnfn rhadd(uint3 x, uint3 y); +int4 __ovld __cnfn rhadd(int4 x, int4 y); +uint4 __ovld __cnfn rhadd(uint4 x, uint4 y); +int8 __ovld __cnfn rhadd(int8 x, int8 y); +uint8 __ovld __cnfn rhadd(uint8 x, uint8 y); +int16 __ovld __cnfn rhadd(int16 x, int16 y); +uint16 __ovld __cnfn rhadd(uint16 x, uint16 y); +long __ovld __cnfn rhadd(long x, long y); +ulong __ovld __cnfn rhadd(ulong x, ulong y); +long2 __ovld __cnfn rhadd(long2 x, long2 y); +ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y); +long3 __ovld __cnfn rhadd(long3 x, long3 y); +ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y); +long4 __ovld __cnfn rhadd(long4 x, long4 y); +ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y); +long8 __ovld __cnfn rhadd(long8 x, long8 y); +ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y); +long16 __ovld __cnfn rhadd(long16 x, long16 y); +ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y); + +/** + * Returns min(max(x, minval), maxval). + * Results are undefined if minval > maxval. 
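+ *
+ * Worked example (illustrative): clamp(300, 0, 255) = 255 and
+ * clamp(-5, 0, 255) = 0; the scalar-bound overloads below apply one
+ * minval/maxval pair to every component of a vector x.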
+ */ +char __ovld __cnfn clamp(char x, char minval, char maxval); +uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval); +char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval); +uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval); +char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval); +uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval); +char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval); +uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval); +char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval); +uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval); +char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval); +uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval); +short __ovld __cnfn clamp(short x, short minval, short maxval); +ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval); +short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval); +ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval); +short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval); +ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval); +short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval); +ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval); +short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval); +ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval); +short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval); +ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval); +int __ovld __cnfn clamp(int x, int minval, int maxval); +uint __ovld __cnfn clamp(uint x, uint minval, uint maxval); +int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval); +uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval); +int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval); +uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval); +int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval); +uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval); +int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval); +uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval); +int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval); +uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval); +long __ovld __cnfn clamp(long x, long minval, long maxval); +ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval); +long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval); +ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval); +long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval); +ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval); +long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval); +ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval); +long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval); +ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval); +long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval); +ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval); +char2 __ovld __cnfn clamp(char2 x, char minval, char maxval); +uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval); +char3 __ovld __cnfn clamp(char3 x, char minval, char maxval); +uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval); +char4 __ovld __cnfn clamp(char4 x, char minval, char maxval); +uchar4 
__ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval); +char8 __ovld __cnfn clamp(char8 x, char minval, char maxval); +uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval); +char16 __ovld __cnfn clamp(char16 x, char minval, char maxval); +uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval); +short2 __ovld __cnfn clamp(short2 x, short minval, short maxval); +ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval); +short3 __ovld __cnfn clamp(short3 x, short minval, short maxval); +ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval); +short4 __ovld __cnfn clamp(short4 x, short minval, short maxval); +ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval); +short8 __ovld __cnfn clamp(short8 x, short minval, short maxval); +ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval); +short16 __ovld __cnfn clamp(short16 x, short minval, short maxval); +ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval); +int2 __ovld __cnfn clamp(int2 x, int minval, int maxval); +uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval); +int3 __ovld __cnfn clamp(int3 x, int minval, int maxval); +uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval); +int4 __ovld __cnfn clamp(int4 x, int minval, int maxval); +uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval); +int8 __ovld __cnfn clamp(int8 x, int minval, int maxval); +uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval); +int16 __ovld __cnfn clamp(int16 x, int minval, int maxval); +uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval); +long2 __ovld __cnfn clamp(long2 x, long minval, long maxval); +ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval); +long3 __ovld __cnfn clamp(long3 x, long minval, long maxval); +ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval); +long4 __ovld __cnfn clamp(long4 x, long minval, long maxval); +ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval); +long8 __ovld __cnfn clamp(long8 x, long minval, long maxval); +ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval); +long16 __ovld __cnfn clamp(long16 x, long minval, long maxval); +ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval); + +/** + * Returns the number of leading 0-bits in x, starting + * at the most significant bit position. 
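+ *
+ * For example (illustrative only): clz((uchar)0x0F) returns 4, since the
+ * top four bits of the 8-bit value are zero; clz((uint)1) returns 31.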
+ */ +char __ovld __cnfn clz(char x); +uchar __ovld __cnfn clz(uchar x); +char2 __ovld __cnfn clz(char2 x); +uchar2 __ovld __cnfn clz(uchar2 x); +char3 __ovld __cnfn clz(char3 x); +uchar3 __ovld __cnfn clz(uchar3 x); +char4 __ovld __cnfn clz(char4 x); +uchar4 __ovld __cnfn clz(uchar4 x); +char8 __ovld __cnfn clz(char8 x); +uchar8 __ovld __cnfn clz(uchar8 x); +char16 __ovld __cnfn clz(char16 x); +uchar16 __ovld __cnfn clz(uchar16 x); +short __ovld __cnfn clz(short x); +ushort __ovld __cnfn clz(ushort x); +short2 __ovld __cnfn clz(short2 x); +ushort2 __ovld __cnfn clz(ushort2 x); +short3 __ovld __cnfn clz(short3 x); +ushort3 __ovld __cnfn clz(ushort3 x); +short4 __ovld __cnfn clz(short4 x); +ushort4 __ovld __cnfn clz(ushort4 x); +short8 __ovld __cnfn clz(short8 x); +ushort8 __ovld __cnfn clz(ushort8 x); +short16 __ovld __cnfn clz(short16 x); +ushort16 __ovld __cnfn clz(ushort16 x); +int __ovld __cnfn clz(int x); +uint __ovld __cnfn clz(uint x); +int2 __ovld __cnfn clz(int2 x); +uint2 __ovld __cnfn clz(uint2 x); +int3 __ovld __cnfn clz(int3 x); +uint3 __ovld __cnfn clz(uint3 x); +int4 __ovld __cnfn clz(int4 x); +uint4 __ovld __cnfn clz(uint4 x); +int8 __ovld __cnfn clz(int8 x); +uint8 __ovld __cnfn clz(uint8 x); +int16 __ovld __cnfn clz(int16 x); +uint16 __ovld __cnfn clz(uint16 x); +long __ovld __cnfn clz(long x); +ulong __ovld __cnfn clz(ulong x); +long2 __ovld __cnfn clz(long2 x); +ulong2 __ovld __cnfn clz(ulong2 x); +long3 __ovld __cnfn clz(long3 x); +ulong3 __ovld __cnfn clz(ulong3 x); +long4 __ovld __cnfn clz(long4 x); +ulong4 __ovld __cnfn clz(ulong4 x); +long8 __ovld __cnfn clz(long8 x); +ulong8 __ovld __cnfn clz(ulong8 x); +long16 __ovld __cnfn clz(long16 x); +ulong16 __ovld __cnfn clz(ulong16 x); + +/** + * Returns the count of trailing 0-bits in x. If x is 0, + * returns the size in bits of the type of x or + * component type of x, if x is a vector. 
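+ *
+ * For example (illustrative only): ctz((uint)8) returns 3, and
+ * ctz((ushort)0) returns 16, the width of the ushort type.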
+ */ +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +char __ovld __cnfn ctz(char x); +uchar __ovld __cnfn ctz(uchar x); +char2 __ovld __cnfn ctz(char2 x); +uchar2 __ovld __cnfn ctz(uchar2 x); +char3 __ovld __cnfn ctz(char3 x); +uchar3 __ovld __cnfn ctz(uchar3 x); +char4 __ovld __cnfn ctz(char4 x); +uchar4 __ovld __cnfn ctz(uchar4 x); +char8 __ovld __cnfn ctz(char8 x); +uchar8 __ovld __cnfn ctz(uchar8 x); +char16 __ovld __cnfn ctz(char16 x); +uchar16 __ovld __cnfn ctz(uchar16 x); +short __ovld __cnfn ctz(short x); +ushort __ovld __cnfn ctz(ushort x); +short2 __ovld __cnfn ctz(short2 x); +ushort2 __ovld __cnfn ctz(ushort2 x); +short3 __ovld __cnfn ctz(short3 x); +ushort3 __ovld __cnfn ctz(ushort3 x); +short4 __ovld __cnfn ctz(short4 x); +ushort4 __ovld __cnfn ctz(ushort4 x); +short8 __ovld __cnfn ctz(short8 x); +ushort8 __ovld __cnfn ctz(ushort8 x); +short16 __ovld __cnfn ctz(short16 x); +ushort16 __ovld __cnfn ctz(ushort16 x); +int __ovld __cnfn ctz(int x); +uint __ovld __cnfn ctz(uint x); +int2 __ovld __cnfn ctz(int2 x); +uint2 __ovld __cnfn ctz(uint2 x); +int3 __ovld __cnfn ctz(int3 x); +uint3 __ovld __cnfn ctz(uint3 x); +int4 __ovld __cnfn ctz(int4 x); +uint4 __ovld __cnfn ctz(uint4 x); +int8 __ovld __cnfn ctz(int8 x); +uint8 __ovld __cnfn ctz(uint8 x); +int16 __ovld __cnfn ctz(int16 x); +uint16 __ovld __cnfn ctz(uint16 x); +long __ovld __cnfn ctz(long x); +ulong __ovld __cnfn ctz(ulong x); +long2 __ovld __cnfn ctz(long2 x); +ulong2 __ovld __cnfn ctz(ulong2 x); +long3 __ovld __cnfn ctz(long3 x); +ulong3 __ovld __cnfn ctz(ulong3 x); +long4 __ovld __cnfn ctz(long4 x); +ulong4 __ovld __cnfn ctz(ulong4 x); +long8 __ovld __cnfn ctz(long8 x); +ulong8 __ovld __cnfn ctz(ulong8 x); +long16 __ovld __cnfn ctz(long16 x); +ulong16 __ovld __cnfn ctz(ulong16 x); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Returns mul_hi(a, b) + c. 
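+ *
+ * For example (illustrative only), with uint arguments:
+ *   mad_hi(0x80000000u, 2u, 5u) == mul_hi(0x80000000u, 2u) + 5u == 6u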
+ */ +char __ovld __cnfn mad_hi(char a, char b, char c); +uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c); +char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn mad_hi(short a, short b, short c); +ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c); +short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn mad_hi(int a, int b, int c); +uint __ovld __cnfn mad_hi(uint a, uint b, uint c); +int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn mad_hi(long a, long b, long c); +ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c); +long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c); + +/** + * Returns a * b + c and saturates the result. 
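+ *
+ * For example (illustrative only): mad_sat((char)100, (char)2, (char)50)
+ * returns 127 (CHAR_MAX), because 100 * 2 + 50 = 250 overflows char.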
+ */ +char __ovld __cnfn mad_sat(char a, char b, char c); +uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c); +char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn mad_sat(short a, short b, short c); +ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c); +short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn mad_sat(int a, int b, int c); +uint __ovld __cnfn mad_sat(uint a, uint b, uint c); +int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn mad_sat(long a, long b, long c); +ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c); +long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c); + +/** + * Returns y if x < y, otherwise it returns x. 
+ */ +char __ovld __cnfn max(char x, char y); +uchar __ovld __cnfn max(uchar x, uchar y); +char2 __ovld __cnfn max(char2 x, char2 y); +uchar2 __ovld __cnfn max(uchar2 x, uchar2 y); +char3 __ovld __cnfn max(char3 x, char3 y); +uchar3 __ovld __cnfn max(uchar3 x, uchar3 y); +char4 __ovld __cnfn max(char4 x, char4 y); +uchar4 __ovld __cnfn max(uchar4 x, uchar4 y); +char8 __ovld __cnfn max(char8 x, char8 y); +uchar8 __ovld __cnfn max(uchar8 x, uchar8 y); +char16 __ovld __cnfn max(char16 x, char16 y); +uchar16 __ovld __cnfn max(uchar16 x, uchar16 y); +short __ovld __cnfn max(short x, short y); +ushort __ovld __cnfn max(ushort x, ushort y); +short2 __ovld __cnfn max(short2 x, short2 y); +ushort2 __ovld __cnfn max(ushort2 x, ushort2 y); +short3 __ovld __cnfn max(short3 x, short3 y); +ushort3 __ovld __cnfn max(ushort3 x, ushort3 y); +short4 __ovld __cnfn max(short4 x, short4 y); +ushort4 __ovld __cnfn max(ushort4 x, ushort4 y); +short8 __ovld __cnfn max(short8 x, short8 y); +ushort8 __ovld __cnfn max(ushort8 x, ushort8 y); +short16 __ovld __cnfn max(short16 x, short16 y); +ushort16 __ovld __cnfn max(ushort16 x, ushort16 y); +int __ovld __cnfn max(int x, int y); +uint __ovld __cnfn max(uint x, uint y); +int2 __ovld __cnfn max(int2 x, int2 y); +uint2 __ovld __cnfn max(uint2 x, uint2 y); +int3 __ovld __cnfn max(int3 x, int3 y); +uint3 __ovld __cnfn max(uint3 x, uint3 y); +int4 __ovld __cnfn max(int4 x, int4 y); +uint4 __ovld __cnfn max(uint4 x, uint4 y); +int8 __ovld __cnfn max(int8 x, int8 y); +uint8 __ovld __cnfn max(uint8 x, uint8 y); +int16 __ovld __cnfn max(int16 x, int16 y); +uint16 __ovld __cnfn max(uint16 x, uint16 y); +long __ovld __cnfn max(long x, long y); +ulong __ovld __cnfn max(ulong x, ulong y); +long2 __ovld __cnfn max(long2 x, long2 y); +ulong2 __ovld __cnfn max(ulong2 x, ulong2 y); +long3 __ovld __cnfn max(long3 x, long3 y); +ulong3 __ovld __cnfn max(ulong3 x, ulong3 y); +long4 __ovld __cnfn max(long4 x, long4 y); +ulong4 __ovld __cnfn max(ulong4 x, ulong4 y); +long8 __ovld __cnfn max(long8 x, long8 y); +ulong8 __ovld __cnfn max(ulong8 x, ulong8 y); +long16 __ovld __cnfn max(long16 x, long16 y); +ulong16 __ovld __cnfn max(ulong16 x, ulong16 y); +char2 __ovld __cnfn max(char2 x, char y); +uchar2 __ovld __cnfn max(uchar2 x, uchar y); +char3 __ovld __cnfn max(char3 x, char y); +uchar3 __ovld __cnfn max(uchar3 x, uchar y); +char4 __ovld __cnfn max(char4 x, char y); +uchar4 __ovld __cnfn max(uchar4 x, uchar y); +char8 __ovld __cnfn max(char8 x, char y); +uchar8 __ovld __cnfn max(uchar8 x, uchar y); +char16 __ovld __cnfn max(char16 x, char y); +uchar16 __ovld __cnfn max(uchar16 x, uchar y); +short2 __ovld __cnfn max(short2 x, short y); +ushort2 __ovld __cnfn max(ushort2 x, ushort y); +short3 __ovld __cnfn max(short3 x, short y); +ushort3 __ovld __cnfn max(ushort3 x, ushort y); +short4 __ovld __cnfn max(short4 x, short y); +ushort4 __ovld __cnfn max(ushort4 x, ushort y); +short8 __ovld __cnfn max(short8 x, short y); +ushort8 __ovld __cnfn max(ushort8 x, ushort y); +short16 __ovld __cnfn max(short16 x, short y); +ushort16 __ovld __cnfn max(ushort16 x, ushort y); +int2 __ovld __cnfn max(int2 x, int y); +uint2 __ovld __cnfn max(uint2 x, uint y); +int3 __ovld __cnfn max(int3 x, int y); +uint3 __ovld __cnfn max(uint3 x, uint y); +int4 __ovld __cnfn max(int4 x, int y); +uint4 __ovld __cnfn max(uint4 x, uint y); +int8 __ovld __cnfn max(int8 x, int y); +uint8 __ovld __cnfn max(uint8 x, uint y); +int16 __ovld __cnfn max(int16 x, int y); +uint16 __ovld __cnfn max(uint16 x, uint y); +long2 __ovld 
__cnfn max(long2 x, long y); +ulong2 __ovld __cnfn max(ulong2 x, ulong y); +long3 __ovld __cnfn max(long3 x, long y); +ulong3 __ovld __cnfn max(ulong3 x, ulong y); +long4 __ovld __cnfn max(long4 x, long y); +ulong4 __ovld __cnfn max(ulong4 x, ulong y); +long8 __ovld __cnfn max(long8 x, long y); +ulong8 __ovld __cnfn max(ulong8 x, ulong y); +long16 __ovld __cnfn max(long16 x, long y); +ulong16 __ovld __cnfn max(ulong16 x, ulong y); + +/** + * Returns y if y < x, otherwise it returns x. + */ +char __ovld __cnfn min(char x, char y); +uchar __ovld __cnfn min(uchar x, uchar y); +char2 __ovld __cnfn min(char2 x, char2 y); +uchar2 __ovld __cnfn min(uchar2 x, uchar2 y); +char3 __ovld __cnfn min(char3 x, char3 y); +uchar3 __ovld __cnfn min(uchar3 x, uchar3 y); +char4 __ovld __cnfn min(char4 x, char4 y); +uchar4 __ovld __cnfn min(uchar4 x, uchar4 y); +char8 __ovld __cnfn min(char8 x, char8 y); +uchar8 __ovld __cnfn min(uchar8 x, uchar8 y); +char16 __ovld __cnfn min(char16 x, char16 y); +uchar16 __ovld __cnfn min(uchar16 x, uchar16 y); +short __ovld __cnfn min(short x, short y); +ushort __ovld __cnfn min(ushort x, ushort y); +short2 __ovld __cnfn min(short2 x, short2 y); +ushort2 __ovld __cnfn min(ushort2 x, ushort2 y); +short3 __ovld __cnfn min(short3 x, short3 y); +ushort3 __ovld __cnfn min(ushort3 x, ushort3 y); +short4 __ovld __cnfn min(short4 x, short4 y); +ushort4 __ovld __cnfn min(ushort4 x, ushort4 y); +short8 __ovld __cnfn min(short8 x, short8 y); +ushort8 __ovld __cnfn min(ushort8 x, ushort8 y); +short16 __ovld __cnfn min(short16 x, short16 y); +ushort16 __ovld __cnfn min(ushort16 x, ushort16 y); +int __ovld __cnfn min(int x, int y); +uint __ovld __cnfn min(uint x, uint y); +int2 __ovld __cnfn min(int2 x, int2 y); +uint2 __ovld __cnfn min(uint2 x, uint2 y); +int3 __ovld __cnfn min(int3 x, int3 y); +uint3 __ovld __cnfn min(uint3 x, uint3 y); +int4 __ovld __cnfn min(int4 x, int4 y); +uint4 __ovld __cnfn min(uint4 x, uint4 y); +int8 __ovld __cnfn min(int8 x, int8 y); +uint8 __ovld __cnfn min(uint8 x, uint8 y); +int16 __ovld __cnfn min(int16 x, int16 y); +uint16 __ovld __cnfn min(uint16 x, uint16 y); +long __ovld __cnfn min(long x, long y); +ulong __ovld __cnfn min(ulong x, ulong y); +long2 __ovld __cnfn min(long2 x, long2 y); +ulong2 __ovld __cnfn min(ulong2 x, ulong2 y); +long3 __ovld __cnfn min(long3 x, long3 y); +ulong3 __ovld __cnfn min(ulong3 x, ulong3 y); +long4 __ovld __cnfn min(long4 x, long4 y); +ulong4 __ovld __cnfn min(ulong4 x, ulong4 y); +long8 __ovld __cnfn min(long8 x, long8 y); +ulong8 __ovld __cnfn min(ulong8 x, ulong8 y); +long16 __ovld __cnfn min(long16 x, long16 y); +ulong16 __ovld __cnfn min(ulong16 x, ulong16 y); +char2 __ovld __cnfn min(char2 x, char y); +uchar2 __ovld __cnfn min(uchar2 x, uchar y); +char3 __ovld __cnfn min(char3 x, char y); +uchar3 __ovld __cnfn min(uchar3 x, uchar y); +char4 __ovld __cnfn min(char4 x, char y); +uchar4 __ovld __cnfn min(uchar4 x, uchar y); +char8 __ovld __cnfn min(char8 x, char y); +uchar8 __ovld __cnfn min(uchar8 x, uchar y); +char16 __ovld __cnfn min(char16 x, char y); +uchar16 __ovld __cnfn min(uchar16 x, uchar y); +short2 __ovld __cnfn min(short2 x, short y); +ushort2 __ovld __cnfn min(ushort2 x, ushort y); +short3 __ovld __cnfn min(short3 x, short y); +ushort3 __ovld __cnfn min(ushort3 x, ushort y); +short4 __ovld __cnfn min(short4 x, short y); +ushort4 __ovld __cnfn min(ushort4 x, ushort y); +short8 __ovld __cnfn min(short8 x, short y); +ushort8 __ovld __cnfn min(ushort8 x, ushort y); +short16 __ovld __cnfn min(short16 x, short 
y); +ushort16 __ovld __cnfn min(ushort16 x, ushort y); +int2 __ovld __cnfn min(int2 x, int y); +uint2 __ovld __cnfn min(uint2 x, uint y); +int3 __ovld __cnfn min(int3 x, int y); +uint3 __ovld __cnfn min(uint3 x, uint y); +int4 __ovld __cnfn min(int4 x, int y); +uint4 __ovld __cnfn min(uint4 x, uint y); +int8 __ovld __cnfn min(int8 x, int y); +uint8 __ovld __cnfn min(uint8 x, uint y); +int16 __ovld __cnfn min(int16 x, int y); +uint16 __ovld __cnfn min(uint16 x, uint y); +long2 __ovld __cnfn min(long2 x, long y); +ulong2 __ovld __cnfn min(ulong2 x, ulong y); +long3 __ovld __cnfn min(long3 x, long y); +ulong3 __ovld __cnfn min(ulong3 x, ulong y); +long4 __ovld __cnfn min(long4 x, long y); +ulong4 __ovld __cnfn min(ulong4 x, ulong y); +long8 __ovld __cnfn min(long8 x, long y); +ulong8 __ovld __cnfn min(ulong8 x, ulong y); +long16 __ovld __cnfn min(long16 x, long y); +ulong16 __ovld __cnfn min(ulong16 x, ulong y); + +/** + * Computes x * y and returns the high half of the + * product of x and y. + */ +char __ovld __cnfn mul_hi(char x, char y); +uchar __ovld __cnfn mul_hi(uchar x, uchar y); +char2 __ovld __cnfn mul_hi(char2 x, char2 y); +uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y); +char3 __ovld __cnfn mul_hi(char3 x, char3 y); +uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y); +char4 __ovld __cnfn mul_hi(char4 x, char4 y); +uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y); +char8 __ovld __cnfn mul_hi(char8 x, char8 y); +uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y); +char16 __ovld __cnfn mul_hi(char16 x, char16 y); +uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y); +short __ovld __cnfn mul_hi(short x, short y); +ushort __ovld __cnfn mul_hi(ushort x, ushort y); +short2 __ovld __cnfn mul_hi(short2 x, short2 y); +ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y); +short3 __ovld __cnfn mul_hi(short3 x, short3 y); +ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y); +short4 __ovld __cnfn mul_hi(short4 x, short4 y); +ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y); +short8 __ovld __cnfn mul_hi(short8 x, short8 y); +ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y); +short16 __ovld __cnfn mul_hi(short16 x, short16 y); +ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y); +int __ovld __cnfn mul_hi(int x, int y); +uint __ovld __cnfn mul_hi(uint x, uint y); +int2 __ovld __cnfn mul_hi(int2 x, int2 y); +uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y); +int3 __ovld __cnfn mul_hi(int3 x, int3 y); +uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y); +int4 __ovld __cnfn mul_hi(int4 x, int4 y); +uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y); +int8 __ovld __cnfn mul_hi(int8 x, int8 y); +uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y); +int16 __ovld __cnfn mul_hi(int16 x, int16 y); +uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y); +long __ovld __cnfn mul_hi(long x, long y); +ulong __ovld __cnfn mul_hi(ulong x, ulong y); +long2 __ovld __cnfn mul_hi(long2 x, long2 y); +ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y); +long3 __ovld __cnfn mul_hi(long3 x, long3 y); +ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y); +long4 __ovld __cnfn mul_hi(long4 x, long4 y); +ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y); +long8 __ovld __cnfn mul_hi(long8 x, long8 y); +ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y); +long16 __ovld __cnfn mul_hi(long16 x, long16 y); +ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y); + +/** + * For each element in v, the bits are shifted left by + * the number of bits given by the corresponding + * element in i (subject to usual shift modulo rules + * described in section 6.3). 
Bits shifted off the left + * side of the element are shifted back in from the + * right. + */ +char __ovld __cnfn rotate(char v, char i); +uchar __ovld __cnfn rotate(uchar v, uchar i); +char2 __ovld __cnfn rotate(char2 v, char2 i); +uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i); +char3 __ovld __cnfn rotate(char3 v, char3 i); +uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i); +char4 __ovld __cnfn rotate(char4 v, char4 i); +uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i); +char8 __ovld __cnfn rotate(char8 v, char8 i); +uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i); +char16 __ovld __cnfn rotate(char16 v, char16 i); +uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i); +short __ovld __cnfn rotate(short v, short i); +ushort __ovld __cnfn rotate(ushort v, ushort i); +short2 __ovld __cnfn rotate(short2 v, short2 i); +ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i); +short3 __ovld __cnfn rotate(short3 v, short3 i); +ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i); +short4 __ovld __cnfn rotate(short4 v, short4 i); +ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i); +short8 __ovld __cnfn rotate(short8 v, short8 i); +ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i); +short16 __ovld __cnfn rotate(short16 v, short16 i); +ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i); +int __ovld __cnfn rotate(int v, int i); +uint __ovld __cnfn rotate(uint v, uint i); +int2 __ovld __cnfn rotate(int2 v, int2 i); +uint2 __ovld __cnfn rotate(uint2 v, uint2 i); +int3 __ovld __cnfn rotate(int3 v, int3 i); +uint3 __ovld __cnfn rotate(uint3 v, uint3 i); +int4 __ovld __cnfn rotate(int4 v, int4 i); +uint4 __ovld __cnfn rotate(uint4 v, uint4 i); +int8 __ovld __cnfn rotate(int8 v, int8 i); +uint8 __ovld __cnfn rotate(uint8 v, uint8 i); +int16 __ovld __cnfn rotate(int16 v, int16 i); +uint16 __ovld __cnfn rotate(uint16 v, uint16 i); +long __ovld __cnfn rotate(long v, long i); +ulong __ovld __cnfn rotate(ulong v, ulong i); +long2 __ovld __cnfn rotate(long2 v, long2 i); +ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i); +long3 __ovld __cnfn rotate(long3 v, long3 i); +ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i); +long4 __ovld __cnfn rotate(long4 v, long4 i); +ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i); +long8 __ovld __cnfn rotate(long8 v, long8 i); +ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i); +long16 __ovld __cnfn rotate(long16 v, long16 i); +ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i); + +/** + * Returns x - y and saturates the result. 
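+ *
+ * For example (illustrative only): sub_sat((uchar)10, (uchar)20) returns 0
+ * rather than wrapping around, and sub_sat((char)-100, (char)100) returns
+ * -128 (CHAR_MIN).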
+ */ +char __ovld __cnfn sub_sat(char x, char y); +uchar __ovld __cnfn sub_sat(uchar x, uchar y); +char2 __ovld __cnfn sub_sat(char2 x, char2 y); +uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y); +char3 __ovld __cnfn sub_sat(char3 x, char3 y); +uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y); +char4 __ovld __cnfn sub_sat(char4 x, char4 y); +uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y); +char8 __ovld __cnfn sub_sat(char8 x, char8 y); +uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y); +char16 __ovld __cnfn sub_sat(char16 x, char16 y); +uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y); +short __ovld __cnfn sub_sat(short x, short y); +ushort __ovld __cnfn sub_sat(ushort x, ushort y); +short2 __ovld __cnfn sub_sat(short2 x, short2 y); +ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y); +short3 __ovld __cnfn sub_sat(short3 x, short3 y); +ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y); +short4 __ovld __cnfn sub_sat(short4 x, short4 y); +ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y); +short8 __ovld __cnfn sub_sat(short8 x, short8 y); +ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y); +short16 __ovld __cnfn sub_sat(short16 x, short16 y); +ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y); +int __ovld __cnfn sub_sat(int x, int y); +uint __ovld __cnfn sub_sat(uint x, uint y); +int2 __ovld __cnfn sub_sat(int2 x, int2 y); +uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y); +int3 __ovld __cnfn sub_sat(int3 x, int3 y); +uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y); +int4 __ovld __cnfn sub_sat(int4 x, int4 y); +uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y); +int8 __ovld __cnfn sub_sat(int8 x, int8 y); +uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y); +int16 __ovld __cnfn sub_sat(int16 x, int16 y); +uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y); +long __ovld __cnfn sub_sat(long x, long y); +ulong __ovld __cnfn sub_sat(ulong x, ulong y); +long2 __ovld __cnfn sub_sat(long2 x, long2 y); +ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y); +long3 __ovld __cnfn sub_sat(long3 x, long3 y); +ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y); +long4 __ovld __cnfn sub_sat(long4 x, long4 y); +ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y); +long8 __ovld __cnfn sub_sat(long8 x, long8 y); +ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y); +long16 __ovld __cnfn sub_sat(long16 x, long16 y); +ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y); + +/** + * result[i] = ((short)hi[i] << 8) | lo[i] + * result[i] = ((ushort)hi[i] << 8) | lo[i] + */ +short __ovld __cnfn upsample(char hi, uchar lo); +ushort __ovld __cnfn upsample(uchar hi, uchar lo); +short2 __ovld __cnfn upsample(char2 hi, uchar2 lo); +short3 __ovld __cnfn upsample(char3 hi, uchar3 lo); +short4 __ovld __cnfn upsample(char4 hi, uchar4 lo); +short8 __ovld __cnfn upsample(char8 hi, uchar8 lo); +short16 __ovld __cnfn upsample(char16 hi, uchar16 lo); +ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo); +ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo); +ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo); +ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo); +ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo); + +/** + * result[i] = ((int)hi[i] << 16) | lo[i] + * result[i] = ((uint)hi[i] << 16) | lo[i] + */ +int __ovld __cnfn upsample(short hi, ushort lo); +uint __ovld __cnfn upsample(ushort hi, ushort lo); +int2 __ovld __cnfn upsample(short2 hi, ushort2 lo); +int3 __ovld __cnfn upsample(short3 hi, ushort3 lo); +int4 __ovld __cnfn upsample(short4 hi, ushort4 lo); +int8 __ovld __cnfn upsample(short8 hi, ushort8 lo); +int16 
__ovld __cnfn upsample(short16 hi, ushort16 lo);
+uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
+uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
+uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
+uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
+uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
+
+/**
+ * result[i] = ((long)hi[i] << 32) | lo[i]
+ * result[i] = ((ulong)hi[i] << 32) | lo[i]
+ */
+long __ovld __cnfn upsample(int hi, uint lo);
+ulong __ovld __cnfn upsample(uint hi, uint lo);
+long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
+long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
+long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
+long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
+long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
+ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
+ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
+ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
+ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
+ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
+
+/**
+ * popcount(x): returns the number of set bits in x
+ */
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+char __ovld __cnfn popcount(char x);
+uchar __ovld __cnfn popcount(uchar x);
+char2 __ovld __cnfn popcount(char2 x);
+uchar2 __ovld __cnfn popcount(uchar2 x);
+char3 __ovld __cnfn popcount(char3 x);
+uchar3 __ovld __cnfn popcount(uchar3 x);
+char4 __ovld __cnfn popcount(char4 x);
+uchar4 __ovld __cnfn popcount(uchar4 x);
+char8 __ovld __cnfn popcount(char8 x);
+uchar8 __ovld __cnfn popcount(uchar8 x);
+char16 __ovld __cnfn popcount(char16 x);
+uchar16 __ovld __cnfn popcount(uchar16 x);
+short __ovld __cnfn popcount(short x);
+ushort __ovld __cnfn popcount(ushort x);
+short2 __ovld __cnfn popcount(short2 x);
+ushort2 __ovld __cnfn popcount(ushort2 x);
+short3 __ovld __cnfn popcount(short3 x);
+ushort3 __ovld __cnfn popcount(ushort3 x);
+short4 __ovld __cnfn popcount(short4 x);
+ushort4 __ovld __cnfn popcount(ushort4 x);
+short8 __ovld __cnfn popcount(short8 x);
+ushort8 __ovld __cnfn popcount(ushort8 x);
+short16 __ovld __cnfn popcount(short16 x);
+ushort16 __ovld __cnfn popcount(ushort16 x);
+int __ovld __cnfn popcount(int x);
+uint __ovld __cnfn popcount(uint x);
+int2 __ovld __cnfn popcount(int2 x);
+uint2 __ovld __cnfn popcount(uint2 x);
+int3 __ovld __cnfn popcount(int3 x);
+uint3 __ovld __cnfn popcount(uint3 x);
+int4 __ovld __cnfn popcount(int4 x);
+uint4 __ovld __cnfn popcount(uint4 x);
+int8 __ovld __cnfn popcount(int8 x);
+uint8 __ovld __cnfn popcount(uint8 x);
+int16 __ovld __cnfn popcount(int16 x);
+uint16 __ovld __cnfn popcount(uint16 x);
+long __ovld __cnfn popcount(long x);
+ulong __ovld __cnfn popcount(ulong x);
+long2 __ovld __cnfn popcount(long2 x);
+ulong2 __ovld __cnfn popcount(ulong2 x);
+long3 __ovld __cnfn popcount(long3 x);
+ulong3 __ovld __cnfn popcount(ulong3 x);
+long4 __ovld __cnfn popcount(long4 x);
+ulong4 __ovld __cnfn popcount(ulong4 x);
+long8 __ovld __cnfn popcount(long8 x);
+ulong8 __ovld __cnfn popcount(ulong8 x);
+long16 __ovld __cnfn popcount(long16 x);
+ulong16 __ovld __cnfn popcount(ulong16 x);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+/**
+ * Multiply two 24-bit integer values x and y and add
+ * the 32-bit integer result to the 32-bit integer z.
+ * Refer to definition of mul24 to see how the 24-bit
+ * integer multiplication is performed.
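+ *
+ * For example (illustrative only): mad24(1000, 1000, 24) returns 1000024;
+ * both multiplicands fit in 24 bits, so the product is exact.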
+ */ +int __ovld __cnfn mad24(int x, int y, int z); +uint __ovld __cnfn mad24(uint x, uint y, uint z); +int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z); +uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z); +int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z); +uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z); +int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z); +uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z); +int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z); +uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z); +int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z); +uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z); + +/** + * Multiply two 24-bit integer values x and y. x and y + * are 32-bit integers but only the low 24-bits are used + * to perform the multiplication. mul24 should only + * be used when values in x and y are in the range [- + * 2^23, 2^23-1] if x and y are signed integers and in the + * range [0, 2^24-1] if x and y are unsigned integers. If + * x and y are not in this range, the multiplication + * result is implementation-defined. + */ +int __ovld __cnfn mul24(int x, int y); +uint __ovld __cnfn mul24(uint x, uint y); +int2 __ovld __cnfn mul24(int2 x, int2 y); +uint2 __ovld __cnfn mul24(uint2 x, uint2 y); +int3 __ovld __cnfn mul24(int3 x, int3 y); +uint3 __ovld __cnfn mul24(uint3 x, uint3 y); +int4 __ovld __cnfn mul24(int4 x, int4 y); +uint4 __ovld __cnfn mul24(uint4 x, uint4 y); +int8 __ovld __cnfn mul24(int8 x, int8 y); +uint8 __ovld __cnfn mul24(uint8 x, uint8 y); +int16 __ovld __cnfn mul24(int16 x, int16 y); +uint16 __ovld __cnfn mul24(uint16 x, uint16 y); + +// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions + +/** + * Returns fmin(fmax(x, minval), maxval). + * Results are undefined if minval > maxval. 
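+ *
+ * For example (illustrative only), using a vector-scalar overload:
+ *   float2 v = clamp((float2)(-1.0f, 2.0f), 0.0f, 1.0f); // (0.0f, 1.0f)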
+ */ +float __ovld __cnfn clamp(float x, float minval, float maxval); +float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval); +float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval); +float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval); +float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval); +float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval); +float2 __ovld __cnfn clamp(float2 x, float minval, float maxval); +float3 __ovld __cnfn clamp(float3 x, float minval, float maxval); +float4 __ovld __cnfn clamp(float4 x, float minval, float maxval); +float8 __ovld __cnfn clamp(float8 x, float minval, float maxval); +float16 __ovld __cnfn clamp(float16 x, float minval, float maxval); +#ifdef cl_khr_fp64 +double __ovld __cnfn clamp(double x, double minval, double maxval); +double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval); +double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval); +double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval); +double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval); +double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval); +double2 __ovld __cnfn clamp(double2 x, double minval, double maxval); +double3 __ovld __cnfn clamp(double3 x, double minval, double maxval); +double4 __ovld __cnfn clamp(double4 x, double minval, double maxval); +double8 __ovld __cnfn clamp(double8 x, double minval, double maxval); +double16 __ovld __cnfn clamp(double16 x, double minval, double maxval); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn clamp(half x, half minval, half maxval); +half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval); +half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval); +half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval); +half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval); +half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval); +half2 __ovld __cnfn clamp(half2 x, half minval, half maxval); +half3 __ovld __cnfn clamp(half3 x, half minval, half maxval); +half4 __ovld __cnfn clamp(half4 x, half minval, half maxval); +half8 __ovld __cnfn clamp(half8 x, half minval, half maxval); +half16 __ovld __cnfn clamp(half16 x, half minval, half maxval); +#endif //cl_khr_fp16 + +/** + * Converts radians to degrees, i.e. (180 / PI) * + * radians. + */ +float __ovld __cnfn degrees(float radians); +float2 __ovld __cnfn degrees(float2 radians); +float3 __ovld __cnfn degrees(float3 radians); +float4 __ovld __cnfn degrees(float4 radians); +float8 __ovld __cnfn degrees(float8 radians); +float16 __ovld __cnfn degrees(float16 radians); +#ifdef cl_khr_fp64 +double __ovld __cnfn degrees(double radians); +double2 __ovld __cnfn degrees(double2 radians); +double3 __ovld __cnfn degrees(double3 radians); +double4 __ovld __cnfn degrees(double4 radians); +double8 __ovld __cnfn degrees(double8 radians); +double16 __ovld __cnfn degrees(double16 radians); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn degrees(half radians); +half2 __ovld __cnfn degrees(half2 radians); +half3 __ovld __cnfn degrees(half3 radians); +half4 __ovld __cnfn degrees(half4 radians); +half8 __ovld __cnfn degrees(half8 radians); +half16 __ovld __cnfn degrees(half16 radians); +#endif //cl_khr_fp16 + +/** + * Returns y if x < y, otherwise it returns x. If x and y + * are infinite or NaN, the return values are undefined. 
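+ *
+ * Editorial note: unlike the math built-in fmax, which returns the non-NaN
+ * argument when exactly one argument is a NaN, max(1.0f, NAN) here has an
+ * undefined result.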
+ */ +float __ovld __cnfn max(float x, float y); +float2 __ovld __cnfn max(float2 x, float2 y); +float3 __ovld __cnfn max(float3 x, float3 y); +float4 __ovld __cnfn max(float4 x, float4 y); +float8 __ovld __cnfn max(float8 x, float8 y); +float16 __ovld __cnfn max(float16 x, float16 y); +float2 __ovld __cnfn max(float2 x, float y); +float3 __ovld __cnfn max(float3 x, float y); +float4 __ovld __cnfn max(float4 x, float y); +float8 __ovld __cnfn max(float8 x, float y); +float16 __ovld __cnfn max(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn max(double x, double y); +double2 __ovld __cnfn max(double2 x, double2 y); +double3 __ovld __cnfn max(double3 x, double3 y); +double4 __ovld __cnfn max(double4 x, double4 y); +double8 __ovld __cnfn max(double8 x, double8 y); +double16 __ovld __cnfn max(double16 x, double16 y); +double2 __ovld __cnfn max(double2 x, double y); +double3 __ovld __cnfn max(double3 x, double y); +double4 __ovld __cnfn max(double4 x, double y); +double8 __ovld __cnfn max(double8 x, double y); +double16 __ovld __cnfn max(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn max(half x, half y); +half2 __ovld __cnfn max(half2 x, half2 y); +half3 __ovld __cnfn max(half3 x, half3 y); +half4 __ovld __cnfn max(half4 x, half4 y); +half8 __ovld __cnfn max(half8 x, half8 y); +half16 __ovld __cnfn max(half16 x, half16 y); +half2 __ovld __cnfn max(half2 x, half y); +half3 __ovld __cnfn max(half3 x, half y); +half4 __ovld __cnfn max(half4 x, half y); +half8 __ovld __cnfn max(half8 x, half y); +half16 __ovld __cnfn max(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Returns y if y < x, otherwise it returns x. If x and y + * are infinite or NaN, the return values are undefined. + */ +float __ovld __cnfn min(float x, float y); +float2 __ovld __cnfn min(float2 x, float2 y); +float3 __ovld __cnfn min(float3 x, float3 y); +float4 __ovld __cnfn min(float4 x, float4 y); +float8 __ovld __cnfn min(float8 x, float8 y); +float16 __ovld __cnfn min(float16 x, float16 y); +float2 __ovld __cnfn min(float2 x, float y); +float3 __ovld __cnfn min(float3 x, float y); +float4 __ovld __cnfn min(float4 x, float y); +float8 __ovld __cnfn min(float8 x, float y); +float16 __ovld __cnfn min(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn min(double x, double y); +double2 __ovld __cnfn min(double2 x, double2 y); +double3 __ovld __cnfn min(double3 x, double3 y); +double4 __ovld __cnfn min(double4 x, double4 y); +double8 __ovld __cnfn min(double8 x, double8 y); +double16 __ovld __cnfn min(double16 x, double16 y); +double2 __ovld __cnfn min(double2 x, double y); +double3 __ovld __cnfn min(double3 x, double y); +double4 __ovld __cnfn min(double4 x, double y); +double8 __ovld __cnfn min(double8 x, double y); +double16 __ovld __cnfn min(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn min(half x, half y); +half2 __ovld __cnfn min(half2 x, half2 y); +half3 __ovld __cnfn min(half3 x, half3 y); +half4 __ovld __cnfn min(half4 x, half4 y); +half8 __ovld __cnfn min(half8 x, half8 y); +half16 __ovld __cnfn min(half16 x, half16 y); +half2 __ovld __cnfn min(half2 x, half y); +half3 __ovld __cnfn min(half3 x, half y); +half4 __ovld __cnfn min(half4 x, half y); +half8 __ovld __cnfn min(half8 x, half y); +half16 __ovld __cnfn min(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Returns the linear blend of x & y implemented as: + * x + (y - x) * a + * a must be a value in the range 0.0 ... 1.0. If a is not + * in the range 0.0 ... 
1.0, the return values are + * undefined. + */ +float __ovld __cnfn mix(float x, float y, float a); +float2 __ovld __cnfn mix(float2 x, float2 y, float2 a); +float3 __ovld __cnfn mix(float3 x, float3 y, float3 a); +float4 __ovld __cnfn mix(float4 x, float4 y, float4 a); +float8 __ovld __cnfn mix(float8 x, float8 y, float8 a); +float16 __ovld __cnfn mix(float16 x, float16 y, float16 a); +float2 __ovld __cnfn mix(float2 x, float2 y, float a); +float3 __ovld __cnfn mix(float3 x, float3 y, float a); +float4 __ovld __cnfn mix(float4 x, float4 y, float a); +float8 __ovld __cnfn mix(float8 x, float8 y, float a); +float16 __ovld __cnfn mix(float16 x, float16 y, float a); +#ifdef cl_khr_fp64 +double __ovld __cnfn mix(double x, double y, double a); +double2 __ovld __cnfn mix(double2 x, double2 y, double2 a); +double3 __ovld __cnfn mix(double3 x, double3 y, double3 a); +double4 __ovld __cnfn mix(double4 x, double4 y, double4 a); +double8 __ovld __cnfn mix(double8 x, double8 y, double8 a); +double16 __ovld __cnfn mix(double16 x, double16 y, double16 a); +double2 __ovld __cnfn mix(double2 x, double2 y, double a); +double3 __ovld __cnfn mix(double3 x, double3 y, double a); +double4 __ovld __cnfn mix(double4 x, double4 y, double a); +double8 __ovld __cnfn mix(double8 x, double8 y, double a); +double16 __ovld __cnfn mix(double16 x, double16 y, double a); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn mix(half x, half y, half a); +half2 __ovld __cnfn mix(half2 x, half2 y, half2 a); +half3 __ovld __cnfn mix(half3 x, half3 y, half3 a); +half4 __ovld __cnfn mix(half4 x, half4 y, half4 a); +half8 __ovld __cnfn mix(half8 x, half8 y, half8 a); +half16 __ovld __cnfn mix(half16 x, half16 y, half16 a); +half2 __ovld __cnfn mix(half2 x, half2 y, half a); +half3 __ovld __cnfn mix(half3 x, half3 y, half a); +half4 __ovld __cnfn mix(half4 x, half4 y, half a); +half8 __ovld __cnfn mix(half8 x, half8 y, half a); +half16 __ovld __cnfn mix(half16 x, half16 y, half a); +#endif //cl_khr_fp16 + +/** + * Converts degrees to radians, i.e. (PI / 180) * + * degrees. + */ +float __ovld __cnfn radians(float degrees); +float2 __ovld __cnfn radians(float2 degrees); +float3 __ovld __cnfn radians(float3 degrees); +float4 __ovld __cnfn radians(float4 degrees); +float8 __ovld __cnfn radians(float8 degrees); +float16 __ovld __cnfn radians(float16 degrees); +#ifdef cl_khr_fp64 +double __ovld __cnfn radians(double degrees); +double2 __ovld __cnfn radians(double2 degrees); +double3 __ovld __cnfn radians(double3 degrees); +double4 __ovld __cnfn radians(double4 degrees); +double8 __ovld __cnfn radians(double8 degrees); +double16 __ovld __cnfn radians(double16 degrees); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn radians(half degrees); +half2 __ovld __cnfn radians(half2 degrees); +half3 __ovld __cnfn radians(half3 degrees); +half4 __ovld __cnfn radians(half4 degrees); +half8 __ovld __cnfn radians(half8 degrees); +half16 __ovld __cnfn radians(half16 degrees); +#endif //cl_khr_fp16 + +/** + * Returns 0.0 if x < edge, otherwise it returns 1.0. 
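+ *
+ * For example (illustrative only): step(0.5f, 0.25f) returns 0.0f and
+ * step(0.5f, 0.75f) returns 1.0f.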
+ */
+float __ovld __cnfn step(float edge, float x);
+float2 __ovld __cnfn step(float2 edge, float2 x);
+float3 __ovld __cnfn step(float3 edge, float3 x);
+float4 __ovld __cnfn step(float4 edge, float4 x);
+float8 __ovld __cnfn step(float8 edge, float8 x);
+float16 __ovld __cnfn step(float16 edge, float16 x);
+float2 __ovld __cnfn step(float edge, float2 x);
+float3 __ovld __cnfn step(float edge, float3 x);
+float4 __ovld __cnfn step(float edge, float4 x);
+float8 __ovld __cnfn step(float edge, float8 x);
+float16 __ovld __cnfn step(float edge, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn step(double edge, double x);
+double2 __ovld __cnfn step(double2 edge, double2 x);
+double3 __ovld __cnfn step(double3 edge, double3 x);
+double4 __ovld __cnfn step(double4 edge, double4 x);
+double8 __ovld __cnfn step(double8 edge, double8 x);
+double16 __ovld __cnfn step(double16 edge, double16 x);
+double2 __ovld __cnfn step(double edge, double2 x);
+double3 __ovld __cnfn step(double edge, double3 x);
+double4 __ovld __cnfn step(double edge, double4 x);
+double8 __ovld __cnfn step(double edge, double8 x);
+double16 __ovld __cnfn step(double edge, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn step(half edge, half x);
+half2 __ovld __cnfn step(half2 edge, half2 x);
+half3 __ovld __cnfn step(half3 edge, half3 x);
+half4 __ovld __cnfn step(half4 edge, half4 x);
+half8 __ovld __cnfn step(half8 edge, half8 x);
+half16 __ovld __cnfn step(half16 edge, half16 x);
+half2 __ovld __cnfn step(half edge, half2 x);
+half3 __ovld __cnfn step(half edge, half3 x);
+half4 __ovld __cnfn step(half edge, half4 x);
+half8 __ovld __cnfn step(half edge, half8 x);
+half16 __ovld __cnfn step(half edge, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+ * performs smooth Hermite interpolation between 0
+ * and 1 when edge0 < x < edge1. This is useful in
+ * cases where you would want a threshold function
+ * with a smooth transition.
+ * This is equivalent to:
+ * gentype t;
+ * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ * Results are undefined if edge0 >= edge1 or if x,
+ * edge0 or edge1 is a NaN.
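+ *
+ * For example (illustrative only): smoothstep(0.0f, 1.0f, 0.5f) returns
+ * 0.5f, since t = 0.5 and t * t * (3 - 2 * t) = 0.25 * 2 = 0.5.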
+ */
+float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
+float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
+float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
+float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
+float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
+float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
+float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
+float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
+float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
+float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
+float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
+double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
+double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
+double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
+double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
+double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
+double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
+double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
+double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
+double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
+double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
+half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
+half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
+half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
+half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
+half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
+half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
+half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
+half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
+half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
+half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
+ * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
+ */
+float __ovld __cnfn sign(float x);
+float2 __ovld __cnfn sign(float2 x);
+float3 __ovld __cnfn sign(float3 x);
+float4 __ovld __cnfn sign(float4 x);
+float8 __ovld __cnfn sign(float8 x);
+float16 __ovld __cnfn sign(float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn sign(double x);
+double2 __ovld __cnfn sign(double2 x);
+double3 __ovld __cnfn sign(double3 x);
+double4 __ovld __cnfn sign(double4 x);
+double8 __ovld __cnfn sign(double8 x);
+double16 __ovld __cnfn sign(double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn sign(half x);
+half2 __ovld __cnfn sign(half2 x);
+half3 __ovld __cnfn sign(half3 x);
+half4 __ovld __cnfn sign(half4 x);
+half8 __ovld __cnfn sign(half8 x);
+half16 __ovld __cnfn sign(half16 x);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
+
+/**
+ * Returns the cross product of p0.xyz and p1.xyz. The
+ * w component of the float4 result will be 0.0.
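+ *
+ * For example (illustrative only):
+ *   float4 z = cross((float4)(1.0f, 0.0f, 0.0f, 0.0f),
+ *                    (float4)(0.0f, 1.0f, 0.0f, 0.0f));
+ *   // z == (float4)(0.0f, 0.0f, 1.0f, 0.0f)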
+ */
+float4 __ovld __cnfn cross(float4 p0, float4 p1);
+float3 __ovld __cnfn cross(float3 p0, float3 p1);
+#ifdef cl_khr_fp64
+double4 __ovld __cnfn cross(double4 p0, double4 p1);
+double3 __ovld __cnfn cross(double3 p0, double3 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half4 __ovld __cnfn cross(half4 p0, half4 p1);
+half3 __ovld __cnfn cross(half3 p0, half3 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Compute dot product.
+ */
+float __ovld __cnfn dot(float p0, float p1);
+float __ovld __cnfn dot(float2 p0, float2 p1);
+float __ovld __cnfn dot(float3 p0, float3 p1);
+float __ovld __cnfn dot(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn dot(double p0, double p1);
+double __ovld __cnfn dot(double2 p0, double2 p1);
+double __ovld __cnfn dot(double3 p0, double3 p1);
+double __ovld __cnfn dot(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn dot(half p0, half p1);
+half __ovld __cnfn dot(half2 p0, half2 p1);
+half __ovld __cnfn dot(half3 p0, half3 p1);
+half __ovld __cnfn dot(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the distance between p0 and p1. This is
+ * calculated as length(p0 - p1).
+ */
+float __ovld __cnfn distance(float p0, float p1);
+float __ovld __cnfn distance(float2 p0, float2 p1);
+float __ovld __cnfn distance(float3 p0, float3 p1);
+float __ovld __cnfn distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn distance(double p0, double p1);
+double __ovld __cnfn distance(double2 p0, double2 p1);
+double __ovld __cnfn distance(double3 p0, double3 p1);
+double __ovld __cnfn distance(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn distance(half p0, half p1);
+half __ovld __cnfn distance(half2 p0, half2 p1);
+half __ovld __cnfn distance(half3 p0, half3 p1);
+half __ovld __cnfn distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Return the length of vector p, i.e.,
+ * sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn length(float p);
+float __ovld __cnfn length(float2 p);
+float __ovld __cnfn length(float3 p);
+float __ovld __cnfn length(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn length(double p);
+double __ovld __cnfn length(double2 p);
+double __ovld __cnfn length(double3 p);
+double __ovld __cnfn length(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn length(half p);
+half __ovld __cnfn length(half2 p);
+half __ovld __cnfn length(half3 p);
+half __ovld __cnfn length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1.
+ */
+float __ovld __cnfn normalize(float p);
+float2 __ovld __cnfn normalize(float2 p);
+float3 __ovld __cnfn normalize(float3 p);
+float4 __ovld __cnfn normalize(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn normalize(double p);
+double2 __ovld __cnfn normalize(double2 p);
+double3 __ovld __cnfn normalize(double3 p);
+double4 __ovld __cnfn normalize(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn normalize(half p);
+half2 __ovld __cnfn normalize(half2 p);
+half3 __ovld __cnfn normalize(half3 p);
+half4 __ovld __cnfn normalize(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns fast_length(p0 - p1).
+ */
+float __ovld __cnfn fast_distance(float p0, float p1);
+float __ovld __cnfn fast_distance(float2 p0, float2 p1);
+float __ovld __cnfn fast_distance(float3 p0, float3 p1);
+float __ovld __cnfn fast_distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_distance(half p0, half p1);
+half __ovld __cnfn fast_distance(half2 p0, half2 p1);
+half __ovld __cnfn fast_distance(half3 p0, half3 p1);
+half __ovld __cnfn fast_distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p computed as:
+ * half_sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn fast_length(float p);
+float __ovld __cnfn fast_length(float2 p);
+float __ovld __cnfn fast_length(float3 p);
+float __ovld __cnfn fast_length(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_length(half p);
+half __ovld __cnfn fast_length(half2 p);
+half __ovld __cnfn fast_length(half3 p);
+half __ovld __cnfn fast_length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1. fast_normalize is computed as:
+ * p * half_rsqrt (p.x^2 + p.y^2 + ... )
+ * The result shall be within 8192 ulps error from the
+ * infinitely precise result of
+ * if (all(p == 0.0f))
+ * result = p;
+ * else
+ * result = p / sqrt (p.x^2 + p.y^2 + ...);
+ * with the following exceptions:
+ * 1) If the sum of squares is greater than FLT_MAX
+ * then the floating-point values in the result
+ * vector are undefined.
+ * 2) If the sum of squares is less than FLT_MIN then
+ * the implementation may return p unchanged.
+ * 3) If the device is in "denorms are flushed to zero"
+ * mode, individual operand elements with magnitude
+ * less than sqrt(FLT_MIN) may be flushed to zero
+ * before proceeding with the calculation.
+ */
+float __ovld __cnfn fast_normalize(float p);
+float2 __ovld __cnfn fast_normalize(float2 p);
+float3 __ovld __cnfn fast_normalize(float3 p);
+float4 __ovld __cnfn fast_normalize(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_normalize(half p);
+half2 __ovld __cnfn fast_normalize(half2 p);
+half3 __ovld __cnfn fast_normalize(half3 p);
+half4 __ovld __cnfn fast_normalize(half4 p);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
+
+/**
+ * intn isequal (floatn x, floatn y)
+ * Returns the component-wise compare of x == y.
+ */
+int __ovld __cnfn isequal(float x, float y);
+int2 __ovld __cnfn isequal(float2 x, float2 y);
+int3 __ovld __cnfn isequal(float3 x, float3 y);
+int4 __ovld __cnfn isequal(float4 x, float4 y);
+int8 __ovld __cnfn isequal(float8 x, float8 y);
+int16 __ovld __cnfn isequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isequal(double x, double y);
+long2 __ovld __cnfn isequal(double2 x, double2 y);
+long3 __ovld __cnfn isequal(double3 x, double3 y);
+long4 __ovld __cnfn isequal(double4 x, double4 y);
+long8 __ovld __cnfn isequal(double8 x, double8 y);
+long16 __ovld __cnfn isequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isequal(half x, half y);
+short2 __ovld __cnfn isequal(half2 x, half2 y);
+short3 __ovld __cnfn isequal(half3 x, half3 y);
+short4 __ovld __cnfn isequal(half4 x, half4 y);
+short8 __ovld __cnfn isequal(half8 x, half8 y);
+short16 __ovld __cnfn isequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x != y.
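+ *
+ * Editorial note: the scalar relational built-ins return 1 for true, while
+ * the vector forms return -1 (all bits set) in each true component; e.g.
+ * isnotequal((float2)(1.0f, 2.0f), (float2)(1.0f, 0.0f)) yields (int2)(0, -1).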
+ */ +int __ovld __cnfn isnotequal(float x, float y); +int2 __ovld __cnfn isnotequal(float2 x, float2 y); +int3 __ovld __cnfn isnotequal(float3 x, float3 y); +int4 __ovld __cnfn isnotequal(float4 x, float4 y); +int8 __ovld __cnfn isnotequal(float8 x, float8 y); +int16 __ovld __cnfn isnotequal(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isnotequal(double x, double y); +long2 __ovld __cnfn isnotequal(double2 x, double2 y); +long3 __ovld __cnfn isnotequal(double3 x, double3 y); +long4 __ovld __cnfn isnotequal(double4 x, double4 y); +long8 __ovld __cnfn isnotequal(double8 x, double8 y); +long16 __ovld __cnfn isnotequal(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isnotequal(half x, half y); +short2 __ovld __cnfn isnotequal(half2 x, half2 y); +short3 __ovld __cnfn isnotequal(half3 x, half3 y); +short4 __ovld __cnfn isnotequal(half4 x, half4 y); +short8 __ovld __cnfn isnotequal(half8 x, half8 y); +short16 __ovld __cnfn isnotequal(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x > y. + */ +int __ovld __cnfn isgreater(float x, float y); +int2 __ovld __cnfn isgreater(float2 x, float2 y); +int3 __ovld __cnfn isgreater(float3 x, float3 y); +int4 __ovld __cnfn isgreater(float4 x, float4 y); +int8 __ovld __cnfn isgreater(float8 x, float8 y); +int16 __ovld __cnfn isgreater(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isgreater(double x, double y); +long2 __ovld __cnfn isgreater(double2 x, double2 y); +long3 __ovld __cnfn isgreater(double3 x, double3 y); +long4 __ovld __cnfn isgreater(double4 x, double4 y); +long8 __ovld __cnfn isgreater(double8 x, double8 y); +long16 __ovld __cnfn isgreater(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isgreater(half x, half y); +short2 __ovld __cnfn isgreater(half2 x, half2 y); +short3 __ovld __cnfn isgreater(half3 x, half3 y); +short4 __ovld __cnfn isgreater(half4 x, half4 y); +short8 __ovld __cnfn isgreater(half8 x, half8 y); +short16 __ovld __cnfn isgreater(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x >= y. + */ +int __ovld __cnfn isgreaterequal(float x, float y); +int2 __ovld __cnfn isgreaterequal(float2 x, float2 y); +int3 __ovld __cnfn isgreaterequal(float3 x, float3 y); +int4 __ovld __cnfn isgreaterequal(float4 x, float4 y); +int8 __ovld __cnfn isgreaterequal(float8 x, float8 y); +int16 __ovld __cnfn isgreaterequal(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isgreaterequal(double x, double y); +long2 __ovld __cnfn isgreaterequal(double2 x, double2 y); +long3 __ovld __cnfn isgreaterequal(double3 x, double3 y); +long4 __ovld __cnfn isgreaterequal(double4 x, double4 y); +long8 __ovld __cnfn isgreaterequal(double8 x, double8 y); +long16 __ovld __cnfn isgreaterequal(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isgreaterequal(half x, half y); +short2 __ovld __cnfn isgreaterequal(half2 x, half2 y); +short3 __ovld __cnfn isgreaterequal(half3 x, half3 y); +short4 __ovld __cnfn isgreaterequal(half4 x, half4 y); +short8 __ovld __cnfn isgreaterequal(half8 x, half8 y); +short16 __ovld __cnfn isgreaterequal(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x < y. 
+ */ +int __ovld __cnfn isless(float x, float y); +int2 __ovld __cnfn isless(float2 x, float2 y); +int3 __ovld __cnfn isless(float3 x, float3 y); +int4 __ovld __cnfn isless(float4 x, float4 y); +int8 __ovld __cnfn isless(float8 x, float8 y); +int16 __ovld __cnfn isless(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isless(double x, double y); +long2 __ovld __cnfn isless(double2 x, double2 y); +long3 __ovld __cnfn isless(double3 x, double3 y); +long4 __ovld __cnfn isless(double4 x, double4 y); +long8 __ovld __cnfn isless(double8 x, double8 y); +long16 __ovld __cnfn isless(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isless(half x, half y); +short2 __ovld __cnfn isless(half2 x, half2 y); +short3 __ovld __cnfn isless(half3 x, half3 y); +short4 __ovld __cnfn isless(half4 x, half4 y); +short8 __ovld __cnfn isless(half8 x, half8 y); +short16 __ovld __cnfn isless(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x <= y. + */ +int __ovld __cnfn islessequal(float x, float y); +int2 __ovld __cnfn islessequal(float2 x, float2 y); +int3 __ovld __cnfn islessequal(float3 x, float3 y); +int4 __ovld __cnfn islessequal(float4 x, float4 y); +int8 __ovld __cnfn islessequal(float8 x, float8 y); +int16 __ovld __cnfn islessequal(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn islessequal(double x, double y); +long2 __ovld __cnfn islessequal(double2 x, double2 y); +long3 __ovld __cnfn islessequal(double3 x, double3 y); +long4 __ovld __cnfn islessequal(double4 x, double4 y); +long8 __ovld __cnfn islessequal(double8 x, double8 y); +long16 __ovld __cnfn islessequal(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn islessequal(half x, half y); +short2 __ovld __cnfn islessequal(half2 x, half2 y); +short3 __ovld __cnfn islessequal(half3 x, half3 y); +short4 __ovld __cnfn islessequal(half4 x, half4 y); +short8 __ovld __cnfn islessequal(half8 x, half8 y); +short16 __ovld __cnfn islessequal(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of + * (x < y) || (x > y) . + */ +int __ovld __cnfn islessgreater(float x, float y); +int2 __ovld __cnfn islessgreater(float2 x, float2 y); +int3 __ovld __cnfn islessgreater(float3 x, float3 y); +int4 __ovld __cnfn islessgreater(float4 x, float4 y); +int8 __ovld __cnfn islessgreater(float8 x, float8 y); +int16 __ovld __cnfn islessgreater(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn islessgreater(double x, double y); +long2 __ovld __cnfn islessgreater(double2 x, double2 y); +long3 __ovld __cnfn islessgreater(double3 x, double3 y); +long4 __ovld __cnfn islessgreater(double4 x, double4 y); +long8 __ovld __cnfn islessgreater(double8 x, double8 y); +long16 __ovld __cnfn islessgreater(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn islessgreater(half x, half y); +short2 __ovld __cnfn islessgreater(half2 x, half2 y); +short3 __ovld __cnfn islessgreater(half3 x, half3 y); +short4 __ovld __cnfn islessgreater(half4 x, half4 y); +short8 __ovld __cnfn islessgreater(half8 x, half8 y); +short16 __ovld __cnfn islessgreater(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Test for finite value. 
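+ *
+ * Example (illustrative, not part of the upstream header text):
+ * finite means neither infinite nor NaN:
+ *   int a = isfinite(0.5f);     // a = 1
+ *   int b = isfinite(INFINITY); // b = 0
+ *   int c = isfinite(NAN);      // c = 0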
+ */ +int __ovld __cnfn isfinite(float); +int2 __ovld __cnfn isfinite(float2); +int3 __ovld __cnfn isfinite(float3); +int4 __ovld __cnfn isfinite(float4); +int8 __ovld __cnfn isfinite(float8); +int16 __ovld __cnfn isfinite(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isfinite(double); +long2 __ovld __cnfn isfinite(double2); +long3 __ovld __cnfn isfinite(double3); +long4 __ovld __cnfn isfinite(double4); +long8 __ovld __cnfn isfinite(double8); +long16 __ovld __cnfn isfinite(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isfinite(half); +short2 __ovld __cnfn isfinite(half2); +short3 __ovld __cnfn isfinite(half3); +short4 __ovld __cnfn isfinite(half4); +short8 __ovld __cnfn isfinite(half8); +short16 __ovld __cnfn isfinite(half16); +#endif //cl_khr_fp16 + +/** + * Test for infinity value (+ve or -ve) . + */ +int __ovld __cnfn isinf(float); +int2 __ovld __cnfn isinf(float2); +int3 __ovld __cnfn isinf(float3); +int4 __ovld __cnfn isinf(float4); +int8 __ovld __cnfn isinf(float8); +int16 __ovld __cnfn isinf(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isinf(double); +long2 __ovld __cnfn isinf(double2); +long3 __ovld __cnfn isinf(double3); +long4 __ovld __cnfn isinf(double4); +long8 __ovld __cnfn isinf(double8); +long16 __ovld __cnfn isinf(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isinf(half); +short2 __ovld __cnfn isinf(half2); +short3 __ovld __cnfn isinf(half3); +short4 __ovld __cnfn isinf(half4); +short8 __ovld __cnfn isinf(half8); +short16 __ovld __cnfn isinf(half16); +#endif //cl_khr_fp16 + +/** + * Test for a NaN. + */ +int __ovld __cnfn isnan(float); +int2 __ovld __cnfn isnan(float2); +int3 __ovld __cnfn isnan(float3); +int4 __ovld __cnfn isnan(float4); +int8 __ovld __cnfn isnan(float8); +int16 __ovld __cnfn isnan(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isnan(double); +long2 __ovld __cnfn isnan(double2); +long3 __ovld __cnfn isnan(double3); +long4 __ovld __cnfn isnan(double4); +long8 __ovld __cnfn isnan(double8); +long16 __ovld __cnfn isnan(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isnan(half); +short2 __ovld __cnfn isnan(half2); +short3 __ovld __cnfn isnan(half3); +short4 __ovld __cnfn isnan(half4); +short8 __ovld __cnfn isnan(half8); +short16 __ovld __cnfn isnan(half16); +#endif //cl_khr_fp16 + +/** + * Test for a normal value. + */ +int __ovld __cnfn isnormal(float); +int2 __ovld __cnfn isnormal(float2); +int3 __ovld __cnfn isnormal(float3); +int4 __ovld __cnfn isnormal(float4); +int8 __ovld __cnfn isnormal(float8); +int16 __ovld __cnfn isnormal(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isnormal(double); +long2 __ovld __cnfn isnormal(double2); +long3 __ovld __cnfn isnormal(double3); +long4 __ovld __cnfn isnormal(double4); +long8 __ovld __cnfn isnormal(double8); +long16 __ovld __cnfn isnormal(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isnormal(half); +short2 __ovld __cnfn isnormal(half2); +short3 __ovld __cnfn isnormal(half3); +short4 __ovld __cnfn isnormal(half4); +short8 __ovld __cnfn isnormal(half8); +short16 __ovld __cnfn isnormal(half16); +#endif //cl_khr_fp16 + +/** + * Test if arguments are ordered. isordered() takes + * arguments x and y, and returns the result + * isequal(x, x) && isequal(y, y). 
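+ *
+ * Example (illustrative, not part of the upstream header text):
+ * a pair is ordered only if neither operand is NaN:
+ *   int a = isordered(1.0f, 2.0f); // a = 1
+ *   int b = isordered(NAN, 2.0f);  // b = 0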
+ */ +int __ovld __cnfn isordered(float x, float y); +int2 __ovld __cnfn isordered(float2 x, float2 y); +int3 __ovld __cnfn isordered(float3 x, float3 y); +int4 __ovld __cnfn isordered(float4 x, float4 y); +int8 __ovld __cnfn isordered(float8 x, float8 y); +int16 __ovld __cnfn isordered(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isordered(double x, double y); +long2 __ovld __cnfn isordered(double2 x, double2 y); +long3 __ovld __cnfn isordered(double3 x, double3 y); +long4 __ovld __cnfn isordered(double4 x, double4 y); +long8 __ovld __cnfn isordered(double8 x, double8 y); +long16 __ovld __cnfn isordered(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isordered(half x, half y); +short2 __ovld __cnfn isordered(half2 x, half2 y); +short3 __ovld __cnfn isordered(half3 x, half3 y); +short4 __ovld __cnfn isordered(half4 x, half4 y); +short8 __ovld __cnfn isordered(half8 x, half8 y); +short16 __ovld __cnfn isordered(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Test if arguments are unordered. isunordered() + * takes arguments x and y, returning non-zero if x or y + * is NaN, and zero otherwise. + */ +int __ovld __cnfn isunordered(float x, float y); +int2 __ovld __cnfn isunordered(float2 x, float2 y); +int3 __ovld __cnfn isunordered(float3 x, float3 y); +int4 __ovld __cnfn isunordered(float4 x, float4 y); +int8 __ovld __cnfn isunordered(float8 x, float8 y); +int16 __ovld __cnfn isunordered(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isunordered(double x, double y); +long2 __ovld __cnfn isunordered(double2 x, double2 y); +long3 __ovld __cnfn isunordered(double3 x, double3 y); +long4 __ovld __cnfn isunordered(double4 x, double4 y); +long8 __ovld __cnfn isunordered(double8 x, double8 y); +long16 __ovld __cnfn isunordered(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isunordered(half x, half y); +short2 __ovld __cnfn isunordered(half2 x, half2 y); +short3 __ovld __cnfn isunordered(half3 x, half3 y); +short4 __ovld __cnfn isunordered(half4 x, half4 y); +short8 __ovld __cnfn isunordered(half8 x, half8 y); +short16 __ovld __cnfn isunordered(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Test for sign bit. The scalar version of the function + * returns a 1 if the sign bit in the float is set else returns + * 0. The vector version of the function returns the + * following for each component in floatn: a -1 if the + * sign bit in the float is set else returns 0. + */ +int __ovld __cnfn signbit(float); +int2 __ovld __cnfn signbit(float2); +int3 __ovld __cnfn signbit(float3); +int4 __ovld __cnfn signbit(float4); +int8 __ovld __cnfn signbit(float8); +int16 __ovld __cnfn signbit(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn signbit(double); +long2 __ovld __cnfn signbit(double2); +long3 __ovld __cnfn signbit(double3); +long4 __ovld __cnfn signbit(double4); +long8 __ovld __cnfn signbit(double8); +long16 __ovld __cnfn signbit(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn signbit(half); +short2 __ovld __cnfn signbit(half2); +short3 __ovld __cnfn signbit(half3); +short4 __ovld __cnfn signbit(half4); +short8 __ovld __cnfn signbit(half8); +short16 __ovld __cnfn signbit(half16); +#endif //cl_khr_fp16 + +/** + * Returns 1 if the most significant bit in any component + * of x is set; otherwise returns 0. 
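+ *
+ * Example (illustrative, not part of the upstream header text):
+ * only the most significant (sign) bit of each component is
+ * tested, so positive non-zero values do not count:
+ *   int a = any((int2)(0, 1));  // a = 0, both MSBs are clear
+ *   int b = any((int2)(0, -1)); // b = 1, -1 has its MSB set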
+ */ +int __ovld __cnfn any(char x); +int __ovld __cnfn any(char2 x); +int __ovld __cnfn any(char3 x); +int __ovld __cnfn any(char4 x); +int __ovld __cnfn any(char8 x); +int __ovld __cnfn any(char16 x); +int __ovld __cnfn any(short x); +int __ovld __cnfn any(short2 x); +int __ovld __cnfn any(short3 x); +int __ovld __cnfn any(short4 x); +int __ovld __cnfn any(short8 x); +int __ovld __cnfn any(short16 x); +int __ovld __cnfn any(int x); +int __ovld __cnfn any(int2 x); +int __ovld __cnfn any(int3 x); +int __ovld __cnfn any(int4 x); +int __ovld __cnfn any(int8 x); +int __ovld __cnfn any(int16 x); +int __ovld __cnfn any(long x); +int __ovld __cnfn any(long2 x); +int __ovld __cnfn any(long3 x); +int __ovld __cnfn any(long4 x); +int __ovld __cnfn any(long8 x); +int __ovld __cnfn any(long16 x); + +/** + * Returns 1 if the most significant bit in all components + * of x is set; otherwise returns 0. + */ +int __ovld __cnfn all(char x); +int __ovld __cnfn all(char2 x); +int __ovld __cnfn all(char3 x); +int __ovld __cnfn all(char4 x); +int __ovld __cnfn all(char8 x); +int __ovld __cnfn all(char16 x); +int __ovld __cnfn all(short x); +int __ovld __cnfn all(short2 x); +int __ovld __cnfn all(short3 x); +int __ovld __cnfn all(short4 x); +int __ovld __cnfn all(short8 x); +int __ovld __cnfn all(short16 x); +int __ovld __cnfn all(int x); +int __ovld __cnfn all(int2 x); +int __ovld __cnfn all(int3 x); +int __ovld __cnfn all(int4 x); +int __ovld __cnfn all(int8 x); +int __ovld __cnfn all(int16 x); +int __ovld __cnfn all(long x); +int __ovld __cnfn all(long2 x); +int __ovld __cnfn all(long3 x); +int __ovld __cnfn all(long4 x); +int __ovld __cnfn all(long8 x); +int __ovld __cnfn all(long16 x); + +/** + * Each bit of the result is the corresponding bit of a if + * the corresponding bit of c is 0. Otherwise it is the + * corresponding bit of b. 
+ */ +char __ovld __cnfn bitselect(char a, char b, char c); +uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c); +char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn bitselect(short a, short b, short c); +ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c); +short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn bitselect(int a, int b, int c); +uint __ovld __cnfn bitselect(uint a, uint b, uint c); +int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn bitselect(long a, long b, long c); +ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c); +long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c); +float __ovld __cnfn bitselect(float a, float b, float c); +float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c); +float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c); +float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c); +float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c); +float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn bitselect(double a, double b, double c); +double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c); +double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c); +double4 __ovld __cnfn 
bitselect(double4 a, double4 b, double4 c); +double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c); +double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn bitselect(half a, half b, half c); +half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c); +half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c); +half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c); +half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c); +half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c); +#endif //cl_khr_fp16 + +/** + * For each component of a vector type, + * result[i] = if MSB of c[i] is set ? b[i] : a[i]. + * For a scalar type, result = c ? b : a. + * b and a must have the same type. + * c must have the same number of elements and bits as a. + */ +char __ovld __cnfn select(char a, char b, char c); +uchar __ovld __cnfn select(uchar a, uchar b, char c); +char2 __ovld __cnfn select(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c); +char3 __ovld __cnfn select(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c); +char4 __ovld __cnfn select(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c); +char8 __ovld __cnfn select(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c); +char16 __ovld __cnfn select(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c); + +short __ovld __cnfn select(short a, short b, short c); +ushort __ovld __cnfn select(ushort a, ushort b, short c); +short2 __ovld __cnfn select(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c); +short3 __ovld __cnfn select(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c); +short4 __ovld __cnfn select(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c); +short8 __ovld __cnfn select(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c); +short16 __ovld __cnfn select(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c); + +int __ovld __cnfn select(int a, int b, int c); +uint __ovld __cnfn select(uint a, uint b, int c); +int2 __ovld __cnfn select(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c); +int3 __ovld __cnfn select(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c); +int4 __ovld __cnfn select(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c); +int8 __ovld __cnfn select(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c); +int16 __ovld __cnfn select(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c); +float __ovld __cnfn select(float a, float b, int c); +float2 __ovld __cnfn select(float2 a, float2 b, int2 c); +float3 __ovld __cnfn select(float3 a, float3 b, int3 c); +float4 __ovld __cnfn select(float4 a, float4 b, int4 c); +float8 __ovld __cnfn select(float8 a, float8 b, int8 c); +float16 __ovld __cnfn select(float16 a, float16 b, int16 c); + +long __ovld __cnfn select(long a, long b, long c); +ulong __ovld __cnfn select(ulong a, ulong b, long c); +long2 __ovld __cnfn select(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c); +long3 __ovld __cnfn select(long3 a, long3 b, long3 c); +ulong3 
__ovld __cnfn select(ulong3 a, ulong3 b, long3 c); +long4 __ovld __cnfn select(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c); +long8 __ovld __cnfn select(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c); +long16 __ovld __cnfn select(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c); + +char __ovld __cnfn select(char a, char b, uchar c); +uchar __ovld __cnfn select(uchar a, uchar b, uchar c); +char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c); + +short __ovld __cnfn select(short a, short b, ushort c); +ushort __ovld __cnfn select(ushort a, ushort b, ushort c); +short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c); + +int __ovld __cnfn select(int a, int b, uint c); +uint __ovld __cnfn select(uint a, uint b, uint c); +int2 __ovld __cnfn select(int2 a, int2 b, uint2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn select(int3 a, int3 b, uint3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn select(int4 a, int4 b, uint4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn select(int8 a, int8 b, uint8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn select(int16 a, int16 b, uint16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c); +float __ovld __cnfn select(float a, float b, uint c); +float2 __ovld __cnfn select(float2 a, float2 b, uint2 c); +float3 __ovld __cnfn select(float3 a, float3 b, uint3 c); +float4 __ovld __cnfn select(float4 a, float4 b, uint4 c); +float8 __ovld __cnfn select(float8 a, float8 b, uint8 c); +float16 __ovld __cnfn select(float16 a, float16 b, uint16 c); + +long __ovld __cnfn select(long a, long b, ulong c); +ulong __ovld __cnfn select(ulong a, ulong b, ulong c); +long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c); +ulong16 __ovld __cnfn select(ulong16 a, 
ulong16 b, ulong16 c); + +#ifdef cl_khr_fp64 +double __ovld __cnfn select(double a, double b, long c); +double2 __ovld __cnfn select(double2 a, double2 b, long2 c); +double3 __ovld __cnfn select(double3 a, double3 b, long3 c); +double4 __ovld __cnfn select(double4 a, double4 b, long4 c); +double8 __ovld __cnfn select(double8 a, double8 b, long8 c); +double16 __ovld __cnfn select(double16 a, double16 b, long16 c); +double __ovld __cnfn select(double a, double b, ulong c); +double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c); +double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c); +double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c); +double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c); +double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn select(half a, half b, short c); +half2 __ovld __cnfn select(half2 a, half2 b, short2 c); +half3 __ovld __cnfn select(half3 a, half3 b, short3 c); +half4 __ovld __cnfn select(half4 a, half4 b, short4 c); +half8 __ovld __cnfn select(half8 a, half8 b, short8 c); +half16 __ovld __cnfn select(half16 a, half16 b, short16 c); +half __ovld __cnfn select(half a, half b, ushort c); +half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c); +half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c); +half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c); +half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c); +half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c); +#endif //cl_khr_fp16 + +// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions +// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type +/** + * Use generic type gentype to indicate the built-in data types + * char, uchar, short, ushort, int, uint, long, ulong, float, + * double or half. + * + * vloadn return sizeof (gentypen) bytes of data read from address (p + (offset * n)). + * + * vstoren write sizeof (gentypen) bytes given by data to address (p + (offset * n)). + * + * The address computed as (p + (offset * n)) must be + * 8-bit aligned if gentype is char, uchar; + * 16-bit aligned if gentype is short, ushort, half; + * 32-bit aligned if gentype is int, uint, float; + * 64-bit aligned if gentype is long, ulong, double. 
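+ *
+ * Minimal usage sketch (illustrative, not part of the upstream
+ * header text; the kernel name scale4 is hypothetical):
+ *   __kernel void scale4(__global const float *src,
+ *                        __global float *dst, float k) {
+ *     size_t i = get_global_id(0);
+ *     float4 v = vload4(i, src); // reads src[4*i] .. src[4*i+3]
+ *     vstore4(v * k, i, dst);    // writes dst[4*i] .. dst[4*i+3]
+ *   }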
+ */ + +char2 __ovld vload2(size_t offset, const __constant char *p); +uchar2 __ovld vload2(size_t offset, const __constant uchar *p); +short2 __ovld vload2(size_t offset, const __constant short *p); +ushort2 __ovld vload2(size_t offset, const __constant ushort *p); +int2 __ovld vload2(size_t offset, const __constant int *p); +uint2 __ovld vload2(size_t offset, const __constant uint *p); +long2 __ovld vload2(size_t offset, const __constant long *p); +ulong2 __ovld vload2(size_t offset, const __constant ulong *p); +float2 __ovld vload2(size_t offset, const __constant float *p); +char3 __ovld vload3(size_t offset, const __constant char *p); +uchar3 __ovld vload3(size_t offset, const __constant uchar *p); +short3 __ovld vload3(size_t offset, const __constant short *p); +ushort3 __ovld vload3(size_t offset, const __constant ushort *p); +int3 __ovld vload3(size_t offset, const __constant int *p); +uint3 __ovld vload3(size_t offset, const __constant uint *p); +long3 __ovld vload3(size_t offset, const __constant long *p); +ulong3 __ovld vload3(size_t offset, const __constant ulong *p); +float3 __ovld vload3(size_t offset, const __constant float *p); +char4 __ovld vload4(size_t offset, const __constant char *p); +uchar4 __ovld vload4(size_t offset, const __constant uchar *p); +short4 __ovld vload4(size_t offset, const __constant short *p); +ushort4 __ovld vload4(size_t offset, const __constant ushort *p); +int4 __ovld vload4(size_t offset, const __constant int *p); +uint4 __ovld vload4(size_t offset, const __constant uint *p); +long4 __ovld vload4(size_t offset, const __constant long *p); +ulong4 __ovld vload4(size_t offset, const __constant ulong *p); +float4 __ovld vload4(size_t offset, const __constant float *p); +char8 __ovld vload8(size_t offset, const __constant char *p); +uchar8 __ovld vload8(size_t offset, const __constant uchar *p); +short8 __ovld vload8(size_t offset, const __constant short *p); +ushort8 __ovld vload8(size_t offset, const __constant ushort *p); +int8 __ovld vload8(size_t offset, const __constant int *p); +uint8 __ovld vload8(size_t offset, const __constant uint *p); +long8 __ovld vload8(size_t offset, const __constant long *p); +ulong8 __ovld vload8(size_t offset, const __constant ulong *p); +float8 __ovld vload8(size_t offset, const __constant float *p); +char16 __ovld vload16(size_t offset, const __constant char *p); +uchar16 __ovld vload16(size_t offset, const __constant uchar *p); +short16 __ovld vload16(size_t offset, const __constant short *p); +ushort16 __ovld vload16(size_t offset, const __constant ushort *p); +int16 __ovld vload16(size_t offset, const __constant int *p); +uint16 __ovld vload16(size_t offset, const __constant uint *p); +long16 __ovld vload16(size_t offset, const __constant long *p); +ulong16 __ovld vload16(size_t offset, const __constant ulong *p); +float16 __ovld vload16(size_t offset, const __constant float *p); +#ifdef cl_khr_fp64 +double2 __ovld vload2(size_t offset, const __constant double *p); +double3 __ovld vload3(size_t offset, const __constant double *p); +double4 __ovld vload4(size_t offset, const __constant double *p); +double8 __ovld vload8(size_t offset, const __constant double *p); +double16 __ovld vload16(size_t offset, const __constant double *p); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld vload(size_t offset, const __constant half *p); +half2 __ovld vload2(size_t offset, const __constant half *p); +half3 __ovld vload3(size_t offset, const __constant half *p); +half4 __ovld vload4(size_t offset, const __constant 
half *p); +half8 __ovld vload8(size_t offset, const __constant half *p); +half16 __ovld vload16(size_t offset, const __constant half *p); +#endif //cl_khr_fp16 + +#if defined(__opencl_c_generic_address_space) +char2 __ovld vload2(size_t offset, const char *p); +uchar2 __ovld vload2(size_t offset, const uchar *p); +short2 __ovld vload2(size_t offset, const short *p); +ushort2 __ovld vload2(size_t offset, const ushort *p); +int2 __ovld vload2(size_t offset, const int *p); +uint2 __ovld vload2(size_t offset, const uint *p); +long2 __ovld vload2(size_t offset, const long *p); +ulong2 __ovld vload2(size_t offset, const ulong *p); +float2 __ovld vload2(size_t offset, const float *p); +char3 __ovld vload3(size_t offset, const char *p); +uchar3 __ovld vload3(size_t offset, const uchar *p); +short3 __ovld vload3(size_t offset, const short *p); +ushort3 __ovld vload3(size_t offset, const ushort *p); +int3 __ovld vload3(size_t offset, const int *p); +uint3 __ovld vload3(size_t offset, const uint *p); +long3 __ovld vload3(size_t offset, const long *p); +ulong3 __ovld vload3(size_t offset, const ulong *p); +float3 __ovld vload3(size_t offset, const float *p); +char4 __ovld vload4(size_t offset, const char *p); +uchar4 __ovld vload4(size_t offset, const uchar *p); +short4 __ovld vload4(size_t offset, const short *p); +ushort4 __ovld vload4(size_t offset, const ushort *p); +int4 __ovld vload4(size_t offset, const int *p); +uint4 __ovld vload4(size_t offset, const uint *p); +long4 __ovld vload4(size_t offset, const long *p); +ulong4 __ovld vload4(size_t offset, const ulong *p); +float4 __ovld vload4(size_t offset, const float *p); +char8 __ovld vload8(size_t offset, const char *p); +uchar8 __ovld vload8(size_t offset, const uchar *p); +short8 __ovld vload8(size_t offset, const short *p); +ushort8 __ovld vload8(size_t offset, const ushort *p); +int8 __ovld vload8(size_t offset, const int *p); +uint8 __ovld vload8(size_t offset, const uint *p); +long8 __ovld vload8(size_t offset, const long *p); +ulong8 __ovld vload8(size_t offset, const ulong *p); +float8 __ovld vload8(size_t offset, const float *p); +char16 __ovld vload16(size_t offset, const char *p); +uchar16 __ovld vload16(size_t offset, const uchar *p); +short16 __ovld vload16(size_t offset, const short *p); +ushort16 __ovld vload16(size_t offset, const ushort *p); +int16 __ovld vload16(size_t offset, const int *p); +uint16 __ovld vload16(size_t offset, const uint *p); +long16 __ovld vload16(size_t offset, const long *p); +ulong16 __ovld vload16(size_t offset, const ulong *p); +float16 __ovld vload16(size_t offset, const float *p); + +#ifdef cl_khr_fp64 +double2 __ovld vload2(size_t offset, const double *p); +double3 __ovld vload3(size_t offset, const double *p); +double4 __ovld vload4(size_t offset, const double *p); +double8 __ovld vload8(size_t offset, const double *p); +double16 __ovld vload16(size_t offset, const double *p); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld vload(size_t offset, const half *p); +half2 __ovld vload2(size_t offset, const half *p); +half3 __ovld vload3(size_t offset, const half *p); +half4 __ovld vload4(size_t offset, const half *p); +half8 __ovld vload8(size_t offset, const half *p); +half16 __ovld vload16(size_t offset, const half *p); +#endif //cl_khr_fp16 +#else +char2 __ovld vload2(size_t offset, const __global char *p); +uchar2 __ovld vload2(size_t offset, const __global uchar *p); +short2 __ovld vload2(size_t offset, const __global short *p); +ushort2 __ovld vload2(size_t offset, const __global ushort 
*p); +int2 __ovld vload2(size_t offset, const __global int *p); +uint2 __ovld vload2(size_t offset, const __global uint *p); +long2 __ovld vload2(size_t offset, const __global long *p); +ulong2 __ovld vload2(size_t offset, const __global ulong *p); +float2 __ovld vload2(size_t offset, const __global float *p); +char3 __ovld vload3(size_t offset, const __global char *p); +uchar3 __ovld vload3(size_t offset, const __global uchar *p); +short3 __ovld vload3(size_t offset, const __global short *p); +ushort3 __ovld vload3(size_t offset, const __global ushort *p); +int3 __ovld vload3(size_t offset, const __global int *p); +uint3 __ovld vload3(size_t offset, const __global uint *p); +long3 __ovld vload3(size_t offset, const __global long *p); +ulong3 __ovld vload3(size_t offset, const __global ulong *p); +float3 __ovld vload3(size_t offset, const __global float *p); +char4 __ovld vload4(size_t offset, const __global char *p); +uchar4 __ovld vload4(size_t offset, const __global uchar *p); +short4 __ovld vload4(size_t offset, const __global short *p); +ushort4 __ovld vload4(size_t offset, const __global ushort *p); +int4 __ovld vload4(size_t offset, const __global int *p); +uint4 __ovld vload4(size_t offset, const __global uint *p); +long4 __ovld vload4(size_t offset, const __global long *p); +ulong4 __ovld vload4(size_t offset, const __global ulong *p); +float4 __ovld vload4(size_t offset, const __global float *p); +char8 __ovld vload8(size_t offset, const __global char *p); +uchar8 __ovld vload8(size_t offset, const __global uchar *p); +short8 __ovld vload8(size_t offset, const __global short *p); +ushort8 __ovld vload8(size_t offset, const __global ushort *p); +int8 __ovld vload8(size_t offset, const __global int *p); +uint8 __ovld vload8(size_t offset, const __global uint *p); +long8 __ovld vload8(size_t offset, const __global long *p); +ulong8 __ovld vload8(size_t offset, const __global ulong *p); +float8 __ovld vload8(size_t offset, const __global float *p); +char16 __ovld vload16(size_t offset, const __global char *p); +uchar16 __ovld vload16(size_t offset, const __global uchar *p); +short16 __ovld vload16(size_t offset, const __global short *p); +ushort16 __ovld vload16(size_t offset, const __global ushort *p); +int16 __ovld vload16(size_t offset, const __global int *p); +uint16 __ovld vload16(size_t offset, const __global uint *p); +long16 __ovld vload16(size_t offset, const __global long *p); +ulong16 __ovld vload16(size_t offset, const __global ulong *p); +float16 __ovld vload16(size_t offset, const __global float *p); +char2 __ovld vload2(size_t offset, const __local char *p); +uchar2 __ovld vload2(size_t offset, const __local uchar *p); +short2 __ovld vload2(size_t offset, const __local short *p); +ushort2 __ovld vload2(size_t offset, const __local ushort *p); +int2 __ovld vload2(size_t offset, const __local int *p); +uint2 __ovld vload2(size_t offset, const __local uint *p); +long2 __ovld vload2(size_t offset, const __local long *p); +ulong2 __ovld vload2(size_t offset, const __local ulong *p); +float2 __ovld vload2(size_t offset, const __local float *p); +char3 __ovld vload3(size_t offset, const __local char *p); +uchar3 __ovld vload3(size_t offset, const __local uchar *p); +short3 __ovld vload3(size_t offset, const __local short *p); +ushort3 __ovld vload3(size_t offset, const __local ushort *p); +int3 __ovld vload3(size_t offset, const __local int *p); +uint3 __ovld vload3(size_t offset, const __local uint *p); +long3 __ovld vload3(size_t offset, const __local long *p); +ulong3 
__ovld vload3(size_t offset, const __local ulong *p); +float3 __ovld vload3(size_t offset, const __local float *p); +char4 __ovld vload4(size_t offset, const __local char *p); +uchar4 __ovld vload4(size_t offset, const __local uchar *p); +short4 __ovld vload4(size_t offset, const __local short *p); +ushort4 __ovld vload4(size_t offset, const __local ushort *p); +int4 __ovld vload4(size_t offset, const __local int *p); +uint4 __ovld vload4(size_t offset, const __local uint *p); +long4 __ovld vload4(size_t offset, const __local long *p); +ulong4 __ovld vload4(size_t offset, const __local ulong *p); +float4 __ovld vload4(size_t offset, const __local float *p); +char8 __ovld vload8(size_t offset, const __local char *p); +uchar8 __ovld vload8(size_t offset, const __local uchar *p); +short8 __ovld vload8(size_t offset, const __local short *p); +ushort8 __ovld vload8(size_t offset, const __local ushort *p); +int8 __ovld vload8(size_t offset, const __local int *p); +uint8 __ovld vload8(size_t offset, const __local uint *p); +long8 __ovld vload8(size_t offset, const __local long *p); +ulong8 __ovld vload8(size_t offset, const __local ulong *p); +float8 __ovld vload8(size_t offset, const __local float *p); +char16 __ovld vload16(size_t offset, const __local char *p); +uchar16 __ovld vload16(size_t offset, const __local uchar *p); +short16 __ovld vload16(size_t offset, const __local short *p); +ushort16 __ovld vload16(size_t offset, const __local ushort *p); +int16 __ovld vload16(size_t offset, const __local int *p); +uint16 __ovld vload16(size_t offset, const __local uint *p); +long16 __ovld vload16(size_t offset, const __local long *p); +ulong16 __ovld vload16(size_t offset, const __local ulong *p); +float16 __ovld vload16(size_t offset, const __local float *p); +char2 __ovld vload2(size_t offset, const __private char *p); +uchar2 __ovld vload2(size_t offset, const __private uchar *p); +short2 __ovld vload2(size_t offset, const __private short *p); +ushort2 __ovld vload2(size_t offset, const __private ushort *p); +int2 __ovld vload2(size_t offset, const __private int *p); +uint2 __ovld vload2(size_t offset, const __private uint *p); +long2 __ovld vload2(size_t offset, const __private long *p); +ulong2 __ovld vload2(size_t offset, const __private ulong *p); +float2 __ovld vload2(size_t offset, const __private float *p); +char3 __ovld vload3(size_t offset, const __private char *p); +uchar3 __ovld vload3(size_t offset, const __private uchar *p); +short3 __ovld vload3(size_t offset, const __private short *p); +ushort3 __ovld vload3(size_t offset, const __private ushort *p); +int3 __ovld vload3(size_t offset, const __private int *p); +uint3 __ovld vload3(size_t offset, const __private uint *p); +long3 __ovld vload3(size_t offset, const __private long *p); +ulong3 __ovld vload3(size_t offset, const __private ulong *p); +float3 __ovld vload3(size_t offset, const __private float *p); +char4 __ovld vload4(size_t offset, const __private char *p); +uchar4 __ovld vload4(size_t offset, const __private uchar *p); +short4 __ovld vload4(size_t offset, const __private short *p); +ushort4 __ovld vload4(size_t offset, const __private ushort *p); +int4 __ovld vload4(size_t offset, const __private int *p); +uint4 __ovld vload4(size_t offset, const __private uint *p); +long4 __ovld vload4(size_t offset, const __private long *p); +ulong4 __ovld vload4(size_t offset, const __private ulong *p); +float4 __ovld vload4(size_t offset, const __private float *p); +char8 __ovld vload8(size_t offset, const __private char *p); 
+uchar8 __ovld vload8(size_t offset, const __private uchar *p); +short8 __ovld vload8(size_t offset, const __private short *p); +ushort8 __ovld vload8(size_t offset, const __private ushort *p); +int8 __ovld vload8(size_t offset, const __private int *p); +uint8 __ovld vload8(size_t offset, const __private uint *p); +long8 __ovld vload8(size_t offset, const __private long *p); +ulong8 __ovld vload8(size_t offset, const __private ulong *p); +float8 __ovld vload8(size_t offset, const __private float *p); +char16 __ovld vload16(size_t offset, const __private char *p); +uchar16 __ovld vload16(size_t offset, const __private uchar *p); +short16 __ovld vload16(size_t offset, const __private short *p); +ushort16 __ovld vload16(size_t offset, const __private ushort *p); +int16 __ovld vload16(size_t offset, const __private int *p); +uint16 __ovld vload16(size_t offset, const __private uint *p); +long16 __ovld vload16(size_t offset, const __private long *p); +ulong16 __ovld vload16(size_t offset, const __private ulong *p); +float16 __ovld vload16(size_t offset, const __private float *p); + +#ifdef cl_khr_fp64 +double2 __ovld vload2(size_t offset, const __global double *p); +double3 __ovld vload3(size_t offset, const __global double *p); +double4 __ovld vload4(size_t offset, const __global double *p); +double8 __ovld vload8(size_t offset, const __global double *p); +double16 __ovld vload16(size_t offset, const __global double *p); +double2 __ovld vload2(size_t offset, const __local double *p); +double3 __ovld vload3(size_t offset, const __local double *p); +double4 __ovld vload4(size_t offset, const __local double *p); +double8 __ovld vload8(size_t offset, const __local double *p); +double16 __ovld vload16(size_t offset, const __local double *p); +double2 __ovld vload2(size_t offset, const __private double *p); +double3 __ovld vload3(size_t offset, const __private double *p); +double4 __ovld vload4(size_t offset, const __private double *p); +double8 __ovld vload8(size_t offset, const __private double *p); +double16 __ovld vload16(size_t offset, const __private double *p); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld vload(size_t offset, const __global half *p); +half2 __ovld vload2(size_t offset, const __global half *p); +half3 __ovld vload3(size_t offset, const __global half *p); +half4 __ovld vload4(size_t offset, const __global half *p); +half8 __ovld vload8(size_t offset, const __global half *p); +half16 __ovld vload16(size_t offset, const __global half *p); +half __ovld vload(size_t offset, const __local half *p); +half2 __ovld vload2(size_t offset, const __local half *p); +half3 __ovld vload3(size_t offset, const __local half *p); +half4 __ovld vload4(size_t offset, const __local half *p); +half8 __ovld vload8(size_t offset, const __local half *p); +half16 __ovld vload16(size_t offset, const __local half *p); +half __ovld vload(size_t offset, const __private half *p); +half2 __ovld vload2(size_t offset, const __private half *p); +half3 __ovld vload3(size_t offset, const __private half *p); +half4 __ovld vload4(size_t offset, const __private half *p); +half8 __ovld vload8(size_t offset, const __private half *p); +half16 __ovld vload16(size_t offset, const __private half *p); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +#if defined(__opencl_c_generic_address_space) +void __ovld vstore2(char2 data, size_t offset, char *p); +void __ovld vstore2(uchar2 data, size_t offset, uchar *p); +void __ovld vstore2(short2 data, size_t offset, short *p); +void __ovld 
vstore2(ushort2 data, size_t offset, ushort *p); +void __ovld vstore2(int2 data, size_t offset, int *p); +void __ovld vstore2(uint2 data, size_t offset, uint *p); +void __ovld vstore2(long2 data, size_t offset, long *p); +void __ovld vstore2(ulong2 data, size_t offset, ulong *p); +void __ovld vstore2(float2 data, size_t offset, float *p); +void __ovld vstore3(char3 data, size_t offset, char *p); +void __ovld vstore3(uchar3 data, size_t offset, uchar *p); +void __ovld vstore3(short3 data, size_t offset, short *p); +void __ovld vstore3(ushort3 data, size_t offset, ushort *p); +void __ovld vstore3(int3 data, size_t offset, int *p); +void __ovld vstore3(uint3 data, size_t offset, uint *p); +void __ovld vstore3(long3 data, size_t offset, long *p); +void __ovld vstore3(ulong3 data, size_t offset, ulong *p); +void __ovld vstore3(float3 data, size_t offset, float *p); +void __ovld vstore4(char4 data, size_t offset, char *p); +void __ovld vstore4(uchar4 data, size_t offset, uchar *p); +void __ovld vstore4(short4 data, size_t offset, short *p); +void __ovld vstore4(ushort4 data, size_t offset, ushort *p); +void __ovld vstore4(int4 data, size_t offset, int *p); +void __ovld vstore4(uint4 data, size_t offset, uint *p); +void __ovld vstore4(long4 data, size_t offset, long *p); +void __ovld vstore4(ulong4 data, size_t offset, ulong *p); +void __ovld vstore4(float4 data, size_t offset, float *p); +void __ovld vstore8(char8 data, size_t offset, char *p); +void __ovld vstore8(uchar8 data, size_t offset, uchar *p); +void __ovld vstore8(short8 data, size_t offset, short *p); +void __ovld vstore8(ushort8 data, size_t offset, ushort *p); +void __ovld vstore8(int8 data, size_t offset, int *p); +void __ovld vstore8(uint8 data, size_t offset, uint *p); +void __ovld vstore8(long8 data, size_t offset, long *p); +void __ovld vstore8(ulong8 data, size_t offset, ulong *p); +void __ovld vstore8(float8 data, size_t offset, float *p); +void __ovld vstore16(char16 data, size_t offset, char *p); +void __ovld vstore16(uchar16 data, size_t offset, uchar *p); +void __ovld vstore16(short16 data, size_t offset, short *p); +void __ovld vstore16(ushort16 data, size_t offset, ushort *p); +void __ovld vstore16(int16 data, size_t offset, int *p); +void __ovld vstore16(uint16 data, size_t offset, uint *p); +void __ovld vstore16(long16 data, size_t offset, long *p); +void __ovld vstore16(ulong16 data, size_t offset, ulong *p); +void __ovld vstore16(float16 data, size_t offset, float *p); +#ifdef cl_khr_fp64 +void __ovld vstore2(double2 data, size_t offset, double *p); +void __ovld vstore3(double3 data, size_t offset, double *p); +void __ovld vstore4(double4 data, size_t offset, double *p); +void __ovld vstore8(double8 data, size_t offset, double *p); +void __ovld vstore16(double16 data, size_t offset, double *p); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +void __ovld vstore(half data, size_t offset, half *p); +void __ovld vstore2(half2 data, size_t offset, half *p); +void __ovld vstore3(half3 data, size_t offset, half *p); +void __ovld vstore4(half4 data, size_t offset, half *p); +void __ovld vstore8(half8 data, size_t offset, half *p); +void __ovld vstore16(half16 data, size_t offset, half *p); +#endif //cl_khr_fp16 +#else +void __ovld vstore2(char2 data, size_t offset, __global char *p); +void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p); +void __ovld vstore2(short2 data, size_t offset, __global short *p); +void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p); +void __ovld vstore2(int2 data, size_t 
offset, __global int *p); +void __ovld vstore2(uint2 data, size_t offset, __global uint *p); +void __ovld vstore2(long2 data, size_t offset, __global long *p); +void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p); +void __ovld vstore2(float2 data, size_t offset, __global float *p); +void __ovld vstore3(char3 data, size_t offset, __global char *p); +void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p); +void __ovld vstore3(short3 data, size_t offset, __global short *p); +void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p); +void __ovld vstore3(int3 data, size_t offset, __global int *p); +void __ovld vstore3(uint3 data, size_t offset, __global uint *p); +void __ovld vstore3(long3 data, size_t offset, __global long *p); +void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p); +void __ovld vstore3(float3 data, size_t offset, __global float *p); +void __ovld vstore4(char4 data, size_t offset, __global char *p); +void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p); +void __ovld vstore4(short4 data, size_t offset, __global short *p); +void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p); +void __ovld vstore4(int4 data, size_t offset, __global int *p); +void __ovld vstore4(uint4 data, size_t offset, __global uint *p); +void __ovld vstore4(long4 data, size_t offset, __global long *p); +void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p); +void __ovld vstore4(float4 data, size_t offset, __global float *p); +void __ovld vstore8(char8 data, size_t offset, __global char *p); +void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p); +void __ovld vstore8(short8 data, size_t offset, __global short *p); +void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p); +void __ovld vstore8(int8 data, size_t offset, __global int *p); +void __ovld vstore8(uint8 data, size_t offset, __global uint *p); +void __ovld vstore8(long8 data, size_t offset, __global long *p); +void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p); +void __ovld vstore8(float8 data, size_t offset, __global float *p); +void __ovld vstore16(char16 data, size_t offset, __global char *p); +void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p); +void __ovld vstore16(short16 data, size_t offset, __global short *p); +void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p); +void __ovld vstore16(int16 data, size_t offset, __global int *p); +void __ovld vstore16(uint16 data, size_t offset, __global uint *p); +void __ovld vstore16(long16 data, size_t offset, __global long *p); +void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p); +void __ovld vstore16(float16 data, size_t offset, __global float *p); +void __ovld vstore2(char2 data, size_t offset, __local char *p); +void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p); +void __ovld vstore2(short2 data, size_t offset, __local short *p); +void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p); +void __ovld vstore2(int2 data, size_t offset, __local int *p); +void __ovld vstore2(uint2 data, size_t offset, __local uint *p); +void __ovld vstore2(long2 data, size_t offset, __local long *p); +void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p); +void __ovld vstore2(float2 data, size_t offset, __local float *p); +void __ovld vstore3(char3 data, size_t offset, __local char *p); +void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p); +void __ovld vstore3(short3 data, size_t offset, 
__local short *p); +void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p); +void __ovld vstore3(int3 data, size_t offset, __local int *p); +void __ovld vstore3(uint3 data, size_t offset, __local uint *p); +void __ovld vstore3(long3 data, size_t offset, __local long *p); +void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p); +void __ovld vstore3(float3 data, size_t offset, __local float *p); +void __ovld vstore4(char4 data, size_t offset, __local char *p); +void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p); +void __ovld vstore4(short4 data, size_t offset, __local short *p); +void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p); +void __ovld vstore4(int4 data, size_t offset, __local int *p); +void __ovld vstore4(uint4 data, size_t offset, __local uint *p); +void __ovld vstore4(long4 data, size_t offset, __local long *p); +void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p); +void __ovld vstore4(float4 data, size_t offset, __local float *p); +void __ovld vstore8(char8 data, size_t offset, __local char *p); +void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p); +void __ovld vstore8(short8 data, size_t offset, __local short *p); +void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p); +void __ovld vstore8(int8 data, size_t offset, __local int *p); +void __ovld vstore8(uint8 data, size_t offset, __local uint *p); +void __ovld vstore8(long8 data, size_t offset, __local long *p); +void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p); +void __ovld vstore8(float8 data, size_t offset, __local float *p); +void __ovld vstore16(char16 data, size_t offset, __local char *p); +void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p); +void __ovld vstore16(short16 data, size_t offset, __local short *p); +void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p); +void __ovld vstore16(int16 data, size_t offset, __local int *p); +void __ovld vstore16(uint16 data, size_t offset, __local uint *p); +void __ovld vstore16(long16 data, size_t offset, __local long *p); +void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p); +void __ovld vstore16(float16 data, size_t offset, __local float *p); +void __ovld vstore2(char2 data, size_t offset, __private char *p); +void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p); +void __ovld vstore2(short2 data, size_t offset, __private short *p); +void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p); +void __ovld vstore2(int2 data, size_t offset, __private int *p); +void __ovld vstore2(uint2 data, size_t offset, __private uint *p); +void __ovld vstore2(long2 data, size_t offset, __private long *p); +void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p); +void __ovld vstore2(float2 data, size_t offset, __private float *p); +void __ovld vstore3(char3 data, size_t offset, __private char *p); +void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p); +void __ovld vstore3(short3 data, size_t offset, __private short *p); +void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p); +void __ovld vstore3(int3 data, size_t offset, __private int *p); +void __ovld vstore3(uint3 data, size_t offset, __private uint *p); +void __ovld vstore3(long3 data, size_t offset, __private long *p); +void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p); +void __ovld vstore3(float3 data, size_t offset, __private float *p); +void __ovld vstore4(char4 data, size_t offset, __private 
char *p); +void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p); +void __ovld vstore4(short4 data, size_t offset, __private short *p); +void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p); +void __ovld vstore4(int4 data, size_t offset, __private int *p); +void __ovld vstore4(uint4 data, size_t offset, __private uint *p); +void __ovld vstore4(long4 data, size_t offset, __private long *p); +void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p); +void __ovld vstore4(float4 data, size_t offset, __private float *p); +void __ovld vstore8(char8 data, size_t offset, __private char *p); +void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p); +void __ovld vstore8(short8 data, size_t offset, __private short *p); +void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p); +void __ovld vstore8(int8 data, size_t offset, __private int *p); +void __ovld vstore8(uint8 data, size_t offset, __private uint *p); +void __ovld vstore8(long8 data, size_t offset, __private long *p); +void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p); +void __ovld vstore8(float8 data, size_t offset, __private float *p); +void __ovld vstore16(char16 data, size_t offset, __private char *p); +void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p); +void __ovld vstore16(short16 data, size_t offset, __private short *p); +void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p); +void __ovld vstore16(int16 data, size_t offset, __private int *p); +void __ovld vstore16(uint16 data, size_t offset, __private uint *p); +void __ovld vstore16(long16 data, size_t offset, __private long *p); +void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p); +void __ovld vstore16(float16 data, size_t offset, __private float *p); +#ifdef cl_khr_fp64 +void __ovld vstore2(double2 data, size_t offset, __global double *p); +void __ovld vstore3(double3 data, size_t offset, __global double *p); +void __ovld vstore4(double4 data, size_t offset, __global double *p); +void __ovld vstore8(double8 data, size_t offset, __global double *p); +void __ovld vstore16(double16 data, size_t offset, __global double *p); +void __ovld vstore2(double2 data, size_t offset, __local double *p); +void __ovld vstore3(double3 data, size_t offset, __local double *p); +void __ovld vstore4(double4 data, size_t offset, __local double *p); +void __ovld vstore8(double8 data, size_t offset, __local double *p); +void __ovld vstore16(double16 data, size_t offset, __local double *p); +void __ovld vstore2(double2 data, size_t offset, __private double *p); +void __ovld vstore3(double3 data, size_t offset, __private double *p); +void __ovld vstore4(double4 data, size_t offset, __private double *p); +void __ovld vstore8(double8 data, size_t offset, __private double *p); +void __ovld vstore16(double16 data, size_t offset, __private double *p); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +void __ovld vstore(half data, size_t offset, __global half *p); +void __ovld vstore2(half2 data, size_t offset, __global half *p); +void __ovld vstore3(half3 data, size_t offset, __global half *p); +void __ovld vstore4(half4 data, size_t offset, __global half *p); +void __ovld vstore8(half8 data, size_t offset, __global half *p); +void __ovld vstore16(half16 data, size_t offset, __global half *p); +void __ovld vstore(half data, size_t offset, __local half *p); +void __ovld vstore2(half2 data, size_t offset, __local half *p); +void __ovld vstore3(half3 data, size_t offset, __local half 
*p); +void __ovld vstore4(half4 data, size_t offset, __local half *p); +void __ovld vstore8(half8 data, size_t offset, __local half *p); +void __ovld vstore16(half16 data, size_t offset, __local half *p); +void __ovld vstore(half data, size_t offset, __private half *p); +void __ovld vstore2(half2 data, size_t offset, __private half *p); +void __ovld vstore3(half3 data, size_t offset, __private half *p); +void __ovld vstore4(half4 data, size_t offset, __private half *p); +void __ovld vstore8(half8 data, size_t offset, __private half *p); +void __ovld vstore16(half16 data, size_t offset, __private half *p); +#endif //cl_khr_fp16 +#endif //defined(__opencl_c_generic_address_space) + +/** + * Read sizeof (half) bytes of data from address + * (p + offset). The data read is interpreted as a + * half value. The half value is converted to a + * float value and the float value is returned. + * The read address computed as (p + offset) + * must be 16-bit aligned. + */ +float __ovld vload_half(size_t offset, const __constant half *p); +#if defined(__opencl_c_generic_address_space) +float __ovld vload_half(size_t offset, const half *p); +#else +float __ovld vload_half(size_t offset, const __global half *p); +float __ovld vload_half(size_t offset, const __local half *p); +float __ovld vload_half(size_t offset, const __private half *p); +#endif //defined(__opencl_c_generic_address_space) + +/** + * Read sizeof (halfn) bytes of data from address + * (p + (offset * n)). The data read is interpreted + * as a halfn value. The halfn value read is + * converted to a floatn value and the floatn + * value is returned. The read address computed + * as (p + (offset * n)) must be 16-bit aligned. + */ +float2 __ovld vload_half2(size_t offset, const __constant half *p); +float3 __ovld vload_half3(size_t offset, const __constant half *p); +float4 __ovld vload_half4(size_t offset, const __constant half *p); +float8 __ovld vload_half8(size_t offset, const __constant half *p); +float16 __ovld vload_half16(size_t offset, const __constant half *p); +#if defined(__opencl_c_generic_address_space) +float2 __ovld vload_half2(size_t offset, const half *p); +float3 __ovld vload_half3(size_t offset, const half *p); +float4 __ovld vload_half4(size_t offset, const half *p); +float8 __ovld vload_half8(size_t offset, const half *p); +float16 __ovld vload_half16(size_t offset, const half *p); +#else +float2 __ovld vload_half2(size_t offset, const __global half *p); +float3 __ovld vload_half3(size_t offset, const __global half *p); +float4 __ovld vload_half4(size_t offset, const __global half *p); +float8 __ovld vload_half8(size_t offset, const __global half *p); +float16 __ovld vload_half16(size_t offset, const __global half *p); +float2 __ovld vload_half2(size_t offset, const __local half *p); +float3 __ovld vload_half3(size_t offset, const __local half *p); +float4 __ovld vload_half4(size_t offset, const __local half *p); +float8 __ovld vload_half8(size_t offset, const __local half *p); +float16 __ovld vload_half16(size_t offset, const __local half *p); +float2 __ovld vload_half2(size_t offset, const __private half *p); +float3 __ovld vload_half3(size_t offset, const __private half *p); +float4 __ovld vload_half4(size_t offset, const __private half *p); +float8 __ovld vload_half8(size_t offset, const __private half *p); +float16 __ovld vload_half16(size_t offset, const __private half *p); +#endif //defined(__opencl_c_generic_address_space) + +/** + * The float value given by data is first + * converted to a half value using the 
appropriate
+ * rounding mode. The half value is then written
+ * to address computed as (p + offset). The
+ * address computed as (p + offset) must be 16-
+ * bit aligned.
+ * vstore_half uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore_half(float data, size_t offset, half *p);
+void __ovld vstore_half_rte(float data, size_t offset, half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double data, size_t offset, half *p);
+void __ovld vstore_half_rte(double data, size_t offset, half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, half *p);
+#endif //cl_khr_fp64
+#else
+void __ovld vstore_half(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
+void __ovld vstore_half(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
+void __ovld vstore_half(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
+void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
+void __ovld vstore_half(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
+void __ovld vstore_half(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
+void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
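For a quick sense of how these overloads behave, here is a minimal sketch (a hypothetical kernel with invented names, not part of the header) contrasting the default store, which uses the current rounding mode, with an explicit round-toward-zero store:

__kernel void pack_half(__global const float *src,
                        __global half *dst_default,
                        __global half *dst_rtz) {
  size_t gid = get_global_id(0);
  // Default variant: current rounding mode (round to nearest even by default).
  vstore_half(src[gid], gid, dst_default);
  // Suffixed variant: always rounds toward zero, regardless of current mode.
  vstore_half_rtz(src[gid], gid, dst_rtz);
}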
+/**
+ * The floatn value given by data is converted to
+ * a halfn value using the appropriate rounding
+ * mode. The halfn value is then written to
+ * address computed as (p + (offset * n)). The
+ * address computed as (p + (offset * n)) must be
+ * 16-bit aligned.
+ * vstore_halfn uses the current rounding mode.
+ * The default current rounding mode is round to
+ * nearest even.
+ */
+#if defined(__opencl_c_generic_address_space)
+void __ovld vstore_half2(float2 data, size_t offset, half *p);
+void __ovld vstore_half3(float3 data, size_t offset, half *p);
+void __ovld vstore_half4(float4 data, size_t offset, half *p);
+void __ovld vstore_half8(float8 data, size_t offset, half *p);
+void __ovld vstore_half16(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
+#ifdef cl_khr_fp64
+void __ovld vstore_half2(double2 data, size_t offset, half *p);
+void __ovld vstore_half3(double3 data, size_t offset, half *p);
+void __ovld vstore_half4(double4 data, size_t offset, half *p);
+void __ovld vstore_half8(double8 data, size_t offset, half *p);
+void __ovld vstore_half16(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
+void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
+void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
+void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
+void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
+void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
+void __ovld
vstore_half4_rtn(double4 data, size_t offset, half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p); +#endif //cl_khr_fp64 +#else +void __ovld vstore_half2(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, 
__local half *p); +void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p); +#ifdef cl_khr_fp64 +void __ovld vstore_half2(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtz(double16 
data, size_t offset, __global half *p); +void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtz(double2 
data, size_t offset, __private half *p); +void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p); +#endif //cl_khr_fp64 +#endif //defined(__opencl_c_generic_address_space) + +/** + * For n = 1, 2, 4, 8 and 16 read sizeof (halfn) + * bytes of data from address (p + (offset * n)). + * The data read is interpreted as a halfn value. + * The halfn value read is converted to a floatn + * value and the floatn value is returned. + * The address computed as (p + (offset * n)) + * must be aligned to sizeof (halfn) bytes. + * For n = 3, vloada_half3 reads a half3 from + * address (p + (offset * 4)) and returns a float3. + * The address computed as (p + (offset * 4)) + * must be aligned to sizeof (half) * 4 bytes. + */ +float __ovld vloada_half(size_t offset, const __constant half *p); +float2 __ovld vloada_half2(size_t offset, const __constant half *p); +float3 __ovld vloada_half3(size_t offset, const __constant half *p); +float4 __ovld vloada_half4(size_t offset, const __constant half *p); +float8 __ovld vloada_half8(size_t offset, const __constant half *p); +float16 __ovld vloada_half16(size_t offset, const __constant half *p); +#if defined(__opencl_c_generic_address_space) +float __ovld vloada_half(size_t offset, const half *p); +float2 __ovld vloada_half2(size_t offset, const half *p); +float3 __ovld vloada_half3(size_t offset, const half *p); +float4 __ovld vloada_half4(size_t offset, const half *p); +float8 __ovld vloada_half8(size_t offset, const half *p); +float16 __ovld vloada_half16(size_t offset, const half *p); +#else +float __ovld vloada_half(size_t offset, const __global half *p); +float2 __ovld vloada_half2(size_t offset, const __global half *p); +float3 __ovld vloada_half3(size_t offset, const __global half *p); +float4 __ovld vloada_half4(size_t offset, const __global half *p); +float8 __ovld vloada_half8(size_t offset, const __global half *p); +float16 __ovld vloada_half16(size_t offset, const __global half *p); +float __ovld vloada_half(size_t offset, const __local half *p); +float2 __ovld vloada_half2(size_t offset, const __local half *p); +float3 __ovld vloada_half3(size_t offset, const __local half *p); +float4 __ovld vloada_half4(size_t offset, const __local half *p); +float8 __ovld vloada_half8(size_t offset, const __local half *p); +float16 __ovld vloada_half16(size_t offset, const __local half *p); +float __ovld vloada_half(size_t offset, const __private half *p); +float2 __ovld vloada_half2(size_t offset, const __private half *p); +float3 
__ovld vloada_half3(size_t offset, const __private half *p); +float4 __ovld vloada_half4(size_t offset, const __private half *p); +float8 __ovld vloada_half8(size_t offset, const __private half *p); +float16 __ovld vloada_half16(size_t offset, const __private half *p); +#endif //defined(__opencl_c_generic_address_space) + +/** + * The floatn value given by data is converted to + * a halfn value using the appropriate rounding + * mode. + * For n = 1, 2, 4, 8 and 16, the halfn value is + * written to the address computed as (p + (offset + * * n)). The address computed as (p + (offset * + * n)) must be aligned to sizeof (halfn) bytes. + * For n = 3, the half3 value is written to the + * address computed as (p + (offset * 4)). The + * address computed as (p + (offset * 4)) must be + * aligned to sizeof (half) * 4 bytes. + * vstorea_halfn uses the current rounding + * mode. The default current rounding mode is + * round to nearest even. + */ +#if defined(__opencl_c_generic_address_space) +void __ovld vstorea_half(float data, size_t offset, half *p); +void __ovld vstorea_half2(float2 data, size_t offset, half *p); +void __ovld vstorea_half3(float3 data, size_t offset, half *p); +void __ovld vstorea_half4(float4 data, size_t offset, half *p); +void __ovld vstorea_half8(float8 data, size_t offset, half *p); +void __ovld vstorea_half16(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p); + +#ifdef cl_khr_fp64 +void __ovld vstorea_half(double data, size_t offset, half *p); +void __ovld vstorea_half2(double2 data, size_t offset, half *p); +void __ovld vstorea_half3(double3 data, size_t offset, half *p); +void __ovld vstorea_half4(double4 data, size_t offset, half *p); +void __ovld vstorea_half8(double8 data, size_t offset, half *p); +void __ovld vstorea_half16(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, half *p); +void __ovld 
vstorea_half2_rte(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtz(double data, size_t offset, half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p); +#endif //cl_khr_fp64 + +#else +void __ovld vstorea_half(float data, size_t offset, __global half *p); +void __ovld vstorea_half2(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, 
__global half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half(float data, size_t offset, __local half *p); +void __ovld vstorea_half2(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half(float data, size_t offset, __private half *p); +void __ovld vstorea_half2(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p); 
+void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p); + +#ifdef cl_khr_fp64 +void __ovld vstorea_half(double data, size_t offset, __global half *p); +void __ovld vstorea_half2(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtz(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, 
__global half *p); +void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half(double data, size_t offset, __local half *p); +void __ovld vstorea_half2(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtz(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half(double data, size_t offset, __private half *p); +void __ovld vstorea_half2(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8(double8 data, size_t offset, __private half *p); +void __ovld vstorea_half16(double16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, __private half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p); +void __ovld 
vstorea_half16_rte(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtz(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtp(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
+
+void __ovld vstorea_half_rtn(double data, size_t offset, __private half *p);
+void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
+void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
+void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
+void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
+void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
+#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
+
+// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
+
+/**
+ * All work-items in a work-group executing the kernel
+ * on a processor must execute this function before any
+ * are allowed to continue execution beyond the barrier.
+ * This function must be encountered by all work-items in
+ * a work-group executing the kernel.
+ * If barrier is inside a conditional statement, then all
+ * work-items must enter the conditional if any work-item
+ * enters the conditional statement and executes the
+ * barrier.
+ * If barrier is inside a loop, all work-items must execute
+ * the barrier for each iteration of the loop before any are
+ * allowed to continue execution beyond the barrier.
+ * The barrier function also queues a memory fence
+ * (reads and writes) to ensure correct ordering of
+ * memory operations to local or global memory.
+ * The flags argument specifies the memory address space
+ * and can be set to a combination of the following literal
+ * values.
+ * CLK_LOCAL_MEM_FENCE - The barrier function
+ * will either flush any variables stored in local memory
+ * or queue a memory fence to ensure correct ordering of
+ * memory operations to local memory.
+ * CLK_GLOBAL_MEM_FENCE - The barrier function
+ * will queue a memory fence to ensure correct ordering
+ * of memory operations to global memory. This can be
+ * useful when work-items, for example, write to buffer or
+ * image objects and then want to read the updated data.
+ */
+
+void __ovld __conv barrier(cl_mem_fence_flags flags);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
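To make the conditional and loop rules above concrete, here is a minimal sketch (a hypothetical reduction kernel, not part of the header, assuming a power-of-two work-group size): every work-item in the work-group reaches the same barrier calls, and CLK_LOCAL_MEM_FENCE orders the __local stores before the subsequent reads:

__kernel void reduce_sum(__global const float *in,
                         __global float *out,
                         __local float *scratch) {
  size_t lid = get_local_id(0);
  scratch[lid] = in[get_global_id(0)];
  barrier(CLK_LOCAL_MEM_FENCE); // all __local stores visible to the work-group
  for (size_t s = get_local_size(0) / 2; s > 0; s >>= 1) {
    if (lid < s)
      scratch[lid] += scratch[lid + s];
    barrier(CLK_LOCAL_MEM_FENCE); // executed by every work-item each iteration
  }
  if (lid == 0)
    out[get_group_id(0)] = scratch[0];
}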
+
+// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
+
+/**
+ * Orders loads and stores of a work-item
+ * executing a kernel. This means that loads
+ * and stores preceding the mem_fence will
+ * be committed to memory before any loads
+ * and stores following the mem_fence.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld mem_fence(cl_mem_fence_flags flags);
+
+/**
+ * Read memory barrier that orders only
+ * loads.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld read_mem_fence(cl_mem_fence_flags flags);
+
+/**
+ * Write memory barrier that orders only
+ * stores.
+ * The flags argument specifies the memory
+ * address space and can be set to a
+ * combination of the following literal
+ * values:
+ * CLK_LOCAL_MEM_FENCE
+ * CLK_GLOBAL_MEM_FENCE.
+ */
+void __ovld write_mem_fence(cl_mem_fence_flags flags);
+
+// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
+
+#if defined(__opencl_c_generic_address_space)
+cl_mem_fence_flags __ovld get_fence(const void *ptr);
+cl_mem_fence_flags __ovld get_fence(void *ptr);
+
+/**
+ * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
+ * and checked in Sema since they should be declared as
+ * addr gentype* to_addr (gentype*);
+ * where gentype is builtin type or user defined type.
+ */
+
+#endif //defined(__opencl_c_generic_address_space)
+
+// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
+
+/**
+ * event_t async_work_group_copy (
+ * __global gentype *dst,
+ * const __local gentype *src,
+ * size_t num_elements,
+ * event_t event)
+ * Perform an async copy of num_elements
+ * gentype elements from src to dst. The async
+ * copy is performed by all work-items in a workgroup
+ * and this built-in function must therefore
+ * be encountered by all work-items in a workgroup
+ * executing the kernel with the same
+ * argument values; otherwise the results are
+ * undefined.
+ * Returns an event object that can be used by
+ * wait_group_events to wait for the async copy
+ * to finish. The event argument can also be used
+ * to associate the async_work_group_copy with
+ * a previous async copy allowing an event to be
+ * shared by multiple async copies; otherwise event
+ * should be zero.
+ * If event argument is non-zero, the event object
+ * supplied in event argument will be returned.
+ * This function does not perform any implicit
+ * synchronization of source data such as using a
+ * barrier before performing the copy.
+ */ +event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t 
event); +event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, 
size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 
*src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event); +#ifdef cl_khr_fp64 +event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event); 
+event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event); +#endif //cl_khr_fp16 + +/** + * Perform an async gather of num_elements + * gentype elements from src to dst. The + * src_stride is the stride in elements for each + * gentype element read from src. The dst_stride + * is the stride in elements for each gentype + * element written to dst. The async gather is + * performed by all work-items in a work-group. + * This built-in function must therefore be + * encountered by all work-items in a work-group + * executing the kernel with the same argument + * values; otherwise the results are undefined. + * Returns an event object that can be used by + * wait_group_events to wait for the async copy + * to finish. The event argument can also be used + * to associate the + * async_work_group_strided_copy with a + * previous async copy allowing an event to be + * shared by multiple async copies; otherwise event + * should be zero. + * If event argument is non-zero, the event object + * supplied in event argument will be returned. + * This function does not perform any implicit + * synchronization of source data such as using a + * barrier before performing the copy. 
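+ * A minimal usage sketch (illustrative only; `tile`, `src`, and the
+ * counts are hypothetical, not part of this header): gather 64 ints,
+ * reading every 4th element of src, then wait on the returned event:
+ *   __local int tile[64];
+ *   event_t e = async_work_group_strided_copy(tile, src, 64, 4, 0);
+ *   wait_group_events(1, &e);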
+ */ +event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, 
size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short16 
*dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld 
async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, 
size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event); +#ifdef cl_khr_fp64 +event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event); 
+event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event); +#endif //cl_khr_fp16 + +/** + * Wait for events that identify the + * async_work_group_copy operations to + * complete. The event objects specified in + * event_list will be released after the wait is + * performed. + * This function must be encountered by all workitems + * in a work-group executing the kernel with + * the same num_events and event objects specified + * in event_list; otherwise the results are undefined. + */ +void __ovld wait_group_events(int num_events, event_t *event_list); + +/** + * Prefetch num_elements * sizeof(gentype) + * bytes into the global cache. The prefetch + * instruction is applied to a work-item in a workgroup + * and does not affect the functional + * behavior of the kernel. 
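+ * A hedged sketch (the `src` buffer and tile size are illustrative,
+ * not from this header): hint the cache with the work-group's tile
+ * before it is consumed:
+ *   prefetch(&src[get_group_id(0) * 256], 256);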
+ */ +void __ovld prefetch(const __global char *p, size_t num_elements); +void __ovld prefetch(const __global uchar *p, size_t num_elements); +void __ovld prefetch(const __global short *p, size_t num_elements); +void __ovld prefetch(const __global ushort *p, size_t num_elements); +void __ovld prefetch(const __global int *p, size_t num_elements); +void __ovld prefetch(const __global uint *p, size_t num_elements); +void __ovld prefetch(const __global long *p, size_t num_elements); +void __ovld prefetch(const __global ulong *p, size_t num_elements); +void __ovld prefetch(const __global float *p, size_t num_elements); +void __ovld prefetch(const __global char2 *p, size_t num_elements); +void __ovld prefetch(const __global uchar2 *p, size_t num_elements); +void __ovld prefetch(const __global short2 *p, size_t num_elements); +void __ovld prefetch(const __global ushort2 *p, size_t num_elements); +void __ovld prefetch(const __global int2 *p, size_t num_elements); +void __ovld prefetch(const __global uint2 *p, size_t num_elements); +void __ovld prefetch(const __global long2 *p, size_t num_elements); +void __ovld prefetch(const __global ulong2 *p, size_t num_elements); +void __ovld prefetch(const __global float2 *p, size_t num_elements); +void __ovld prefetch(const __global char3 *p, size_t num_elements); +void __ovld prefetch(const __global uchar3 *p, size_t num_elements); +void __ovld prefetch(const __global short3 *p, size_t num_elements); +void __ovld prefetch(const __global ushort3 *p, size_t num_elements); +void __ovld prefetch(const __global int3 *p, size_t num_elements); +void __ovld prefetch(const __global uint3 *p, size_t num_elements); +void __ovld prefetch(const __global long3 *p, size_t num_elements); +void __ovld prefetch(const __global ulong3 *p, size_t num_elements); +void __ovld prefetch(const __global float3 *p, size_t num_elements); +void __ovld prefetch(const __global char4 *p, size_t num_elements); +void __ovld prefetch(const __global uchar4 *p, size_t num_elements); +void __ovld prefetch(const __global short4 *p, size_t num_elements); +void __ovld prefetch(const __global ushort4 *p, size_t num_elements); +void __ovld prefetch(const __global int4 *p, size_t num_elements); +void __ovld prefetch(const __global uint4 *p, size_t num_elements); +void __ovld prefetch(const __global long4 *p, size_t num_elements); +void __ovld prefetch(const __global ulong4 *p, size_t num_elements); +void __ovld prefetch(const __global float4 *p, size_t num_elements); +void __ovld prefetch(const __global char8 *p, size_t num_elements); +void __ovld prefetch(const __global uchar8 *p, size_t num_elements); +void __ovld prefetch(const __global short8 *p, size_t num_elements); +void __ovld prefetch(const __global ushort8 *p, size_t num_elements); +void __ovld prefetch(const __global int8 *p, size_t num_elements); +void __ovld prefetch(const __global uint8 *p, size_t num_elements); +void __ovld prefetch(const __global long8 *p, size_t num_elements); +void __ovld prefetch(const __global ulong8 *p, size_t num_elements); +void __ovld prefetch(const __global float8 *p, size_t num_elements); +void __ovld prefetch(const __global char16 *p, size_t num_elements); +void __ovld prefetch(const __global uchar16 *p, size_t num_elements); +void __ovld prefetch(const __global short16 *p, size_t num_elements); +void __ovld prefetch(const __global ushort16 *p, size_t num_elements); +void __ovld prefetch(const __global int16 *p, size_t num_elements); +void __ovld prefetch(const __global uint16 *p, size_t num_elements); +void 
__ovld prefetch(const __global long16 *p, size_t num_elements); +void __ovld prefetch(const __global ulong16 *p, size_t num_elements); +void __ovld prefetch(const __global float16 *p, size_t num_elements); +#ifdef cl_khr_fp64 +void __ovld prefetch(const __global double *p, size_t num_elements); +void __ovld prefetch(const __global double2 *p, size_t num_elements); +void __ovld prefetch(const __global double3 *p, size_t num_elements); +void __ovld prefetch(const __global double4 *p, size_t num_elements); +void __ovld prefetch(const __global double8 *p, size_t num_elements); +void __ovld prefetch(const __global double16 *p, size_t num_elements); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +void __ovld prefetch(const __global half *p, size_t num_elements); +void __ovld prefetch(const __global half2 *p, size_t num_elements); +void __ovld prefetch(const __global half3 *p, size_t num_elements); +void __ovld prefetch(const __global half4 *p, size_t num_elements); +void __ovld prefetch(const __global half8 *p, size_t num_elements); +void __ovld prefetch(const __global half16 *p, size_t num_elements); +#endif // cl_khr_fp16 + +// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions + +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable +#endif +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old + val) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_add(volatile __global int *p, int val); +unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_add(volatile __local int *p, int val); +unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_add(volatile int *p, int val); +unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_add(volatile __global int *p, int val); +unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_add(volatile __local int *p, int val); +unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_add(volatile __global long *p, long val); +unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_add(volatile __local long *p, long val); +unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) stored at location pointed by p. + * Compute (old - val) and store result at location pointed by p. The function + * returns old. 
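+ * Illustrative sketch (the `counter` name is hypothetical): release 4
+ * reserved slots and observe the previous count:
+ *   int prev = atomic_sub(&counter, 4);  // counter: volatile __global int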
+ */ +int __ovld atomic_sub(volatile __global int *p, int val); +unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_sub(volatile __local int *p, int val); +unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_sub(volatile int *p, int val); +unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_sub(volatile __global int *p, int val); +unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_sub(volatile __local int *p, int val); +unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_sub(volatile __global long *p, long val); +unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_sub(volatile __local long *p, long val); +unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Swaps the old value stored at location p + * with new value given by val. Returns old + * value. + */ +int __ovld atomic_xchg(volatile __global int *p, int val); +unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_xchg(volatile __local int *p, int val); +unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val); +float __ovld atomic_xchg(volatile __global float *p, float val); +float __ovld atomic_xchg(volatile __local float *p, float val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_xchg(volatile int *p, int val); +unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val); +float __ovld atomic_xchg(volatile float *p, float val); +#endif + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_xchg(volatile __global int *p, int val); +unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_xchg(volatile __local int *p, int val); +unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_xchg(volatile __global long *p, long val); +long __ovld atom_xchg(volatile __local long *p, long val); +unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val); +unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old + 1) and store result at location + * pointed by p. The function returns old. 
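+ * Illustrative sketch (the `next_slot` name is hypothetical): reserve a
+ * unique output index per work-item:
+ *   uint idx = atomic_inc(&next_slot);  // next_slot: volatile __global uint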
+ */ +int __ovld atomic_inc(volatile __global int *p); +unsigned int __ovld atomic_inc(volatile __global unsigned int *p); +int __ovld atomic_inc(volatile __local int *p); +unsigned int __ovld atomic_inc(volatile __local unsigned int *p); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_inc(volatile int *p); +unsigned int __ovld atomic_inc(volatile unsigned int *p); +#endif + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_inc(volatile __global int *p); +unsigned int __ovld atom_inc(volatile __global unsigned int *p); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_inc(volatile __local int *p); +unsigned int __ovld atom_inc(volatile __local unsigned int *p); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_inc(volatile __global long *p); +unsigned long __ovld atom_inc(volatile __global unsigned long *p); +long __ovld atom_inc(volatile __local long *p); +unsigned long __ovld atom_inc(volatile __local unsigned long *p); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old - 1) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_dec(volatile __global int *p); +unsigned int __ovld atomic_dec(volatile __global unsigned int *p); +int __ovld atomic_dec(volatile __local int *p); +unsigned int __ovld atomic_dec(volatile __local unsigned int *p); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_dec(volatile int *p); +unsigned int __ovld atomic_dec(volatile unsigned int *p); +#endif + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_dec(volatile __global int *p); +unsigned int __ovld atom_dec(volatile __global unsigned int *p); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_dec(volatile __local int *p); +unsigned int __ovld atom_dec(volatile __local unsigned int *p); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_dec(volatile __global long *p); +unsigned long __ovld atom_dec(volatile __global unsigned long *p); +long __ovld atom_dec(volatile __local long *p); +unsigned long __ovld atom_dec(volatile __local unsigned long *p); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old == cmp) ? val : old and store result at + * location pointed by p. The function + * returns old. 
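+ * Illustrative sketch (the `slot` name is hypothetical): claim a slot
+ * only if it still holds the sentinel value 0:
+ *   int prev = atomic_cmpxchg(&slot, 0, val);
+ *   bool claimed = (prev == 0);  // old value 0 means the swap happened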
+ */ +int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val); +unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val); +int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val); +unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val); +unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val); +unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val); +unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val); +unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val); +long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val); +unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * min(old, val) and store minimum value at + * location pointed by p. The function + * returns old. + */ +int __ovld atomic_min(volatile __global int *p, int val); +unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_min(volatile __local int *p, int val); +unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_min(volatile int *p, int val); +unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_min(volatile __global int *p, int val); +unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_min(volatile __local int *p, int val); +unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_min(volatile __global long *p, long val); +unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_min(volatile __local long *p, long val); +unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * max(old, val) and store maximum value at + * location pointed by p. The function + * returns old. 
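+ * Illustrative sketch (the `global_max` name is hypothetical): fold a
+ * per-work-item value into a running maximum:
+ *   atomic_max(&global_max, my_val);  // global_max: volatile __global int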
+ */ +int __ovld atomic_max(volatile __global int *p, int val); +unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_max(volatile __local int *p, int val); +unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_max(volatile int *p, int val); +unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_max(volatile __global int *p, int val); +unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_max(volatile __local int *p, int val); +unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_max(volatile __global long *p, long val); +unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_max(volatile __local long *p, long val); +unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old & val) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_and(volatile __global int *p, int val); +unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_and(volatile __local int *p, int val); +unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_and(volatile int *p, int val); +unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_and(volatile __global int *p, int val); +unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_and(volatile __local int *p, int val); +unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_and(volatile __global long *p, long val); +unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_and(volatile __local long *p, long val); +unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old | val) and store result at location + * pointed by p. The function returns old. 
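+ * Illustrative sketch (the `flags` name is hypothetical): set one bit
+ * of a shared bitmask without losing concurrent updates:
+ *   atomic_or(&flags, 1u << bit);  // flags: volatile __global uint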
+ */ +int __ovld atomic_or(volatile __global int *p, int val); +unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_or(volatile __local int *p, int val); +unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_or(volatile int *p, int val); +unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_or(volatile __global int *p, int val); +unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_or(volatile __local int *p, int val); +unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_or(volatile __global long *p, long val); +unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_or(volatile __local long *p, long val); +unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old ^ val) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_xor(volatile __global int *p, int val); +unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_xor(volatile __local int *p, int val); +unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val); +#ifdef __OPENCL_CPP_VERSION__ +int __ovld atomic_xor(volatile int *p, int val); +unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_xor(volatile __global int *p, int val); +unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_xor(volatile __local int *p, int val); +unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_xor(volatile __global long *p, long val); +unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_xor(volatile __local long *p, long val); +unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val); +#endif + +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable +#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable +#endif + +// OpenCL v2.0 s6.13.11 - Atomics Functions + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable +#endif + +// atomic_init() +#if defined(__opencl_c_generic_address_space) +void __ovld atomic_init(volatile atomic_int *object, int value); +void __ovld atomic_init(volatile atomic_uint *object, uint value); +void __ovld atomic_init(volatile atomic_float *object, float value); +#if 
defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +void __ovld atomic_init(volatile atomic_long *object, long value); +void __ovld atomic_init(volatile atomic_ulong *object, ulong value); +#ifdef cl_khr_fp64 +void __ovld atomic_init(volatile atomic_double *object, double value); +#endif //cl_khr_fp64 +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +void __ovld atomic_init(volatile __global atomic_int *object, int value); +void __ovld atomic_init(volatile __local atomic_int *object, int value); +void __ovld atomic_init(volatile __global atomic_uint *object, uint value); +void __ovld atomic_init(volatile __local atomic_uint *object, uint value); +void __ovld atomic_init(volatile __global atomic_float *object, float value); +void __ovld atomic_init(volatile __local atomic_float *object, float value); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +void __ovld atomic_init(volatile __global atomic_long *object, long value); +void __ovld atomic_init(volatile __local atomic_long *object, long value); +void __ovld atomic_init(volatile __global atomic_ulong *object, ulong value); +void __ovld atomic_init(volatile __local atomic_ulong *object, ulong value); +#ifdef cl_khr_fp64 +void __ovld atomic_init(volatile __global atomic_double *object, double value); +void __ovld atomic_init(volatile __local atomic_double *object, double value); +#endif //cl_khr_fp64 +#endif +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 + +// atomic_work_item_fence() +void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope); + +// atomic_fetch() +// OpenCL v2.0 s6.13.11.7.5: +// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t. 
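+// A hedged usage sketch (the `ctr` object is hypothetical, e.g. a
+// volatile atomic_int* in the generic address space): the defaulted
+// form below is seq_cst at device scope; the _explicit form declared
+// further down takes a memory_order:
+//   int prev  = atomic_fetch_add(ctr, 1);
+//   int prev2 = atomic_fetch_sub_explicit(ctr, 1, memory_order_relaxed);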
+ +#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_fetch_add(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand); +int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand); +int __ovld atomic_fetch_or(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand); +int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand); +int __ovld atomic_fetch_and(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand); +int __ovld atomic_fetch_min(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand); +int __ovld atomic_fetch_max(volatile atomic_int *object, int operand); +uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand); +long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand); +long __ovld atomic_fetch_or(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand); +long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand); +long __ovld atomic_fetch_and(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand); +long __ovld atomic_fetch_min(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand); +long __ovld atomic_fetch_max(volatile atomic_long *object, long operand); +ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand); +uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_fetch_add(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_add(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_add(volatile __global atomic_uint *object, uint operand); +uint __ovld atomic_fetch_add(volatile __local atomic_uint *object, uint operand); +int __ovld atomic_fetch_sub(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_sub(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_sub(volatile __global atomic_uint *object, uint operand); +uint __ovld atomic_fetch_sub(volatile __local atomic_uint *object, uint operand); +int __ovld atomic_fetch_or(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_or(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_or(volatile __global atomic_uint 
*object, uint operand); +uint __ovld atomic_fetch_or(volatile __local atomic_uint *object, uint operand); +int __ovld atomic_fetch_xor(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_xor(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_xor(volatile __global atomic_uint *object, uint operand); +uint __ovld atomic_fetch_xor(volatile __local atomic_uint *object, uint operand); +int __ovld atomic_fetch_and(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_and(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_and(volatile __global atomic_uint *object, uint operand); +uint __ovld atomic_fetch_and(volatile __local atomic_uint *object, uint operand); +int __ovld atomic_fetch_min(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_min(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_min(volatile __global atomic_uint *object, uint operand); +uint __ovld atomic_fetch_min(volatile __local atomic_uint *object, uint operand); +int __ovld atomic_fetch_max(volatile __global atomic_int *object, int operand); +int __ovld atomic_fetch_max(volatile __local atomic_int *object, int operand); +uint __ovld atomic_fetch_max(volatile __global atomic_uint *object, uint operand); +uint __ovld atomic_fetch_max(volatile __local atomic_uint *object, uint operand); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_add(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand); +uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *object, ptrdiff_t operand); +long __ovld atomic_fetch_sub(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_sub(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *object, ptrdiff_t operand); +uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand); +long __ovld atomic_fetch_or(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_or(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *object, intptr_t operand); +intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *object, uintptr_t operand); +long __ovld atomic_fetch_xor(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_xor(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *object, ulong operand); 
+ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *object, intptr_t operand); +intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *object, uintptr_t operand); +long __ovld atomic_fetch_and(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_and(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *object, intptr_t operand); +intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *object, uintptr_t operand); +long __ovld atomic_fetch_min(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_min(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *object, intptr_t operand); +intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *object, uintptr_t operand); +long __ovld atomic_fetch_max(volatile __global atomic_long *object, long operand); +long __ovld atomic_fetch_max(volatile __local atomic_long *object, long operand); +ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *object, ulong operand); +uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand); +uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order); +int __ovld 
atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int 
operand, memory_order order); +int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order); +uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +uintptr_t __ovld 
atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order); +long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order); +long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order); +long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, 
memory_order order); +uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order); +long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order); +ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order); +uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong 
operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld 
atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, 
memory_scope scope); +long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_min_explicit(volatile 
__local atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 + +// atomic_store() + +#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +void __ovld atomic_store(volatile atomic_int *object, int desired); +void __ovld atomic_store(volatile atomic_uint *object, uint desired); +void __ovld atomic_store(volatile atomic_float *object, float desired); + +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store(volatile atomic_double *object, double desired); +#endif //cl_khr_fp64 +void __ovld atomic_store(volatile atomic_long *object, long desired); +void __ovld atomic_store(volatile atomic_ulong *object, ulong desired); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +void __ovld atomic_store(volatile __global atomic_int *object, int desired); +void __ovld atomic_store(volatile __local atomic_int *object, int desired); +void __ovld atomic_store(volatile __global atomic_uint *object, uint desired); +void __ovld atomic_store(volatile __local atomic_uint *object, uint desired); +void __ovld atomic_store(volatile __global atomic_float *object, float desired); +void __ovld atomic_store(volatile __local atomic_float *object, float desired); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store(volatile __global atomic_double *object, double desired); +void __ovld atomic_store(volatile __local atomic_double *object, double desired); +#endif //cl_khr_fp64 +void __ovld atomic_store(volatile __global atomic_long 
*object, long desired); +void __ovld atomic_store(volatile __local atomic_long *object, long desired); +void __ovld atomic_store(volatile __global atomic_ulong *object, ulong desired); +void __ovld atomic_store(volatile __local atomic_ulong *object, ulong desired); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order); +#endif //cl_khr_fp64 +void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order); +void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order); +void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order); +void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order); +void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order); +void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order); +void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order); +#endif +void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order); +void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order); +void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order); +void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_generic_address_space) +void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope); +#endif 
//cl_khr_fp64 +void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope); +#endif //cl_khr_fp64 +void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 + +// atomic_load() +#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_load(volatile atomic_int *object); +uint __ovld atomic_load(volatile atomic_uint *object); +float __ovld atomic_load(volatile atomic_float *object); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load(volatile atomic_double *object); +#endif //cl_khr_fp64 +long __ovld atomic_load(volatile atomic_long *object); +ulong __ovld atomic_load(volatile atomic_ulong *object); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_load(volatile __global atomic_int *object); +int __ovld atomic_load(volatile __local atomic_int *object); +uint __ovld atomic_load(volatile __global atomic_uint *object); +uint __ovld atomic_load(volatile __local atomic_uint *object); +float __ovld atomic_load(volatile __global atomic_float *object); +float __ovld atomic_load(volatile __local atomic_float *object); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load(volatile __global atomic_double *object); +double __ovld atomic_load(volatile __local atomic_double *object); +#endif //cl_khr_fp64 +long 
__ovld atomic_load(volatile __global atomic_long *object); +long __ovld atomic_load(volatile __local atomic_long *object); +ulong __ovld atomic_load(volatile __global atomic_ulong *object); +ulong __ovld atomic_load(volatile __local atomic_ulong *object); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order); +uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order); +float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order); +#endif //cl_khr_fp64 +long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order); +ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order); +int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order); +uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order); +uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order); +float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order); +float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order); +double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order); +#endif //cl_khr_fp64 +long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order); +long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order); +ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order); +ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope); +uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope); +float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope); +#endif //cl_khr_fp64 +long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope); +ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 
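The OpenCL 3.0 block opened here repeats the order-and-scope loads for the explicit __global and __local address spaces, with overload selection driven purely by the address space of the object pointer. A small usage sketch of the scoped form; illustrative only, not part of this patch, and the kernel and argument names are hypothetical.

__kernel void sample_counter(__global atomic_uint *counter,
                             __global uint *snapshot) {
  // A relaxed, device-scope load: atomicity without ordering guarantees,
  // which is sufficient for snapshotting a monotonically growing counter.
  snapshot[get_global_id(0)] =
      atomic_load_explicit(counter, memory_order_relaxed, memory_scope_device);
}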
+int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order, memory_scope scope); +int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order, memory_scope scope); +uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order, memory_scope scope); +uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order, memory_scope scope); +float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order, memory_scope scope); +float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order, memory_scope scope); +double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order, memory_scope scope); +#endif +long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order, memory_scope scope); +long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order, memory_scope scope); +ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order, memory_scope scope); +ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order, memory_scope scope); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 + +// atomic_exchange() + +#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +int __ovld atomic_exchange(volatile atomic_int *object, int desired); +uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired); +float __ovld atomic_exchange(volatile atomic_float *object, float desired); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_exchange(volatile atomic_double *object, double desired); +#endif //cl_khr_fp64 +long __ovld atomic_exchange(volatile atomic_long *object, long desired); +ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_exchange(volatile __global atomic_int *object, int desired); +int __ovld atomic_exchange(volatile __local atomic_int *object, int desired); +uint __ovld atomic_exchange(volatile __global atomic_uint *object, uint desired); +uint __ovld atomic_exchange(volatile __local atomic_uint *object, uint desired); +float __ovld atomic_exchange(volatile __global atomic_float *object, float desired); +float __ovld atomic_exchange(volatile __local atomic_float *object, float desired); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_exchange(volatile __global atomic_double *object, double desired); +double __ovld atomic_exchange(volatile __local atomic_double *object, double desired); +#endif //cl_khr_fp64 +long __ovld atomic_exchange(volatile __global atomic_long *object, long desired); +long __ovld atomic_exchange(volatile __local atomic_long *object, long desired); +ulong __ovld atomic_exchange(volatile __global atomic_ulong *object, ulong desired); +ulong __ovld atomic_exchange(volatile 
__local atomic_ulong *object, ulong desired);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif
+
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope
scope); +ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope); +int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope); +uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope); +uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope); +float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope); +float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope); +double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope); +#endif //cl_khr_fp64 +long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope); +long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope); +ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 + +// atomic_compare_exchange_strong() and atomic_compare_exchange_weak() +#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device) +#if defined(__opencl_c_generic_address_space) +bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, 
ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __global int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __local int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __private int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __global int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __local int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __private int *expected, int desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __global uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __local uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __private uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __global uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __local uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __private uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __global float *expected, float desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __local float *expected, float desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __private float *expected, float desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __global float *expected, float desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __local float *expected, float desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __private float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __global int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __local int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __private int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __global int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __local int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __private int *expected, int desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __global uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __local uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __private uint *expected, uint desired); +bool __ovld 
atomic_compare_exchange_weak(volatile __local atomic_uint *object, __global uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __local uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __private uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __global float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __local float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __private float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __global float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __local float *expected, float desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __private float *expected, float desired); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __global double *expected, double desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __local double *expected, double desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __private double *expected, double desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __global double *expected, double desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __local double *expected, double desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __private double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __global double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __local double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __private double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __global double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __local double *expected, double desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __private double *expected, double desired); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __global long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __local long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __private long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __global long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __local long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __private long *expected, long desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong 
*object, __global ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __global long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __local long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __private long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __global long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __local long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __private long *expected, long desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 +#endif + +#if defined(__opencl_c_generic_address_space) +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order 
failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected, + float desired, 
memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order 
failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected, + long desired, memory_order success, 
memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected, + ulong desired, memory_order success, memory_order failure); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0 + +#if defined(__opencl_c_generic_address_space) +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, 
memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +#endif +#endif //defined(__opencl_c_generic_address_space) +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected, + uint 
desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected, + uint desired, 
memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure, memory_scope 
scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected, + ulong 
desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected, long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected, long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected, long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected, long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected, long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected, long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired, memory_order success, memory_order failure, memory_scope scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0
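+
+/*
+ * Usage sketch (illustrative only, not part of the specification text):
+ * a typical retry loop around the compare-exchange builtins declared
+ * above. The kernel below is hypothetical and assumes a device that
+ * supports memory_scope_device and the atomic_float overloads.
+ *
+ *   __kernel void atomic_fmax(__global atomic_float *dst,
+ *                             __global const float *src) {
+ *     float val = src[get_global_id(0)];
+ *     float old = atomic_load_explicit(dst, memory_order_relaxed,
+ *                                      memory_scope_device);
+ *     // On failure, 'old' is reloaded with the current stored value,
+ *     // so the loop exits as soon as that value is already >= val.
+ *     while (old < val &&
+ *            !atomic_compare_exchange_weak_explicit(dst, &old, val,
+ *                                                   memory_order_relaxed,
+ *                                                   memory_order_relaxed,
+ *                                                   memory_scope_device)) {
+ *     }
+ *   }
+ */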
+
+// atomic_flag_test_and_set() and atomic_flag_clear()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
+void __ovld atomic_flag_clear(volatile atomic_flag *object);
+#endif //defined(__opencl_c_generic_address_space)
+#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *object);
+bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *object);
+void __ovld atomic_flag_clear(volatile __global atomic_flag *object);
+void __ovld atomic_flag_clear(volatile __local atomic_flag *object);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif
+
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
+#endif //defined(__opencl_c_generic_address_space)
+#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif
+
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+#endif //defined(__opencl_c_generic_address_space)
+#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
+#endif //__OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
+
+/**
+ * The shuffle and shuffle2 built-in functions construct
+ * a permutation of elements from one or two input
+ * vectors respectively that are of the same type,
+ * returning a vector with the same element type as the
+ * input and length that is the same as the shuffle mask.
+ * The size of each element in the mask must match the
+ * size of each element in the result. For shuffle, only
+ * the ilogb(2m-1) least significant bits of each mask
+ * element are considered. For shuffle2, only the
+ * ilogb(2m-1)+1 least significant bits of each mask
+ * element are considered. Other bits in the mask shall
+ * be ignored.
+ * The elements of the input vectors are numbered from
+ * left to right across one or both of the vectors. For this
+ * purpose, the number of elements in a vector is given
+ * by vec_step(gentypem). The shuffle mask operand
+ * specifies, for each element of the result vector, which
+ * element of the one or two input vectors the result
+ * element gets.
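+ *
+ * For example, for an 8-element input vector, ilogb(2*8-1) = 3, so
+ * shuffle considers only the 3 least significant bits of each mask
+ * component (selecting elements 0..7), and shuffle2 considers 4 bits
+ * (the extra bit selects between the two input vectors).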
+ * Examples: + * uint4 mask = (uint4)(3, 2, + * 1, 0); + * float4 a; + * float4 r = shuffle(a, mask); + * // r.s0123 = a.wzyx + * uint8 mask = (uint8)(0, 1, 2, 3, + * 4, 5, 6, 7); + * float4 a, b; + * float8 r = shuffle2(a, b, mask); + * // r.s0123 = a.xyzw + * // r.s4567 = b.xyzw + * uint4 mask; + * float8 a; + * float4 b; + * b = shuffle(a, mask); + * Examples that are not valid are: + * uint8 mask; + * short16 a; + * short8 b; + * b = shuffle(a, mask); <- not valid + */ +char2 __ovld __cnfn shuffle(char2 x, uchar2 mask); +char2 __ovld __cnfn shuffle(char4 x, uchar2 mask); +char2 __ovld __cnfn shuffle(char8 x, uchar2 mask); +char2 __ovld __cnfn shuffle(char16 x, uchar2 mask); + +uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask); +uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask); +uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask); +uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask); + +short2 __ovld __cnfn shuffle(short2 x, ushort2 mask); +short2 __ovld __cnfn shuffle(short4 x, ushort2 mask); +short2 __ovld __cnfn shuffle(short8 x, ushort2 mask); +short2 __ovld __cnfn shuffle(short16 x, ushort2 mask); + +ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask); +ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask); +ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask); +ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask); + +int2 __ovld __cnfn shuffle(int2 x, uint2 mask); +int2 __ovld __cnfn shuffle(int4 x, uint2 mask); +int2 __ovld __cnfn shuffle(int8 x, uint2 mask); +int2 __ovld __cnfn shuffle(int16 x, uint2 mask); + +uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask); +uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask); +uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask); +uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask); + +long2 __ovld __cnfn shuffle(long2 x, ulong2 mask); +long2 __ovld __cnfn shuffle(long4 x, ulong2 mask); +long2 __ovld __cnfn shuffle(long8 x, ulong2 mask); +long2 __ovld __cnfn shuffle(long16 x, ulong2 mask); + +ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask); +ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask); +ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask); +ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask); + +float2 __ovld __cnfn shuffle(float2 x, uint2 mask); +float2 __ovld __cnfn shuffle(float4 x, uint2 mask); +float2 __ovld __cnfn shuffle(float8 x, uint2 mask); +float2 __ovld __cnfn shuffle(float16 x, uint2 mask); + +char4 __ovld __cnfn shuffle(char2 x, uchar4 mask); +char4 __ovld __cnfn shuffle(char4 x, uchar4 mask); +char4 __ovld __cnfn shuffle(char8 x, uchar4 mask); +char4 __ovld __cnfn shuffle(char16 x, uchar4 mask); + +uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask); +uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask); +uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask); +uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask); + +short4 __ovld __cnfn shuffle(short2 x, ushort4 mask); +short4 __ovld __cnfn shuffle(short4 x, ushort4 mask); +short4 __ovld __cnfn shuffle(short8 x, ushort4 mask); +short4 __ovld __cnfn shuffle(short16 x, ushort4 mask); + +ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask); +ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask); +ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask); +ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask); + +int4 __ovld __cnfn shuffle(int2 x, uint4 mask); +int4 __ovld __cnfn shuffle(int4 x, uint4 mask); +int4 __ovld __cnfn shuffle(int8 x, uint4 mask); +int4 __ovld __cnfn shuffle(int16 x, uint4 mask); + +uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask); +uint4 
__ovld __cnfn shuffle(uint4 x, uint4 mask); +uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask); +uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask); + +long4 __ovld __cnfn shuffle(long2 x, ulong4 mask); +long4 __ovld __cnfn shuffle(long4 x, ulong4 mask); +long4 __ovld __cnfn shuffle(long8 x, ulong4 mask); +long4 __ovld __cnfn shuffle(long16 x, ulong4 mask); + +ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask); +ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask); +ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask); +ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask); + +float4 __ovld __cnfn shuffle(float2 x, uint4 mask); +float4 __ovld __cnfn shuffle(float4 x, uint4 mask); +float4 __ovld __cnfn shuffle(float8 x, uint4 mask); +float4 __ovld __cnfn shuffle(float16 x, uint4 mask); + +char8 __ovld __cnfn shuffle(char2 x, uchar8 mask); +char8 __ovld __cnfn shuffle(char4 x, uchar8 mask); +char8 __ovld __cnfn shuffle(char8 x, uchar8 mask); +char8 __ovld __cnfn shuffle(char16 x, uchar8 mask); + +uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask); +uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask); +uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask); +uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask); + +short8 __ovld __cnfn shuffle(short2 x, ushort8 mask); +short8 __ovld __cnfn shuffle(short4 x, ushort8 mask); +short8 __ovld __cnfn shuffle(short8 x, ushort8 mask); +short8 __ovld __cnfn shuffle(short16 x, ushort8 mask); + +ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask); +ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask); +ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask); +ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask); + +int8 __ovld __cnfn shuffle(int2 x, uint8 mask); +int8 __ovld __cnfn shuffle(int4 x, uint8 mask); +int8 __ovld __cnfn shuffle(int8 x, uint8 mask); +int8 __ovld __cnfn shuffle(int16 x, uint8 mask); + +uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask); +uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask); +uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask); +uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask); + +long8 __ovld __cnfn shuffle(long2 x, ulong8 mask); +long8 __ovld __cnfn shuffle(long4 x, ulong8 mask); +long8 __ovld __cnfn shuffle(long8 x, ulong8 mask); +long8 __ovld __cnfn shuffle(long16 x, ulong8 mask); + +ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask); +ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask); +ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask); +ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask); + +float8 __ovld __cnfn shuffle(float2 x, uint8 mask); +float8 __ovld __cnfn shuffle(float4 x, uint8 mask); +float8 __ovld __cnfn shuffle(float8 x, uint8 mask); +float8 __ovld __cnfn shuffle(float16 x, uint8 mask); + +char16 __ovld __cnfn shuffle(char2 x, uchar16 mask); +char16 __ovld __cnfn shuffle(char4 x, uchar16 mask); +char16 __ovld __cnfn shuffle(char8 x, uchar16 mask); +char16 __ovld __cnfn shuffle(char16 x, uchar16 mask); + +uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask); +uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask); +uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask); +uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask); + +short16 __ovld __cnfn shuffle(short2 x, ushort16 mask); +short16 __ovld __cnfn shuffle(short4 x, ushort16 mask); +short16 __ovld __cnfn shuffle(short8 x, ushort16 mask); +short16 __ovld __cnfn shuffle(short16 x, ushort16 mask); + +ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask); +ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask); +ushort16 __ovld __cnfn 
shuffle(ushort8 x, ushort16 mask); +ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask); + +int16 __ovld __cnfn shuffle(int2 x, uint16 mask); +int16 __ovld __cnfn shuffle(int4 x, uint16 mask); +int16 __ovld __cnfn shuffle(int8 x, uint16 mask); +int16 __ovld __cnfn shuffle(int16 x, uint16 mask); + +uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask); +uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask); +uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask); +uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask); + +long16 __ovld __cnfn shuffle(long2 x, ulong16 mask); +long16 __ovld __cnfn shuffle(long4 x, ulong16 mask); +long16 __ovld __cnfn shuffle(long8 x, ulong16 mask); +long16 __ovld __cnfn shuffle(long16 x, ulong16 mask); + +ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask); +ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask); +ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask); +ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask); + +float16 __ovld __cnfn shuffle(float2 x, uint16 mask); +float16 __ovld __cnfn shuffle(float4 x, uint16 mask); +float16 __ovld __cnfn shuffle(float8 x, uint16 mask); +float16 __ovld __cnfn shuffle(float16 x, uint16 mask); + +#ifdef cl_khr_fp64 +double2 __ovld __cnfn shuffle(double2 x, ulong2 mask); +double2 __ovld __cnfn shuffle(double4 x, ulong2 mask); +double2 __ovld __cnfn shuffle(double8 x, ulong2 mask); +double2 __ovld __cnfn shuffle(double16 x, ulong2 mask); + +double4 __ovld __cnfn shuffle(double2 x, ulong4 mask); +double4 __ovld __cnfn shuffle(double4 x, ulong4 mask); +double4 __ovld __cnfn shuffle(double8 x, ulong4 mask); +double4 __ovld __cnfn shuffle(double16 x, ulong4 mask); + +double8 __ovld __cnfn shuffle(double2 x, ulong8 mask); +double8 __ovld __cnfn shuffle(double4 x, ulong8 mask); +double8 __ovld __cnfn shuffle(double8 x, ulong8 mask); +double8 __ovld __cnfn shuffle(double16 x, ulong8 mask); + +double16 __ovld __cnfn shuffle(double2 x, ulong16 mask); +double16 __ovld __cnfn shuffle(double4 x, ulong16 mask); +double16 __ovld __cnfn shuffle(double8 x, ulong16 mask); +double16 __ovld __cnfn shuffle(double16 x, ulong16 mask); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half2 __ovld __cnfn shuffle(half2 x, ushort2 mask); +half2 __ovld __cnfn shuffle(half4 x, ushort2 mask); +half2 __ovld __cnfn shuffle(half8 x, ushort2 mask); +half2 __ovld __cnfn shuffle(half16 x, ushort2 mask); + +half4 __ovld __cnfn shuffle(half2 x, ushort4 mask); +half4 __ovld __cnfn shuffle(half4 x, ushort4 mask); +half4 __ovld __cnfn shuffle(half8 x, ushort4 mask); +half4 __ovld __cnfn shuffle(half16 x, ushort4 mask); + +half8 __ovld __cnfn shuffle(half2 x, ushort8 mask); +half8 __ovld __cnfn shuffle(half4 x, ushort8 mask); +half8 __ovld __cnfn shuffle(half8 x, ushort8 mask); +half8 __ovld __cnfn shuffle(half16 x, ushort8 mask); + +half16 __ovld __cnfn shuffle(half2 x, ushort16 mask); +half16 __ovld __cnfn shuffle(half4 x, ushort16 mask); +half16 __ovld __cnfn shuffle(half8 x, ushort16 mask); +half16 __ovld __cnfn shuffle(half16 x, ushort16 mask); +#endif //cl_khr_fp16 + +char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask); +char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask); +char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask); +char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask); + +uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask); +uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask); +uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask); +uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 
y, uchar2 mask); + +short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask); +short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask); +short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask); +short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask); + +ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask); +ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask); +ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask); +ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask); + +int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask); +int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask); +int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask); +int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask); + +uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask); +uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask); +uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask); +uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask); + +long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask); +long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask); +long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask); +long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask); + +ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask); +ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask); +ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask); +ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask); + +float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask); +float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask); +float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask); +float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask); + +char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask); +char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask); +char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask); +char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask); + +uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask); +uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask); +uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask); +uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask); + +short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask); +short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask); +short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask); +short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask); + +ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask); +ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask); +ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask); +ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask); + +int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask); +int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask); +int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask); +int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask); + +uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask); +uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask); +uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask); +uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask); + +long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask); +long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask); +long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 
mask); +long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask); + +ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask); +ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask); +ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask); +ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask); + +float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask); +float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask); +float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask); +float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask); + +char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask); +char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask); +char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask); +char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask); + +uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask); +uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask); +uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask); +uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask); + +short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask); +short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask); +short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask); +short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask); + +ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask); +ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask); +ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask); +ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask); + +int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask); +int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask); +int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask); +int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask); + +uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask); +uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask); +uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask); +uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask); + +long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask); +long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask); +long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask); +long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask); + +ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask); +ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask); +ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask); +ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask); + +float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask); +float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask); +float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask); +float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask); + +char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask); +char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask); +char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask); +char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask); + +uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask); +uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask); +uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask); +uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask); + +short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask); +short16 __ovld __cnfn 
shuffle2(short4 x, short4 y, ushort16 mask); +short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask); +short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask); + +ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask); +ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask); +ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask); +ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask); + +int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask); +int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask); +int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask); +int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask); + +uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask); +uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask); +uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask); +uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask); + +long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask); +long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask); +long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask); +long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask); + +ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask); +ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask); +ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask); +ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask); + +float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask); +float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask); +float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask); +float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask); + +#ifdef cl_khr_fp64 +double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask); +double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask); +double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask); +double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask); + +double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask); +double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask); +double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask); +double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask); + +double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask); +double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask); +double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask); +double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask); + +double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask); +double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask); +double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask); +double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask); +half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask); +half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask); +half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask); + +half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask); +half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask); +half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask); +half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask); + +half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask); +half8 __ovld __cnfn 
shuffle2(half4 x, half4 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
+half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
+
+half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
+half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.13, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
+
+#ifdef cl_khr_gl_msaa_sharing
+#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
+#endif //cl_khr_gl_msaa_sharing
+
+/**
+ * Use the coordinate (coord.xy) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (coord.x, coord.y, coord.z) to do
+ * an element lookup in the 3D image object specified
+ * by image. coord.w is ignored.
+ *
+ * Use the coordinate (coord.z) to index into the
+ * 2D image array object specified by image_array
+ * and (coord.x, coord.y) to do an element lookup in
+ * the 2D image object specified by image.
+ *
+ * Use the coordinate (x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.y) to index into the
+ * 1D image array object specified by image_array
+ * and (coord.x) to do an element lookup in
+ * the 1D image object specified by image.
+ *
+ * Use the coordinate (coord.xy) and sample to do an
+ * element lookup in the 2D multi-sample image specified
+ * by image.
+ *
+ * Use coord.xy and sample to do an element
+ * lookup in the 2D multi-sample image layer
+ * identified by index coord.z in the 2D multi-sample
+ * image array specified by image.
+ *
+ * For mipmap images, use the mip-level specified by
+ * the Level-of-Detail (lod) or use gradients for LOD
+ * computation.
+ *
+ * read_imagef returns floating-point values in the
+ * range [0.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to one of the predefined
+ * packed formats or CL_UNORM_INT8, or
+ * CL_UNORM_INT16.
+ *
+ * read_imagef returns floating-point values in the
+ * range [-1.0 ... 1.0] for image objects created with
+ * image_channel_data_type set to CL_SNORM_INT8,
+ * or CL_SNORM_INT16.
+ *
+ * read_imagef returns floating-point values for image
+ * objects created with image_channel_data_type set to
+ * CL_HALF_FLOAT or CL_FLOAT.
+ *
+ * read_imagei and read_imageui return
+ * unnormalized signed integer and unsigned integer
+ * values respectively. Each channel will be stored in a
+ * 32-bit integer.
+ *
+ * read_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imagei
+ * are undefined.
+ *
+ * read_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ * If the image_channel_data_type is not one of the
+ * above values, the values returned by read_imageui
+ * are undefined.
+ *
+ * The read_image{i|ui} calls support a nearest filter
+ * only. The filter_mode specified in sampler
+ * must be set to CLK_FILTER_NEAREST; otherwise
+ * the values returned are undefined.
+ + * The read_image{f|i|ui} calls that take + * integer coordinates must use a sampler with + * normalized coordinates set to + * CLK_NORMALIZED_COORDS_FALSE and + * addressing mode set to + * CLK_ADDRESS_CLAMP_TO_EDGE, + * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE; + * otherwise the values returned are undefined. + * + * Values returned by read_imagef for image objects + * with image_channel_data_type values not specified + * in the description above are undefined. + */ + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord); +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord); + +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord); +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord); + +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord); + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord); +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord); + +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord); +#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord); +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord); + +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord); + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord); +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord); + +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord); +uint4 __purefn __ovld 
read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord); +#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) + +#ifdef cl_khr_depth_images +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord); +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord); +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord); +#endif //cl_khr_depth_images + +#if defined(cl_khr_gl_msaa_sharing) +float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample); +int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample); +uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample); + +float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample); + +float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample); +int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample); +uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample); + +float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample); +#endif //cl_khr_gl_msaa_sharing + +// OpenCL Extension v2.0 s9.18 - Mipmaps +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#ifdef cl_khr_mipmap_image + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); + +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, 
float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); + +#endif //cl_khr_mipmap_image +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) + +/** +* Sampler-less Image Access +*/ + +float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord); +int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord); + +float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord); +int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord); +uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_only 
image2d_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord); + +#ifdef cl_khr_depth_images +float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord); +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord); +#endif //cl_khr_depth_images + +float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord); + +#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) + +// Image read functions returning half4 type +#ifdef cl_khr_fp16 +half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord); +half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord); +half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord); +half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord); +half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord); +half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord); +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) +half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord); +half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord); +half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord); +half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord); +/** + * Sampler-less Image Access + */ +half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord); +half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord); +#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2) +#endif //cl_khr_fp16 + +// Image read functions for read_write images +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord); +int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord); +uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord); + +float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord); +int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord); +uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, 
int2 coord); +int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord); + +#ifdef cl_khr_depth_images +float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord); +float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord); +#endif //cl_khr_depth_images + +#if cl_khr_gl_msaa_sharing +float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample); +int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample); +uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample); + +float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample); +int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample); +uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample); + +float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample); +float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample); +#endif //cl_khr_gl_msaa_sharing + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#ifdef cl_khr_mipmap_image +float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod); +int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); + +float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); + +float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float 
lod); +int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); + +#endif //cl_khr_mipmap_image +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// Image read functions returning half4 type +#ifdef cl_khr_fp16 +half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord); +half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord); +#endif //cl_khr_fp16 +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Write color value to location specified by coordinate + * (coord.x, coord.y) in the 2D image object specified by image. 
+ * (coord.x, coord.y) are considered to be unnormalized coordinates
+ * and must be in the range 0 ... image width - 1, and 0
+ * ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y) in the 2D image object specified by index
+ * (coord.z) of the 2D image array object image_array.
+ * (coord.x, coord.y) are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width
+ * - 1, and 0 ... image height - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord) in the 1D image (buffer) object specified by image.
+ * coord is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x) in the 1D image object specified by index
+ * (coord.y) of the 1D image array object image_array.
+ * coord.x is considered to be an unnormalized coordinate
+ * and must be in the range 0 ... image width - 1.
+ *
+ * Write color value to location specified by coordinate
+ * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
+ * coord.x, coord.y and coord.z are considered to be unnormalized
+ * coordinates and must be in the range 0 ... image width - 1,
+ * 0 ... image height - 1, and 0 ... image depth - 1.
+ *
+ * For mipmap images, use the mip level specified by lod.
+ *
+ * Appropriate data format conversion to the specified
+ * image format is done before writing the color value.
+ *
+ * write_imagef can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the pre-defined packed formats or set to
+ * CL_SNORM_INT8, CL_UNORM_INT8,
+ * CL_SNORM_INT16, CL_UNORM_INT16,
+ * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
+ * format conversion will be done to convert channel
+ * data from a floating-point value to the actual data format
+ * in which the channels are stored.
+ *
+ * write_imagei can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_SIGNED_INT8,
+ * CL_SIGNED_INT16 and
+ * CL_SIGNED_INT32.
+ *
+ * write_imageui can only be used with image objects
+ * created with image_channel_data_type set to one of
+ * the following values:
+ * CL_UNSIGNED_INT8,
+ * CL_UNSIGNED_INT16 and
+ * CL_UNSIGNED_INT32.
+ *
+ * The behavior of write_imagef, write_imagei and
+ * write_imageui for image objects created with
+ * image_channel_data_type values not specified in
+ * the description above or with (x, y) coordinate
+ * values that are not in the range (0 ... image width - 1,
+ * 0 ... image height - 1), respectively, is undefined.
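+ *
+ * As an illustrative sketch only (the kernel and argument names are
+ * hypothetical), a write that respects the coordinate ranges above
+ * could look like:
+ *
+ *   __kernel void fill_red(write_only image2d_t img) {
+ *     int2 p = (int2)(get_global_id(0), get_global_id(1));
+ *     if (p.x < get_image_width(img) && p.y < get_image_height(img))
+ *       write_imagef(img, p, (float4)(1.0f, 0.0f, 0.0f, 1.0f));
+ *   }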
+ */ +void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color); +void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color); +void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color); + +void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color); +void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color); +void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color); + +void __ovld write_imagef(write_only image1d_t image, int coord, float4 color); +void __ovld write_imagei(write_only image1d_t image, int coord, int4 color); +void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color); + +void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color); +void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color); +void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color); + +void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color); +void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color); +void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color); + +#ifdef cl_khr_3d_image_writes +void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color); +void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color); +void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color); +#endif + +#ifdef cl_khr_depth_images +void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color); +void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color); +#endif //cl_khr_depth_images + +// OpenCL Extension v2.0 s9.18 - Mipmaps +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#if defined(cl_khr_mipmap_image_writes) +void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color); +void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color); +void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color); +void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color); +void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color); +void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color); +void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color); +void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color); +void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth); +void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth); + +#ifdef cl_khr_3d_image_writes +void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color); +void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color); +void 
__ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color); +#endif //cl_khr_3d_image_writes + +#endif //defined(cl_khr_mipmap_image_writes) +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// Image write functions for half4 type +#ifdef cl_khr_fp16 +void __ovld write_imageh(write_only image1d_t image, int coord, half4 color); +void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color); +#ifdef cl_khr_3d_image_writes +void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color); +#endif +void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color); +void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color); +void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color); +#endif //cl_khr_fp16 + +// Image write functions for read_write images +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color); +void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color); +void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color); + +void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color); +void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color); +void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color); + +void __ovld write_imagef(read_write image1d_t image, int coord, float4 color); +void __ovld write_imagei(read_write image1d_t image, int coord, int4 color); +void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color); + +void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color); +void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color); +void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color); + +void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color); +void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color); +void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color); + +#ifdef cl_khr_3d_image_writes +void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color); +void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color); +void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color); +#endif + +#ifdef cl_khr_depth_images +void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color); +void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color); +#endif //cl_khr_depth_images + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#if defined(cl_khr_mipmap_image_writes) +void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color); +void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color); +void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color); + +void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color); +void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color); +void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color); + +void __ovld 
write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
+
+void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
+void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
+
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
+void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
+void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
+#endif //cl_khr_3d_image_writes
+
+#endif //cl_khr_mipmap_image_writes
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+// Image write functions for half4 type
+#ifdef cl_khr_fp16
+void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
+void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
+#ifdef cl_khr_3d_image_writes
+void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
+#endif
+void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
+#endif //cl_khr_fp16
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin functions
+// does not have an access qualifier and is assumed to be read_only by default. Image query
+// builtin functions with a write_only image argument should also be declared.
+
+/**
+ * Return the image width in pixels.
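+ *
+ * As an illustrative sketch only (the buffer argument dst is hypothetical),
+ * the width is what a kernel typically uses to linearize a 2D coordinate
+ * into a buffer index:
+ *
+ *   int2 p = (int2)(get_global_id(0), get_global_id(1));
+ *   dst[p.y * get_image_width(img) + p.x] = read_imagef(img, p);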
+ *
+ */
+int __ovld __cnfn get_image_width(read_only image1d_t image);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
+int __ovld __cnfn get_image_width(read_only image2d_t image);
+int __ovld __cnfn get_image_width(read_only image3d_t image);
+int __ovld __cnfn get_image_width(read_only image1d_array_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+int __ovld __cnfn get_image_width(write_only image1d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
+int __ovld __cnfn get_image_width(write_only image2d_t image);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_width(write_only image3d_t image);
+#endif
+int __ovld __cnfn get_image_width(write_only image1d_array_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+int __ovld __cnfn get_image_width(read_write image1d_t image);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
+int __ovld __cnfn get_image_width(read_write image2d_t image);
+int __ovld __cnfn get_image_width(read_write image3d_t image);
+int __ovld __cnfn get_image_width(read_write image1d_array_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_t image);
+#ifdef cl_khr_depth_images
+int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
+#endif //cl_khr_depth_images
+#if defined(cl_khr_gl_msaa_sharing)
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
+#endif //cl_khr_gl_msaa_sharing
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+/**
+ * Return the image height in pixels.
+ */ +int __ovld __cnfn get_image_height(read_only image2d_t image); +int __ovld __cnfn get_image_height(read_only image3d_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_height(read_only image2d_depth_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_height(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_height(write_only image2d_t image); +#ifdef cl_khr_3d_image_writes +int __ovld __cnfn get_image_height(write_only image3d_t image); +#endif +int __ovld __cnfn get_image_height(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_height(write_only image2d_depth_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_height(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int __ovld __cnfn get_image_height(read_write image2d_t image); +int __ovld __cnfn get_image_height(read_write image3d_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_height(read_write image2d_depth_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_height(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Return the image depth in pixels. + */ +int __ovld __cnfn get_image_depth(read_only image3d_t image); + +#ifdef cl_khr_3d_image_writes +int __ovld __cnfn get_image_depth(write_only image3d_t image); +#endif + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int __ovld __cnfn get_image_depth(read_write image3d_t image); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// OpenCL Extension v2.0 s9.18 - Mipmaps +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#ifdef cl_khr_mipmap_image +/** + * Return the image miplevels. 
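+ *
+ * As an illustrative sketch only (variable names are hypothetical), a
+ * requested level of detail can be clamped against this query before a
+ * mipmapped read:
+ *
+ *   float max_lod = (float)(get_image_num_mip_levels(img) - 1);
+ *   float4 px = read_imagef(img, smp, coord, min(lod, max_lod));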
+ */ + +int __ovld get_image_num_mip_levels(read_only image1d_t image); +int __ovld get_image_num_mip_levels(read_only image2d_t image); +int __ovld get_image_num_mip_levels(read_only image3d_t image); + +int __ovld get_image_num_mip_levels(write_only image1d_t image); +int __ovld get_image_num_mip_levels(write_only image2d_t image); +#ifdef cl_khr_3d_image_writes +int __ovld get_image_num_mip_levels(write_only image3d_t image); +#endif + +int __ovld get_image_num_mip_levels(read_write image1d_t image); +int __ovld get_image_num_mip_levels(read_write image2d_t image); +int __ovld get_image_num_mip_levels(read_write image3d_t image); + +int __ovld get_image_num_mip_levels(read_only image1d_array_t image); +int __ovld get_image_num_mip_levels(read_only image2d_array_t image); +int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image); +int __ovld get_image_num_mip_levels(read_only image2d_depth_t image); + +int __ovld get_image_num_mip_levels(write_only image1d_array_t image); +int __ovld get_image_num_mip_levels(write_only image2d_array_t image); +int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image); +int __ovld get_image_num_mip_levels(write_only image2d_depth_t image); + +int __ovld get_image_num_mip_levels(read_write image1d_array_t image); +int __ovld get_image_num_mip_levels(read_write image2d_array_t image); +int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image); +int __ovld get_image_num_mip_levels(read_write image2d_depth_t image); + +#endif //cl_khr_mipmap_image +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Return the channel data type. Valid values are: + * CLK_SNORM_INT8 + * CLK_SNORM_INT16 + * CLK_UNORM_INT8 + * CLK_UNORM_INT16 + * CLK_UNORM_SHORT_565 + * CLK_UNORM_SHORT_555 + * CLK_UNORM_SHORT_101010 + * CLK_SIGNED_INT8 + * CLK_SIGNED_INT16 + * CLK_SIGNED_INT32 + * CLK_UNSIGNED_INT8 + * CLK_UNSIGNED_INT16 + * CLK_UNSIGNED_INT32 + * CLK_HALF_FLOAT + * CLK_FLOAT + */ + +int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image); +#ifdef cl_khr_3d_image_writes +int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image); +#endif +int __ovld __cnfn get_image_channel_data_type(write_only 
image1d_array_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Return the image channel order. 
Valid values are: + * CLK_A + * CLK_R + * CLK_Rx + * CLK_RG + * CLK_RGx + * CLK_RA + * CLK_RGB + * CLK_RGBx + * CLK_RGBA + * CLK_ARGB + * CLK_BGRA + * CLK_INTENSITY + * CLK_LUMINANCE + */ + +int __ovld __cnfn get_image_channel_order(read_only image1d_t image); +int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_t image); +int __ovld __cnfn get_image_channel_order(read_only image3d_t image); +int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_channel_order(write_only image1d_t image); +int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_t image); +#ifdef cl_khr_3d_image_writes +int __ovld __cnfn get_image_channel_order(write_only image3d_t image); +#endif +int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int __ovld __cnfn get_image_channel_order(read_write image1d_t image); +int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_t image); +int __ovld __cnfn get_image_channel_order(read_write image3d_t image); +int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif 
//defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Return the 2D image width and height as an int2 + * type. The width is returned in the x component, and + * the height in the y component. + */ +int2 __ovld __cnfn get_image_dim(read_only image2d_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int2 __ovld __cnfn get_image_dim(write_only image2d_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int2 __ovld __cnfn get_image_dim(read_write image2d_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Return the 3D image width, height, and depth as an + * int4 type. The width is returned in the x + * component, height in the y component, depth in the z + * component and the w component is 0. + */ +int4 __ovld __cnfn get_image_dim(read_only image3d_t image); +#ifdef cl_khr_3d_image_writes +int4 __ovld __cnfn get_image_dim(write_only image3d_t image); +#endif +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int4 __ovld __cnfn get_image_dim(read_write image3d_t image); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** + * Return the image array size. 
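+ *
+ * As an illustrative sketch only (names are hypothetical), the layer
+ * index carried in coord.z of a 2D image array access must stay below
+ * this value:
+ *
+ *   int layer = min(gid_z, (int)get_image_array_size(arr) - 1);
+ *   float4 px = read_imagef(arr, (int4)(x, y, layer, 0));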
+ */ + +size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array); +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array); +#ifdef cl_khr_depth_images +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array); +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array); +#endif //cl_khr_gl_msaa_sharing + +size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array); +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array); +#ifdef cl_khr_depth_images +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array); +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array); +#endif //cl_khr_gl_msaa_sharing + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array); +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array); +#ifdef cl_khr_depth_images +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array); +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array); +#endif //cl_khr_gl_msaa_sharing +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +/** +* Return the number of samples associated with image +*/ +#if defined(cl_khr_gl_msaa_sharing) +int __ovld get_image_num_samples(read_only image2d_msaa_t image); +int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image); +int __ovld get_image_num_samples(read_only image2d_array_msaa_t image); +int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image); + +int __ovld get_image_num_samples(write_only image2d_msaa_t image); +int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image); +int __ovld get_image_num_samples(write_only image2d_array_msaa_t image); +int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image); + +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +int __ovld get_image_num_samples(read_write image2d_msaa_t image); +int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image); +int __ovld get_image_num_samples(read_write image2d_array_msaa_t image); +int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +#endif + +// OpenCL v2.0 s6.13.15 - Work-group Functions + +#if defined(__opencl_c_work_group_collective_functions) +int __ovld __conv work_group_all(int predicate); +int __ovld __conv work_group_any(int predicate); + +#ifdef cl_khr_fp16 +half __ovld __conv work_group_broadcast(half a, size_t local_id); +half __ovld __conv work_group_broadcast(half a, size_t x, size_t y); +half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z); +#endif +int __ovld __conv work_group_broadcast(int a, 
size_t local_id); +int __ovld __conv work_group_broadcast(int a, size_t x, size_t y); +int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z); +uint __ovld __conv work_group_broadcast(uint a, size_t local_id); +uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y); +uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z); +long __ovld __conv work_group_broadcast(long a, size_t local_id); +long __ovld __conv work_group_broadcast(long a, size_t x, size_t y); +long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z); +ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id); +ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y); +ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z); +float __ovld __conv work_group_broadcast(float a, size_t local_id); +float __ovld __conv work_group_broadcast(float a, size_t x, size_t y); +float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z); +#ifdef cl_khr_fp64 +double __ovld __conv work_group_broadcast(double a, size_t local_id); +double __ovld __conv work_group_broadcast(double a, size_t x, size_t y); +double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld __conv work_group_reduce_add(half x); +half __ovld __conv work_group_reduce_min(half x); +half __ovld __conv work_group_reduce_max(half x); +half __ovld __conv work_group_scan_exclusive_add(half x); +half __ovld __conv work_group_scan_exclusive_min(half x); +half __ovld __conv work_group_scan_exclusive_max(half x); +half __ovld __conv work_group_scan_inclusive_add(half x); +half __ovld __conv work_group_scan_inclusive_min(half x); +half __ovld __conv work_group_scan_inclusive_max(half x); +#endif +int __ovld __conv work_group_reduce_add(int x); +int __ovld __conv work_group_reduce_min(int x); +int __ovld __conv work_group_reduce_max(int x); +int __ovld __conv work_group_scan_exclusive_add(int x); +int __ovld __conv work_group_scan_exclusive_min(int x); +int __ovld __conv work_group_scan_exclusive_max(int x); +int __ovld __conv work_group_scan_inclusive_add(int x); +int __ovld __conv work_group_scan_inclusive_min(int x); +int __ovld __conv work_group_scan_inclusive_max(int x); +uint __ovld __conv work_group_reduce_add(uint x); +uint __ovld __conv work_group_reduce_min(uint x); +uint __ovld __conv work_group_reduce_max(uint x); +uint __ovld __conv work_group_scan_exclusive_add(uint x); +uint __ovld __conv work_group_scan_exclusive_min(uint x); +uint __ovld __conv work_group_scan_exclusive_max(uint x); +uint __ovld __conv work_group_scan_inclusive_add(uint x); +uint __ovld __conv work_group_scan_inclusive_min(uint x); +uint __ovld __conv work_group_scan_inclusive_max(uint x); +long __ovld __conv work_group_reduce_add(long x); +long __ovld __conv work_group_reduce_min(long x); +long __ovld __conv work_group_reduce_max(long x); +long __ovld __conv work_group_scan_exclusive_add(long x); +long __ovld __conv work_group_scan_exclusive_min(long x); +long __ovld __conv work_group_scan_exclusive_max(long x); +long __ovld __conv work_group_scan_inclusive_add(long x); +long __ovld __conv work_group_scan_inclusive_min(long x); +long __ovld __conv work_group_scan_inclusive_max(long x); +ulong __ovld __conv work_group_reduce_add(ulong x); +ulong __ovld __conv work_group_reduce_min(ulong x); +ulong __ovld __conv work_group_reduce_max(ulong x); +ulong __ovld __conv 
work_group_scan_exclusive_add(ulong x); +ulong __ovld __conv work_group_scan_exclusive_min(ulong x); +ulong __ovld __conv work_group_scan_exclusive_max(ulong x); +ulong __ovld __conv work_group_scan_inclusive_add(ulong x); +ulong __ovld __conv work_group_scan_inclusive_min(ulong x); +ulong __ovld __conv work_group_scan_inclusive_max(ulong x); +float __ovld __conv work_group_reduce_add(float x); +float __ovld __conv work_group_reduce_min(float x); +float __ovld __conv work_group_reduce_max(float x); +float __ovld __conv work_group_scan_exclusive_add(float x); +float __ovld __conv work_group_scan_exclusive_min(float x); +float __ovld __conv work_group_scan_exclusive_max(float x); +float __ovld __conv work_group_scan_inclusive_add(float x); +float __ovld __conv work_group_scan_inclusive_min(float x); +float __ovld __conv work_group_scan_inclusive_max(float x); +#ifdef cl_khr_fp64 +double __ovld __conv work_group_reduce_add(double x); +double __ovld __conv work_group_reduce_min(double x); +double __ovld __conv work_group_reduce_max(double x); +double __ovld __conv work_group_scan_exclusive_add(double x); +double __ovld __conv work_group_scan_exclusive_min(double x); +double __ovld __conv work_group_scan_exclusive_max(double x); +double __ovld __conv work_group_scan_inclusive_add(double x); +double __ovld __conv work_group_scan_inclusive_min(double x); +double __ovld __conv work_group_scan_inclusive_max(double x); +#endif //cl_khr_fp64 + +#endif //defined(__opencl_c_work_group_collective_functions) + +// OpenCL v2.0 s6.13.16 - Pipe Functions +#if defined(__opencl_c_pipes) +bool __ovld is_valid_reserve_id(reserve_id_t reserve_id); +#endif //defined(__opencl_c_pipes) + + +// OpenCL v2.0 s6.13.17 - Enqueue Kernels +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +ndrange_t __ovld ndrange_1D(size_t); +ndrange_t __ovld ndrange_1D(size_t, size_t); +ndrange_t __ovld ndrange_1D(size_t, size_t, size_t); + +ndrange_t __ovld ndrange_2D(const size_t[2]); +ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]); +ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]); + +ndrange_t __ovld ndrange_3D(const size_t[3]); +ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]); +ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]); + +int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*); + +void __ovld retain_event(clk_event_t); + +void __ovld release_event(clk_event_t); + +clk_event_t __ovld create_user_event(void); + +void __ovld set_user_event_status(clk_event_t e, int state); + +bool __ovld is_valid_event (clk_event_t event); + +void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value); + +queue_t __ovld get_default_queue(void); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +// OpenCL Extension v2.0 s9.17 - Sub-groups + +#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups) +// Shared Sub Group Functions +uint __ovld get_sub_group_size(void); +uint __ovld get_max_sub_group_size(void); +uint __ovld get_num_sub_groups(void); +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +uint __ovld get_enqueued_num_sub_groups(void); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +uint __ovld get_sub_group_id(void); +uint __ovld get_sub_group_local_id(void); + +void __ovld __conv sub_group_barrier(cl_mem_fence_flags 
flags); +#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) +void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope); +#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) + +int __ovld __conv sub_group_all(int predicate); +int __ovld __conv sub_group_any(int predicate); + +int __ovld __conv sub_group_broadcast(int x, uint sub_group_local_id); +uint __ovld __conv sub_group_broadcast(uint x, uint sub_group_local_id); +long __ovld __conv sub_group_broadcast(long x, uint sub_group_local_id); +ulong __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id); +float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id); + +int __ovld __conv sub_group_reduce_add(int x); +uint __ovld __conv sub_group_reduce_add(uint x); +long __ovld __conv sub_group_reduce_add(long x); +ulong __ovld __conv sub_group_reduce_add(ulong x); +float __ovld __conv sub_group_reduce_add(float x); +int __ovld __conv sub_group_reduce_min(int x); +uint __ovld __conv sub_group_reduce_min(uint x); +long __ovld __conv sub_group_reduce_min(long x); +ulong __ovld __conv sub_group_reduce_min(ulong x); +float __ovld __conv sub_group_reduce_min(float x); +int __ovld __conv sub_group_reduce_max(int x); +uint __ovld __conv sub_group_reduce_max(uint x); +long __ovld __conv sub_group_reduce_max(long x); +ulong __ovld __conv sub_group_reduce_max(ulong x); +float __ovld __conv sub_group_reduce_max(float x); + +int __ovld __conv sub_group_scan_exclusive_add(int x); +uint __ovld __conv sub_group_scan_exclusive_add(uint x); +long __ovld __conv sub_group_scan_exclusive_add(long x); +ulong __ovld __conv sub_group_scan_exclusive_add(ulong x); +float __ovld __conv sub_group_scan_exclusive_add(float x); +int __ovld __conv sub_group_scan_exclusive_min(int x); +uint __ovld __conv sub_group_scan_exclusive_min(uint x); +long __ovld __conv sub_group_scan_exclusive_min(long x); +ulong __ovld __conv sub_group_scan_exclusive_min(ulong x); +float __ovld __conv sub_group_scan_exclusive_min(float x); +int __ovld __conv sub_group_scan_exclusive_max(int x); +uint __ovld __conv sub_group_scan_exclusive_max(uint x); +long __ovld __conv sub_group_scan_exclusive_max(long x); +ulong __ovld __conv sub_group_scan_exclusive_max(ulong x); +float __ovld __conv sub_group_scan_exclusive_max(float x); + +int __ovld __conv sub_group_scan_inclusive_add(int x); +uint __ovld __conv sub_group_scan_inclusive_add(uint x); +long __ovld __conv sub_group_scan_inclusive_add(long x); +ulong __ovld __conv sub_group_scan_inclusive_add(ulong x); +float __ovld __conv sub_group_scan_inclusive_add(float x); +int __ovld __conv sub_group_scan_inclusive_min(int x); +uint __ovld __conv sub_group_scan_inclusive_min(uint x); +long __ovld __conv sub_group_scan_inclusive_min(long x); +ulong __ovld __conv sub_group_scan_inclusive_min(ulong x); +float __ovld __conv sub_group_scan_inclusive_min(float x); +int __ovld __conv sub_group_scan_inclusive_max(int x); +uint __ovld __conv sub_group_scan_inclusive_max(uint x); +long __ovld __conv sub_group_scan_inclusive_max(long x); +ulong __ovld __conv sub_group_scan_inclusive_max(ulong x); +float __ovld __conv sub_group_scan_inclusive_max(float x); + +#ifdef cl_khr_fp16 +half __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id); +half __ovld __conv sub_group_reduce_add(half x); +half __ovld __conv sub_group_reduce_min(half x); +half __ovld __conv sub_group_reduce_max(half x); +half __ovld __conv sub_group_scan_exclusive_add(half x); 
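+// Editor's note: an illustrative usage sketch, not part of the upstream
+// header. With cl_khr_fp16 the core sub-group collectives extend to half,
+// e.g. (the names `v` and `in` are assumptions for illustration):
+//   half v   = in[get_global_id(0)];
+//   half sum = sub_group_reduce_add(v);          // same value in every work-item
+//   half pre = sub_group_scan_exclusive_add(v);  // prefix sum, excluding v itself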
+half __ovld __conv sub_group_scan_exclusive_min(half x); +half __ovld __conv sub_group_scan_exclusive_max(half x); +half __ovld __conv sub_group_scan_inclusive_add(half x); +half __ovld __conv sub_group_scan_inclusive_min(half x); +half __ovld __conv sub_group_scan_inclusive_max(half x); +#endif //cl_khr_fp16 + +#ifdef cl_khr_fp64 +double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id); +double __ovld __conv sub_group_reduce_add(double x); +double __ovld __conv sub_group_reduce_min(double x); +double __ovld __conv sub_group_reduce_max(double x); +double __ovld __conv sub_group_scan_exclusive_add(double x); +double __ovld __conv sub_group_scan_exclusive_min(double x); +double __ovld __conv sub_group_scan_exclusive_max(double x); +double __ovld __conv sub_group_scan_inclusive_add(double x); +double __ovld __conv sub_group_scan_inclusive_min(double x); +double __ovld __conv sub_group_scan_inclusive_max(double x); +#endif //cl_khr_fp64 + +#endif //cl_khr_subgroups cl_intel_subgroups __opencl_c_subgroups + +#if defined(cl_khr_subgroup_extended_types) +char __ovld __conv sub_group_broadcast( char value, uint index ); +char2 __ovld __conv sub_group_broadcast( char2 value, uint index ); +char3 __ovld __conv sub_group_broadcast( char3 value, uint index ); +char4 __ovld __conv sub_group_broadcast( char4 value, uint index ); +char8 __ovld __conv sub_group_broadcast( char8 value, uint index ); +char16 __ovld __conv sub_group_broadcast( char16 value, uint index ); + +uchar __ovld __conv sub_group_broadcast( uchar value, uint index ); +uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index ); +uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index ); +uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index ); +uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index ); +uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index ); + +short __ovld __conv sub_group_broadcast( short value, uint index ); +short2 __ovld __conv sub_group_broadcast( short2 value, uint index ); +short3 __ovld __conv sub_group_broadcast( short3 value, uint index ); +short4 __ovld __conv sub_group_broadcast( short4 value, uint index ); +short8 __ovld __conv sub_group_broadcast( short8 value, uint index ); +short16 __ovld __conv sub_group_broadcast( short16 value, uint index ); + +ushort __ovld __conv sub_group_broadcast( ushort value, uint index ); +ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index ); +ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index ); +ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index ); +ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index ); +ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index ); + +// scalar int broadcast is part of cl_khr_subgroups +int2 __ovld __conv sub_group_broadcast( int2 value, uint index ); +int3 __ovld __conv sub_group_broadcast( int3 value, uint index ); +int4 __ovld __conv sub_group_broadcast( int4 value, uint index ); +int8 __ovld __conv sub_group_broadcast( int8 value, uint index ); +int16 __ovld __conv sub_group_broadcast( int16 value, uint index ); + +// scalar uint broadcast is part of cl_khr_subgroups +uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index ); +uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index ); +uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index ); +uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index ); +uint16 __ovld __conv 
sub_group_broadcast( uint16 value, uint index ); + +// scalar long broadcast is part of cl_khr_subgroups +long2 __ovld __conv sub_group_broadcast( long2 value, uint index ); +long3 __ovld __conv sub_group_broadcast( long3 value, uint index ); +long4 __ovld __conv sub_group_broadcast( long4 value, uint index ); +long8 __ovld __conv sub_group_broadcast( long8 value, uint index ); +long16 __ovld __conv sub_group_broadcast( long16 value, uint index ); + +// scalar ulong broadcast is part of cl_khr_subgroups +ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index ); +ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index ); +ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index ); +ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index ); +ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index ); + +// scalar float broadcast is part of cl_khr_subgroups +float2 __ovld __conv sub_group_broadcast( float2 value, uint index ); +float3 __ovld __conv sub_group_broadcast( float3 value, uint index ); +float4 __ovld __conv sub_group_broadcast( float4 value, uint index ); +float8 __ovld __conv sub_group_broadcast( float8 value, uint index ); +float16 __ovld __conv sub_group_broadcast( float16 value, uint index ); + +char __ovld __conv sub_group_reduce_add( char value ); +uchar __ovld __conv sub_group_reduce_add( uchar value ); +short __ovld __conv sub_group_reduce_add( short value ); +ushort __ovld __conv sub_group_reduce_add( ushort value ); + +char __ovld __conv sub_group_reduce_min( char value ); +uchar __ovld __conv sub_group_reduce_min( uchar value ); +short __ovld __conv sub_group_reduce_min( short value ); +ushort __ovld __conv sub_group_reduce_min( ushort value ); + +char __ovld __conv sub_group_reduce_max( char value ); +uchar __ovld __conv sub_group_reduce_max( uchar value ); +short __ovld __conv sub_group_reduce_max( short value ); +ushort __ovld __conv sub_group_reduce_max( ushort value ); + +char __ovld __conv sub_group_scan_inclusive_add( char value ); +uchar __ovld __conv sub_group_scan_inclusive_add( uchar value ); +short __ovld __conv sub_group_scan_inclusive_add( short value ); +ushort __ovld __conv sub_group_scan_inclusive_add( ushort value ); + +char __ovld __conv sub_group_scan_inclusive_min( char value ); +uchar __ovld __conv sub_group_scan_inclusive_min( uchar value ); +short __ovld __conv sub_group_scan_inclusive_min( short value ); +ushort __ovld __conv sub_group_scan_inclusive_min( ushort value ); + +char __ovld __conv sub_group_scan_inclusive_max( char value ); +uchar __ovld __conv sub_group_scan_inclusive_max( uchar value ); +short __ovld __conv sub_group_scan_inclusive_max( short value ); +ushort __ovld __conv sub_group_scan_inclusive_max( ushort value ); + +char __ovld __conv sub_group_scan_exclusive_add( char value ); +uchar __ovld __conv sub_group_scan_exclusive_add( uchar value ); +short __ovld __conv sub_group_scan_exclusive_add( short value ); +ushort __ovld __conv sub_group_scan_exclusive_add( ushort value ); + +char __ovld __conv sub_group_scan_exclusive_min( char value ); +uchar __ovld __conv sub_group_scan_exclusive_min( uchar value ); +short __ovld __conv sub_group_scan_exclusive_min( short value ); +ushort __ovld __conv sub_group_scan_exclusive_min( ushort value ); + +char __ovld __conv sub_group_scan_exclusive_max( char value ); +uchar __ovld __conv sub_group_scan_exclusive_max( uchar value ); +short __ovld __conv sub_group_scan_exclusive_max( short value ); +ushort __ovld __conv 
sub_group_scan_exclusive_max( ushort value ); + +#if defined(cl_khr_fp16) +// scalar half broadcast is part of cl_khr_subgroups +half2 __ovld __conv sub_group_broadcast( half2 value, uint index ); +half3 __ovld __conv sub_group_broadcast( half3 value, uint index ); +half4 __ovld __conv sub_group_broadcast( half4 value, uint index ); +half8 __ovld __conv sub_group_broadcast( half8 value, uint index ); +half16 __ovld __conv sub_group_broadcast( half16 value, uint index ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +// scalar double broadcast is part of cl_khr_subgroups +double2 __ovld __conv sub_group_broadcast( double2 value, uint index ); +double3 __ovld __conv sub_group_broadcast( double3 value, uint index ); +double4 __ovld __conv sub_group_broadcast( double4 value, uint index ); +double8 __ovld __conv sub_group_broadcast( double8 value, uint index ); +double16 __ovld __conv sub_group_broadcast( double16 value, uint index ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_extended_types + +#if defined(cl_khr_subgroup_non_uniform_vote) +int __ovld sub_group_elect(void); +int __ovld sub_group_non_uniform_all( int predicate ); +int __ovld sub_group_non_uniform_any( int predicate ); + +int __ovld sub_group_non_uniform_all_equal( char value ); +int __ovld sub_group_non_uniform_all_equal( uchar value ); +int __ovld sub_group_non_uniform_all_equal( short value ); +int __ovld sub_group_non_uniform_all_equal( ushort value ); +int __ovld sub_group_non_uniform_all_equal( int value ); +int __ovld sub_group_non_uniform_all_equal( uint value ); +int __ovld sub_group_non_uniform_all_equal( long value ); +int __ovld sub_group_non_uniform_all_equal( ulong value ); +int __ovld sub_group_non_uniform_all_equal( float value ); + +#if defined(cl_khr_fp16) +int __ovld sub_group_non_uniform_all_equal( half value ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +int __ovld sub_group_non_uniform_all_equal( double value ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_non_uniform_vote + +#if defined(cl_khr_subgroup_ballot) +char __ovld sub_group_non_uniform_broadcast( char value, uint index ); +char2 __ovld sub_group_non_uniform_broadcast( char2 value, uint index ); +char3 __ovld sub_group_non_uniform_broadcast( char3 value, uint index ); +char4 __ovld sub_group_non_uniform_broadcast( char4 value, uint index ); +char8 __ovld sub_group_non_uniform_broadcast( char8 value, uint index ); +char16 __ovld sub_group_non_uniform_broadcast( char16 value, uint index ); + +uchar __ovld sub_group_non_uniform_broadcast( uchar value, uint index ); +uchar2 __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index ); +uchar3 __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index ); +uchar4 __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index ); +uchar8 __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index ); +uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index ); + +short __ovld sub_group_non_uniform_broadcast( short value, uint index ); +short2 __ovld sub_group_non_uniform_broadcast( short2 value, uint index ); +short3 __ovld sub_group_non_uniform_broadcast( short3 value, uint index ); +short4 __ovld sub_group_non_uniform_broadcast( short4 value, uint index ); +short8 __ovld sub_group_non_uniform_broadcast( short8 value, uint index ); +short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index ); + +ushort __ovld sub_group_non_uniform_broadcast( ushort value, uint index ); +ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 
value, uint index ); +ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index ); +ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index ); +ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index ); +ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index ); + +int __ovld sub_group_non_uniform_broadcast( int value, uint index ); +int2 __ovld sub_group_non_uniform_broadcast( int2 value, uint index ); +int3 __ovld sub_group_non_uniform_broadcast( int3 value, uint index ); +int4 __ovld sub_group_non_uniform_broadcast( int4 value, uint index ); +int8 __ovld sub_group_non_uniform_broadcast( int8 value, uint index ); +int16 __ovld sub_group_non_uniform_broadcast( int16 value, uint index ); + +uint __ovld sub_group_non_uniform_broadcast( uint value, uint index ); +uint2 __ovld sub_group_non_uniform_broadcast( uint2 value, uint index ); +uint3 __ovld sub_group_non_uniform_broadcast( uint3 value, uint index ); +uint4 __ovld sub_group_non_uniform_broadcast( uint4 value, uint index ); +uint8 __ovld sub_group_non_uniform_broadcast( uint8 value, uint index ); +uint16 __ovld sub_group_non_uniform_broadcast( uint16 value, uint index ); + +long __ovld sub_group_non_uniform_broadcast( long value, uint index ); +long2 __ovld sub_group_non_uniform_broadcast( long2 value, uint index ); +long3 __ovld sub_group_non_uniform_broadcast( long3 value, uint index ); +long4 __ovld sub_group_non_uniform_broadcast( long4 value, uint index ); +long8 __ovld sub_group_non_uniform_broadcast( long8 value, uint index ); +long16 __ovld sub_group_non_uniform_broadcast( long16 value, uint index ); + +ulong __ovld sub_group_non_uniform_broadcast( ulong value, uint index ); +ulong2 __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index ); +ulong3 __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index ); +ulong4 __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index ); +ulong8 __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index ); +ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index ); + +float __ovld sub_group_non_uniform_broadcast( float value, uint index ); +float2 __ovld sub_group_non_uniform_broadcast( float2 value, uint index ); +float3 __ovld sub_group_non_uniform_broadcast( float3 value, uint index ); +float4 __ovld sub_group_non_uniform_broadcast( float4 value, uint index ); +float8 __ovld sub_group_non_uniform_broadcast( float8 value, uint index ); +float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index ); + +char __ovld sub_group_broadcast_first( char value ); +uchar __ovld sub_group_broadcast_first( uchar value ); +short __ovld sub_group_broadcast_first( short value ); +ushort __ovld sub_group_broadcast_first( ushort value ); +int __ovld sub_group_broadcast_first( int value ); +uint __ovld sub_group_broadcast_first( uint value ); +long __ovld sub_group_broadcast_first( long value ); +ulong __ovld sub_group_broadcast_first( ulong value ); +float __ovld sub_group_broadcast_first( float value ); + +uint4 __ovld sub_group_ballot( int predicate ); +int __ovld __cnfn sub_group_inverse_ballot( uint4 value ); +int __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index ); +uint __ovld __cnfn sub_group_ballot_bit_count( uint4 value ); + +uint __ovld sub_group_ballot_inclusive_scan( uint4 value ); +uint __ovld sub_group_ballot_exclusive_scan( uint4 value ); +uint __ovld sub_group_ballot_find_lsb( uint4 value ); +uint __ovld sub_group_ballot_find_msb( uint4 
value ); + +uint4 __ovld __cnfn get_sub_group_eq_mask(void); +uint4 __ovld __cnfn get_sub_group_ge_mask(void); +uint4 __ovld __cnfn get_sub_group_gt_mask(void); +uint4 __ovld __cnfn get_sub_group_le_mask(void); +uint4 __ovld __cnfn get_sub_group_lt_mask(void); + +#if defined(cl_khr_fp16) +half __ovld sub_group_non_uniform_broadcast( half value, uint index ); +half2 __ovld sub_group_non_uniform_broadcast( half2 value, uint index ); +half3 __ovld sub_group_non_uniform_broadcast( half3 value, uint index ); +half4 __ovld sub_group_non_uniform_broadcast( half4 value, uint index ); +half8 __ovld sub_group_non_uniform_broadcast( half8 value, uint index ); +half16 __ovld sub_group_non_uniform_broadcast( half16 value, uint index ); + +half __ovld sub_group_broadcast_first( half value ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +double __ovld sub_group_non_uniform_broadcast( double value, uint index ); +double2 __ovld sub_group_non_uniform_broadcast( double2 value, uint index ); +double3 __ovld sub_group_non_uniform_broadcast( double3 value, uint index ); +double4 __ovld sub_group_non_uniform_broadcast( double4 value, uint index ); +double8 __ovld sub_group_non_uniform_broadcast( double8 value, uint index ); +double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index ); + +double __ovld sub_group_broadcast_first( double value ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_ballot + +#if defined(cl_khr_subgroup_non_uniform_arithmetic) +char __ovld sub_group_non_uniform_reduce_add( char value ); +uchar __ovld sub_group_non_uniform_reduce_add( uchar value ); +short __ovld sub_group_non_uniform_reduce_add( short value ); +ushort __ovld sub_group_non_uniform_reduce_add( ushort value ); +int __ovld sub_group_non_uniform_reduce_add( int value ); +uint __ovld sub_group_non_uniform_reduce_add( uint value ); +long __ovld sub_group_non_uniform_reduce_add( long value ); +ulong __ovld sub_group_non_uniform_reduce_add( ulong value ); +float __ovld sub_group_non_uniform_reduce_add( float value ); + +char __ovld sub_group_non_uniform_reduce_mul( char value ); +uchar __ovld sub_group_non_uniform_reduce_mul( uchar value ); +short __ovld sub_group_non_uniform_reduce_mul( short value ); +ushort __ovld sub_group_non_uniform_reduce_mul( ushort value ); +int __ovld sub_group_non_uniform_reduce_mul( int value ); +uint __ovld sub_group_non_uniform_reduce_mul( uint value ); +long __ovld sub_group_non_uniform_reduce_mul( long value ); +ulong __ovld sub_group_non_uniform_reduce_mul( ulong value ); +float __ovld sub_group_non_uniform_reduce_mul( float value ); + +char __ovld sub_group_non_uniform_reduce_min( char value ); +uchar __ovld sub_group_non_uniform_reduce_min( uchar value ); +short __ovld sub_group_non_uniform_reduce_min( short value ); +ushort __ovld sub_group_non_uniform_reduce_min( ushort value ); +int __ovld sub_group_non_uniform_reduce_min( int value ); +uint __ovld sub_group_non_uniform_reduce_min( uint value ); +long __ovld sub_group_non_uniform_reduce_min( long value ); +ulong __ovld sub_group_non_uniform_reduce_min( ulong value ); +float __ovld sub_group_non_uniform_reduce_min( float value ); + +char __ovld sub_group_non_uniform_reduce_max( char value ); +uchar __ovld sub_group_non_uniform_reduce_max( uchar value ); +short __ovld sub_group_non_uniform_reduce_max( short value ); +ushort __ovld sub_group_non_uniform_reduce_max( ushort value ); +int __ovld sub_group_non_uniform_reduce_max( int value ); +uint __ovld sub_group_non_uniform_reduce_max( uint value ); +long __ovld 
sub_group_non_uniform_reduce_max( long value ); +ulong __ovld sub_group_non_uniform_reduce_max( ulong value ); +float __ovld sub_group_non_uniform_reduce_max( float value ); + +char __ovld sub_group_non_uniform_scan_inclusive_add( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_add( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_add( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_add( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_add( int value ); +uint __ovld sub_group_non_uniform_scan_inclusive_add( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_add( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_add( ulong value ); +float __ovld sub_group_non_uniform_scan_inclusive_add( float value ); + +char __ovld sub_group_non_uniform_scan_inclusive_mul( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_mul( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_mul( int value ); +uint __ovld sub_group_non_uniform_scan_inclusive_mul( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_mul( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value ); +float __ovld sub_group_non_uniform_scan_inclusive_mul( float value ); + +char __ovld sub_group_non_uniform_scan_inclusive_min( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_min( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_min( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_min( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_min( int value ); +uint __ovld sub_group_non_uniform_scan_inclusive_min( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_min( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_min( ulong value ); +float __ovld sub_group_non_uniform_scan_inclusive_min( float value ); + +char __ovld sub_group_non_uniform_scan_inclusive_max( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_max( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_max( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_max( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_max( int value ); +uint __ovld sub_group_non_uniform_scan_inclusive_max( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_max( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_max( ulong value ); +float __ovld sub_group_non_uniform_scan_inclusive_max( float value ); + +char __ovld sub_group_non_uniform_scan_exclusive_add( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_add( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_add( short value ); +ushort __ovld sub_group_non_uniform_scan_exclusive_add( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_add( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_add( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_add( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_add( ulong value ); +float __ovld sub_group_non_uniform_scan_exclusive_add( float value ); + +char __ovld sub_group_non_uniform_scan_exclusive_mul( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_mul( short value ); 
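+// Editor's note: an illustrative sketch, not part of the upstream header.
+// Unlike the core collectives above, the non-uniform variants may be called
+// from divergent control flow; only the currently active work-items in the
+// sub-group take part, e.g. (`value` and `prod` are assumed locals):
+//   if (value != 0)  // divergence is permitted here
+//     prod = sub_group_non_uniform_scan_exclusive_mul(value);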
+ushort __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_mul( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_mul( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_mul( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value ); +float __ovld sub_group_non_uniform_scan_exclusive_mul( float value ); + +char __ovld sub_group_non_uniform_scan_exclusive_min( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_min( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_min( short value ); +ushort __ovld sub_group_non_uniform_scan_exclusive_min( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_min( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_min( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_min( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_min( ulong value ); +float __ovld sub_group_non_uniform_scan_exclusive_min( float value ); + +char __ovld sub_group_non_uniform_scan_exclusive_max( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_max( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_max( short value ); +ushort __ovld sub_group_non_uniform_scan_exclusive_max( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_max( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_max( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_max( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_max( ulong value ); +float __ovld sub_group_non_uniform_scan_exclusive_max( float value ); + +char __ovld sub_group_non_uniform_reduce_and( char value ); +uchar __ovld sub_group_non_uniform_reduce_and( uchar value ); +short __ovld sub_group_non_uniform_reduce_and( short value ); +ushort __ovld sub_group_non_uniform_reduce_and( ushort value ); +int __ovld sub_group_non_uniform_reduce_and( int value ); +uint __ovld sub_group_non_uniform_reduce_and( uint value ); +long __ovld sub_group_non_uniform_reduce_and( long value ); +ulong __ovld sub_group_non_uniform_reduce_and( ulong value ); + +char __ovld sub_group_non_uniform_reduce_or( char value ); +uchar __ovld sub_group_non_uniform_reduce_or( uchar value ); +short __ovld sub_group_non_uniform_reduce_or( short value ); +ushort __ovld sub_group_non_uniform_reduce_or( ushort value ); +int __ovld sub_group_non_uniform_reduce_or( int value ); +uint __ovld sub_group_non_uniform_reduce_or( uint value ); +long __ovld sub_group_non_uniform_reduce_or( long value ); +ulong __ovld sub_group_non_uniform_reduce_or( ulong value ); + +char __ovld sub_group_non_uniform_reduce_xor( char value ); +uchar __ovld sub_group_non_uniform_reduce_xor( uchar value ); +short __ovld sub_group_non_uniform_reduce_xor( short value ); +ushort __ovld sub_group_non_uniform_reduce_xor( ushort value ); +int __ovld sub_group_non_uniform_reduce_xor( int value ); +uint __ovld sub_group_non_uniform_reduce_xor( uint value ); +long __ovld sub_group_non_uniform_reduce_xor( long value ); +ulong __ovld sub_group_non_uniform_reduce_xor( ulong value ); + +char __ovld sub_group_non_uniform_scan_inclusive_and( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_and( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_and( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_and( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_and( int value ); +uint __ovld 
sub_group_non_uniform_scan_inclusive_and( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_and( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_and( ulong value ); + +char __ovld sub_group_non_uniform_scan_inclusive_or( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_or( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_or( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_or( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_or( int value ); +uint __ovld sub_group_non_uniform_scan_inclusive_or( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_or( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_or( ulong value ); + +char __ovld sub_group_non_uniform_scan_inclusive_xor( char value ); +uchar __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value ); +short __ovld sub_group_non_uniform_scan_inclusive_xor( short value ); +ushort __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value ); +int __ovld sub_group_non_uniform_scan_inclusive_xor( int value ); +uint __ovld sub_group_non_uniform_scan_inclusive_xor( uint value ); +long __ovld sub_group_non_uniform_scan_inclusive_xor( long value ); +ulong __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value ); + +char __ovld sub_group_non_uniform_scan_exclusive_and( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_and( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_and( short value ); +ushort __ovld sub_group_non_uniform_scan_exclusive_and( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_and( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_and( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_and( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_and( ulong value ); + +char __ovld sub_group_non_uniform_scan_exclusive_or( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_or( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_or( short value ); +ushort __ovld sub_group_non_uniform_scan_exclusive_or( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_or( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_or( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_or( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_or( ulong value ); + +char __ovld sub_group_non_uniform_scan_exclusive_xor( char value ); +uchar __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value ); +short __ovld sub_group_non_uniform_scan_exclusive_xor( short value ); +ushort __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value ); +int __ovld sub_group_non_uniform_scan_exclusive_xor( int value ); +uint __ovld sub_group_non_uniform_scan_exclusive_xor( uint value ); +long __ovld sub_group_non_uniform_scan_exclusive_xor( long value ); +ulong __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value ); + +int __ovld sub_group_non_uniform_reduce_logical_and( int predicate ); +int __ovld sub_group_non_uniform_reduce_logical_or( int predicate ); +int __ovld sub_group_non_uniform_reduce_logical_xor( int predicate ); + +int __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate ); +int __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate ); +int __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate ); + +int __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate ); +int __ovld 
sub_group_non_uniform_scan_exclusive_logical_or( int predicate ); +int __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate ); + +#if defined(cl_khr_fp16) +half __ovld sub_group_non_uniform_reduce_add( half value ); +half __ovld sub_group_non_uniform_reduce_mul( half value ); +half __ovld sub_group_non_uniform_reduce_min( half value ); +half __ovld sub_group_non_uniform_reduce_max( half value ); +half __ovld sub_group_non_uniform_scan_inclusive_add( half value ); +half __ovld sub_group_non_uniform_scan_inclusive_mul( half value ); +half __ovld sub_group_non_uniform_scan_inclusive_min( half value ); +half __ovld sub_group_non_uniform_scan_inclusive_max( half value ); +half __ovld sub_group_non_uniform_scan_exclusive_add( half value ); +half __ovld sub_group_non_uniform_scan_exclusive_mul( half value ); +half __ovld sub_group_non_uniform_scan_exclusive_min( half value ); +half __ovld sub_group_non_uniform_scan_exclusive_max( half value ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +double __ovld sub_group_non_uniform_reduce_add( double value ); +double __ovld sub_group_non_uniform_reduce_mul( double value ); +double __ovld sub_group_non_uniform_reduce_min( double value ); +double __ovld sub_group_non_uniform_reduce_max( double value ); +double __ovld sub_group_non_uniform_scan_inclusive_add( double value ); +double __ovld sub_group_non_uniform_scan_inclusive_mul( double value ); +double __ovld sub_group_non_uniform_scan_inclusive_min( double value ); +double __ovld sub_group_non_uniform_scan_inclusive_max( double value ); +double __ovld sub_group_non_uniform_scan_exclusive_add( double value ); +double __ovld sub_group_non_uniform_scan_exclusive_mul( double value ); +double __ovld sub_group_non_uniform_scan_exclusive_min( double value ); +double __ovld sub_group_non_uniform_scan_exclusive_max( double value ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_non_uniform_arithmetic + +#if defined(cl_khr_subgroup_shuffle) +char __ovld sub_group_shuffle( char value, uint index ); +uchar __ovld sub_group_shuffle( uchar value, uint index ); +short __ovld sub_group_shuffle( short value, uint index ); +ushort __ovld sub_group_shuffle( ushort value, uint index ); +int __ovld sub_group_shuffle( int value, uint index ); +uint __ovld sub_group_shuffle( uint value, uint index ); +long __ovld sub_group_shuffle( long value, uint index ); +ulong __ovld sub_group_shuffle( ulong value, uint index ); +float __ovld sub_group_shuffle( float value, uint index ); + +char __ovld sub_group_shuffle_xor( char value, uint mask ); +uchar __ovld sub_group_shuffle_xor( uchar value, uint mask ); +short __ovld sub_group_shuffle_xor( short value, uint mask ); +ushort __ovld sub_group_shuffle_xor( ushort value, uint mask ); +int __ovld sub_group_shuffle_xor( int value, uint mask ); +uint __ovld sub_group_shuffle_xor( uint value, uint mask ); +long __ovld sub_group_shuffle_xor( long value, uint mask ); +ulong __ovld sub_group_shuffle_xor( ulong value, uint mask ); +float __ovld sub_group_shuffle_xor( float value, uint mask ); + +#if defined(cl_khr_fp16) +half __ovld sub_group_shuffle( half value, uint index ); +half __ovld sub_group_shuffle_xor( half value, uint mask ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +double __ovld sub_group_shuffle( double value, uint index ); +double __ovld sub_group_shuffle_xor( double value, uint mask ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_shuffle + +#if defined(cl_khr_subgroup_shuffle_relative) +char __ovld sub_group_shuffle_up( char value, 
uint delta ); +uchar __ovld sub_group_shuffle_up( uchar value, uint delta ); +short __ovld sub_group_shuffle_up( short value, uint delta ); +ushort __ovld sub_group_shuffle_up( ushort value, uint delta ); +int __ovld sub_group_shuffle_up( int value, uint delta ); +uint __ovld sub_group_shuffle_up( uint value, uint delta ); +long __ovld sub_group_shuffle_up( long value, uint delta ); +ulong __ovld sub_group_shuffle_up( ulong value, uint delta ); +float __ovld sub_group_shuffle_up( float value, uint delta ); + +char __ovld sub_group_shuffle_down( char value, uint delta ); +uchar __ovld sub_group_shuffle_down( uchar value, uint delta ); +short __ovld sub_group_shuffle_down( short value, uint delta ); +ushort __ovld sub_group_shuffle_down( ushort value, uint delta ); +int __ovld sub_group_shuffle_down( int value, uint delta ); +uint __ovld sub_group_shuffle_down( uint value, uint delta ); +long __ovld sub_group_shuffle_down( long value, uint delta ); +ulong __ovld sub_group_shuffle_down( ulong value, uint delta ); +float __ovld sub_group_shuffle_down( float value, uint delta ); + +#if defined(cl_khr_fp16) +half __ovld sub_group_shuffle_up( half value, uint delta ); +half __ovld sub_group_shuffle_down( half value, uint delta ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +double __ovld sub_group_shuffle_up( double value, uint delta ); +double __ovld sub_group_shuffle_down( double value, uint delta ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_shuffle_relative + +#if defined(cl_khr_subgroup_clustered_reduce) +char __ovld sub_group_clustered_reduce_add( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_add( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_add( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_add( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_add( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize ); +float __ovld sub_group_clustered_reduce_add( float value, uint clustersize ); + +char __ovld sub_group_clustered_reduce_mul( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_mul( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_mul( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_mul( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize ); +float __ovld sub_group_clustered_reduce_mul( float value, uint clustersize ); + +char __ovld sub_group_clustered_reduce_min( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_min( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_min( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_min( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_min( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_min( ulong 
value, uint clustersize ); +float __ovld sub_group_clustered_reduce_min( float value, uint clustersize ); + +char __ovld sub_group_clustered_reduce_max( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_max( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_max( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_max( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_max( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize ); +float __ovld sub_group_clustered_reduce_max( float value, uint clustersize ); + +char __ovld sub_group_clustered_reduce_and( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_and( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_and( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_and( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_and( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize ); + +char __ovld sub_group_clustered_reduce_or( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_or( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_or( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_or( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_or( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize ); + +char __ovld sub_group_clustered_reduce_xor( char value, uint clustersize ); +uchar __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize ); +short __ovld sub_group_clustered_reduce_xor( short value, uint clustersize ); +ushort __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize ); +int __ovld sub_group_clustered_reduce_xor( int value, uint clustersize ); +uint __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize ); +long __ovld sub_group_clustered_reduce_xor( long value, uint clustersize ); +ulong __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize ); + +int __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize ); +int __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize ); +int __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize ); + +#if defined(cl_khr_fp16) +half __ovld sub_group_clustered_reduce_add( half value, uint clustersize ); +half __ovld sub_group_clustered_reduce_mul( half value, uint clustersize ); +half __ovld sub_group_clustered_reduce_min( half value, uint clustersize ); +half __ovld sub_group_clustered_reduce_max( half value, uint clustersize ); +#endif // cl_khr_fp16 + +#if defined(cl_khr_fp64) +double __ovld sub_group_clustered_reduce_add( double value, uint clustersize ); +double __ovld sub_group_clustered_reduce_mul( double value, uint clustersize ); +double __ovld sub_group_clustered_reduce_min( double value, 
uint clustersize ); +double __ovld sub_group_clustered_reduce_max( double value, uint clustersize ); +#endif // cl_khr_fp64 + +#endif // cl_khr_subgroup_clustered_reduce + +#if defined(cl_khr_extended_bit_ops) +char __ovld __cnfn bitfield_insert(char, char, uint, uint); +uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint); +short __ovld __cnfn bitfield_insert(short, short, uint, uint); +ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint); +int __ovld __cnfn bitfield_insert(int, int, uint, uint); +uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint); +long __ovld __cnfn bitfield_insert(long, long, uint, uint); +ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint); +char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint); +uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint); +short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint); +ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint); +int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint); +uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint); +long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint); +ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint); +char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint); +uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint); +short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint); +ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint); +int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint); +uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint); +long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint); +ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint); +char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint); +uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint); +short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint); +ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint); +int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint); +uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint); +long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint); +ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint); +char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint); +uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint); +short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint); +ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint); +int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint); +uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint); +long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint); +ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint); +char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint); +uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint); +short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint); +ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint); +int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint); +uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint); +long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint); +ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint); + +char __ovld __cnfn bitfield_extract_signed(char, uint, uint); +short __ovld __cnfn bitfield_extract_signed(short, uint, uint); +int __ovld __cnfn bitfield_extract_signed(int, uint, uint); +long 
__ovld __cnfn bitfield_extract_signed(long, uint, uint); +char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint); +short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint); +int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint); +long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint); +char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint); +short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint); +int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint); +long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint); +char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint); +short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint); +int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint); +long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint); +char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint); +short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint); +int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint); +long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint); +char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint); +short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint); +int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint); +long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint); + +char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint); +short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint); +int __ovld __cnfn bitfield_extract_signed(uint, uint, uint); +long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint); +char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint); +short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint); +int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint); +long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint); +char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint); +short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint); +int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint); +long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint); +char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint); +short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint); +int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint); +long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint); +char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint); +short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint); +int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint); +long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint); +char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint); +short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint); +int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint); +long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint); + +uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint); +ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint); +uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint); +ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint); +uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint); +ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint); +uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint); +ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint); +uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, 
uint); +ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint); +uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint); +ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint); +uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint); +ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint); +uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint); +ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint); +uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint); +ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint); +uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint); +ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint); +uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint); +ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint); +uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint); +ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint); + +uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint); +ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint); +uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint); +ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint); +uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint); +ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint); +uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint); +ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint); +uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint); +ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint); +uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint); +ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint); +uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint); +ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint); +uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint); +ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint); +uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint); +ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint); +uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint); +ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint); +uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint); +ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint); +uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint); +ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint); + +char __ovld __cnfn bit_reverse(char); +uchar __ovld __cnfn bit_reverse(uchar); +short __ovld __cnfn bit_reverse(short); +ushort __ovld __cnfn bit_reverse(ushort); +int __ovld __cnfn bit_reverse(int); +uint __ovld __cnfn bit_reverse(uint); +long __ovld __cnfn bit_reverse(long); +ulong __ovld __cnfn bit_reverse(ulong); +char2 __ovld __cnfn bit_reverse(char2); +uchar2 __ovld __cnfn bit_reverse(uchar2); +short2 __ovld __cnfn bit_reverse(short2); +ushort2 __ovld __cnfn bit_reverse(ushort2); +int2 __ovld __cnfn bit_reverse(int2); +uint2 __ovld __cnfn bit_reverse(uint2); +long2 __ovld __cnfn bit_reverse(long2); +ulong2 __ovld __cnfn bit_reverse(ulong2); +char3 __ovld __cnfn bit_reverse(char3); +uchar3 __ovld __cnfn bit_reverse(uchar3); +short3 __ovld __cnfn bit_reverse(short3); +ushort3 __ovld __cnfn bit_reverse(ushort3); +int3 __ovld __cnfn 
bit_reverse(int3); +uint3 __ovld __cnfn bit_reverse(uint3); +long3 __ovld __cnfn bit_reverse(long3); +ulong3 __ovld __cnfn bit_reverse(ulong3); +char4 __ovld __cnfn bit_reverse(char4); +uchar4 __ovld __cnfn bit_reverse(uchar4); +short4 __ovld __cnfn bit_reverse(short4); +ushort4 __ovld __cnfn bit_reverse(ushort4); +int4 __ovld __cnfn bit_reverse(int4); +uint4 __ovld __cnfn bit_reverse(uint4); +long4 __ovld __cnfn bit_reverse(long4); +ulong4 __ovld __cnfn bit_reverse(ulong4); +char8 __ovld __cnfn bit_reverse(char8); +uchar8 __ovld __cnfn bit_reverse(uchar8); +short8 __ovld __cnfn bit_reverse(short8); +ushort8 __ovld __cnfn bit_reverse(ushort8); +int8 __ovld __cnfn bit_reverse(int8); +uint8 __ovld __cnfn bit_reverse(uint8); +long8 __ovld __cnfn bit_reverse(long8); +ulong8 __ovld __cnfn bit_reverse(ulong8); +char16 __ovld __cnfn bit_reverse(char16); +uchar16 __ovld __cnfn bit_reverse(uchar16); +short16 __ovld __cnfn bit_reverse(short16); +ushort16 __ovld __cnfn bit_reverse(ushort16); +int16 __ovld __cnfn bit_reverse(int16); +uint16 __ovld __cnfn bit_reverse(uint16); +long16 __ovld __cnfn bit_reverse(long16); +ulong16 __ovld __cnfn bit_reverse(ulong16); +#endif // cl_khr_extended_bit_ops + +#if defined(__opencl_c_integer_dot_product_input_4x8bit) +uint __ovld __cnfn dot(uchar4, uchar4); +int __ovld __cnfn dot(char4, char4); +int __ovld __cnfn dot(uchar4, char4); +int __ovld __cnfn dot(char4, uchar4); + +uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint); +int __ovld __cnfn dot_acc_sat(char4, char4, int); +int __ovld __cnfn dot_acc_sat(uchar4, char4, int); +int __ovld __cnfn dot_acc_sat(char4, uchar4, int); +#endif // __opencl_c_integer_dot_product_input_4x8bit + +#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed) +uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint); +int __ovld __cnfn dot_4x8packed_ss_int(uint, uint); +int __ovld __cnfn dot_4x8packed_us_int(uint, uint); +int __ovld __cnfn dot_4x8packed_su_int(uint, uint); + +uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint); +int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int); +int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int); +int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int); +#endif // __opencl_c_integer_dot_product_input_4x8bit_packed + +#if defined(cl_intel_subgroups) +// Intel-Specific Sub Group Functions +float __ovld __conv intel_sub_group_shuffle( float x, uint c ); +float2 __ovld __conv intel_sub_group_shuffle( float2 x, uint c ); +float3 __ovld __conv intel_sub_group_shuffle( float3 x, uint c ); +float4 __ovld __conv intel_sub_group_shuffle( float4 x, uint c ); +float8 __ovld __conv intel_sub_group_shuffle( float8 x, uint c ); +float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c ); + +int __ovld __conv intel_sub_group_shuffle( int x, uint c ); +int2 __ovld __conv intel_sub_group_shuffle( int2 x, uint c ); +int3 __ovld __conv intel_sub_group_shuffle( int3 x, uint c ); +int4 __ovld __conv intel_sub_group_shuffle( int4 x, uint c ); +int8 __ovld __conv intel_sub_group_shuffle( int8 x, uint c ); +int16 __ovld __conv intel_sub_group_shuffle( int16 x, uint c ); + +uint __ovld __conv intel_sub_group_shuffle( uint x, uint c ); +uint2 __ovld __conv intel_sub_group_shuffle( uint2 x, uint c ); +uint3 __ovld __conv intel_sub_group_shuffle( uint3 x, uint c ); +uint4 __ovld __conv intel_sub_group_shuffle( uint4 x, uint c ); +uint8 __ovld __conv intel_sub_group_shuffle( uint8 x, uint c ); +uint16 __ovld __conv intel_sub_group_shuffle( uint16 x, uint c ); + +long 
+
+#if defined(cl_intel_subgroups)
+// Intel-Specific Sub Group Functions
+float __ovld __conv intel_sub_group_shuffle( float x, uint c );
+float2 __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
+float3 __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
+float4 __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
+float8 __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
+float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
+
+int __ovld __conv intel_sub_group_shuffle( int x, uint c );
+int2 __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
+int3 __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
+int4 __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
+int8 __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
+int16 __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
+
+uint __ovld __conv intel_sub_group_shuffle( uint x, uint c );
+uint2 __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
+uint3 __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
+uint4 __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
+uint8 __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
+uint16 __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
+
+long __ovld __conv intel_sub_group_shuffle( long x, uint c );
+ulong __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
+
+float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint c );
+float2 __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
+float3 __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
+float4 __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
+float8 __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
+float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
+
+int __ovld __conv intel_sub_group_shuffle_down( int cur, int next, uint c );
+int2 __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
+int3 __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
+int4 __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
+int8 __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
+int16 __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
+
+uint __ovld __conv intel_sub_group_shuffle_down( uint cur, uint next, uint c );
+uint2 __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
+uint3 __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
+uint4 __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
+uint8 __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
+uint16 __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
+
+long __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint c );
+ulong __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint c );
+
+float __ovld __conv intel_sub_group_shuffle_up( float prev, float cur, uint c );
+float2 __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
+float3 __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
+float4 __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
+float8 __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
+float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
+
+int __ovld __conv intel_sub_group_shuffle_up( int prev, int cur, uint c );
+int2 __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
+int3 __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
+int4 __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
+int8 __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
+int16 __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
+
+uint __ovld __conv intel_sub_group_shuffle_up( uint prev, uint cur, uint c );
+uint2 __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
+uint3 __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
+uint4 __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
+uint8 __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
+uint16 __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
+
+long __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
+ulong __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
+
+float __ovld __conv intel_sub_group_shuffle_xor( float x, uint c );
+float2 __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
+float3 __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
+float4 __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
+float8 __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
+float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
+
+int __ovld __conv intel_sub_group_shuffle_xor( int x, uint c );
+int2 __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
+int3 __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
+int4 __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
+int8 __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
+int16 __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
+
+uint __ovld __conv intel_sub_group_shuffle_xor( uint x, uint c );
+uint2 __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
+uint3 __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
+uint4 __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
+uint8 __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
+uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
+
+long __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
+ulong __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
+
+uint __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
+uint2 __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
+uint4 __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
+uint8 __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
+uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
+uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
+uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+uint __ovld __conv intel_sub_group_block_read( const __global uint* p );
+uint2 __ovld __conv intel_sub_group_block_read2( const __global uint* p );
+uint4 __ovld __conv intel_sub_group_block_read4( const __global uint* p );
+uint8 __ovld __conv intel_sub_group_block_read8( const __global uint* p );
+
+void __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
+void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
+void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
+void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
+void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
+void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
+void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+void __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
+void __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
+void __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
+void __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
+
+#ifdef cl_khr_fp16
+half __ovld __conv intel_sub_group_shuffle( half x, uint c );
+half __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
+half __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
+half __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
+#endif
+
+#if defined(cl_khr_fp64)
+double __ovld __conv intel_sub_group_shuffle( double x, uint c );
+double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
+double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
+double __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
+#endif
+
+#endif //cl_intel_subgroups
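
A usage sketch for the shuffle built-ins above, not part of the patch (hypothetical kernel name; assumes the cl_intel_subgroups extension is available and the kernel is launched so that every sub-group is full):

#pragma OPENCL EXTENSION cl_intel_subgroups : enable

__kernel void sg_reverse(__global float *data) {
    uint lane = get_sub_group_local_id();
    float v = data[get_global_id(0)];
    // Read the value held by the mirror lane of the same sub-group; no
    // local memory or barriers are needed for this intra-sub-group exchange.
    data[get_global_id(0)] =
        intel_sub_group_shuffle(v, get_sub_group_size() - 1 - lane);
}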
+
+#if defined(cl_intel_subgroups_short)
+short __ovld __conv intel_sub_group_broadcast( short x, uint sub_group_local_id );
+short2 __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
+short3 __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
+short4 __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
+short8 __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
+
+ushort __ovld __conv intel_sub_group_broadcast( ushort x, uint sub_group_local_id );
+ushort2 __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
+ushort3 __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
+ushort4 __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
+ushort8 __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
+
+short __ovld __conv intel_sub_group_shuffle( short x, uint c );
+short2 __ovld __conv intel_sub_group_shuffle( short2 x, uint c );
+short3 __ovld __conv intel_sub_group_shuffle( short3 x, uint c );
+short4 __ovld __conv intel_sub_group_shuffle( short4 x, uint c );
+short8 __ovld __conv intel_sub_group_shuffle( short8 x, uint c );
+short16 __ovld __conv intel_sub_group_shuffle( short16 x, uint c);
+
+ushort __ovld __conv intel_sub_group_shuffle( ushort x, uint c );
+ushort2 __ovld __conv intel_sub_group_shuffle( ushort2 x, uint c );
+ushort3 __ovld __conv intel_sub_group_shuffle( ushort3 x, uint c );
+ushort4 __ovld __conv intel_sub_group_shuffle( ushort4 x, uint c );
+ushort8 __ovld __conv intel_sub_group_shuffle( ushort8 x, uint c );
+ushort16 __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
+
+short __ovld __conv intel_sub_group_shuffle_down( short cur, short next, uint c );
+short2 __ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next, uint c );
+short3 __ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next, uint c );
+short4 __ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next, uint c );
+short8 __ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next, uint c );
+short16 __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
+
+ushort __ovld __conv intel_sub_group_shuffle_down( ushort cur, ushort next, uint c );
+ushort2 __ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next, uint c );
+ushort3 __ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next, uint c );
+ushort4 __ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next, uint c );
+ushort8 __ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next, uint c );
+ushort16 __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
+
+short __ovld __conv intel_sub_group_shuffle_up( short cur, short next, uint c );
+short2 __ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next, uint c );
+short3 __ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next, uint c );
+short4 __ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next, uint c );
+short8 __ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next, uint c );
+short16 __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint c );
+
+ushort __ovld __conv intel_sub_group_shuffle_up( ushort cur, ushort next, uint c );
+ushort2 __ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next, uint c );
+ushort3 __ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next, uint c );
+ushort4 __ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next, uint c );
+ushort8 __ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next, uint c );
+ushort16 __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint c );
+
+short __ovld __conv intel_sub_group_shuffle_xor( short x, uint c );
+short2 __ovld __conv intel_sub_group_shuffle_xor( short2 x, uint c );
+short3 __ovld __conv intel_sub_group_shuffle_xor( short3 x, uint c );
+short4 __ovld __conv intel_sub_group_shuffle_xor( short4 x, uint c );
+short8 __ovld __conv intel_sub_group_shuffle_xor( short8 x, uint c );
+short16 __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
+
+ushort __ovld __conv intel_sub_group_shuffle_xor( ushort x, uint c );
+ushort2 __ovld __conv intel_sub_group_shuffle_xor( ushort2 x, uint c );
+ushort3 __ovld __conv intel_sub_group_shuffle_xor( ushort3 x, uint c );
+ushort4 __ovld __conv intel_sub_group_shuffle_xor( ushort4 x, uint c );
+ushort8 __ovld __conv intel_sub_group_shuffle_xor( ushort8 x, uint c );
+ushort16 __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
+
+short __ovld __conv intel_sub_group_reduce_add( short x );
+ushort __ovld __conv intel_sub_group_reduce_add( ushort x );
+short __ovld __conv intel_sub_group_reduce_min( short x );
+ushort __ovld __conv intel_sub_group_reduce_min( ushort x );
+short __ovld __conv intel_sub_group_reduce_max( short x );
+ushort __ovld __conv intel_sub_group_reduce_max( ushort x );
+
+short __ovld __conv intel_sub_group_scan_exclusive_add( short x );
+ushort __ovld __conv intel_sub_group_scan_exclusive_add( ushort x );
+short __ovld __conv intel_sub_group_scan_exclusive_min( short x );
+ushort __ovld __conv intel_sub_group_scan_exclusive_min( ushort x );
+short __ovld __conv intel_sub_group_scan_exclusive_max( short x );
+ushort __ovld __conv intel_sub_group_scan_exclusive_max( ushort x );
+
+short __ovld __conv intel_sub_group_scan_inclusive_add( short x );
+ushort __ovld __conv intel_sub_group_scan_inclusive_add( ushort x );
+short __ovld __conv intel_sub_group_scan_inclusive_min( short x );
+ushort __ovld __conv intel_sub_group_scan_inclusive_min( ushort x );
+short __ovld __conv intel_sub_group_scan_inclusive_max( short x );
+ushort __ovld __conv intel_sub_group_scan_inclusive_max( ushort x );
+
+uint __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
+uint2 __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
+uint4 __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
+uint8 __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
+uint2 __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
+uint4 __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
+uint8 __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+uint __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
+uint2 __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
+uint4 __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
+uint8 __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
+
+void __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
+void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
+void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
+void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
+void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
+void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
+void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+void __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
+void __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
+void __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
+void __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
+
+ushort __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
+ushort2 __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
+ushort4 __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
+ushort8 __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
+ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
+ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
+ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+ushort __ovld __conv intel_sub_group_block_read_us( const __global ushort* p );
+ushort2 __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
+ushort4 __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
+ushort8 __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
+
+void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort data);
+void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
+void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
+void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort data);
+void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
+void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
+void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+void __ovld __conv intel_sub_group_block_write_us( __global ushort* p, ushort data );
+void __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
+void __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
+void __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
+#endif // cl_intel_subgroups_short
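
The block read/write built-ins above move data cooperatively: each call transfers sub-group-size elements per vector component from a pointer that must be uniform across the sub-group. A sketch, not part of the patch (hypothetical kernel; alignment and layout requirements follow the vendor documentation):

#pragma OPENCL EXTENSION cl_intel_subgroups_short : enable

__kernel void blk_copy(__global const ushort *src, __global ushort *dst) {
    // One block read fetches 8 x sub-group-size ushorts for the whole
    // sub-group in a single operation; base is sub-group uniform.
    size_t base = get_group_id(0) * get_local_size(0) * 8;
    ushort8 v = intel_sub_group_block_read_us8(src + base);
    intel_sub_group_block_write_us8(dst + base, v);
}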
+
+#ifdef cl_intel_device_side_avc_motion_estimation
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
+
+// MCE built-in functions
+uchar __ovld
+intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
+    uchar slice_type, uchar qp);
+ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
+    uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
+    uchar slice_type, uchar qp);
+uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
+    uchar slice_type, uchar qp);
+uint2 __ovld
+intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
+    uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
+    uchar slice_type, uchar qp);
+
+uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
+uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
+uchar __ovld
+intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
+
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_shape_penalty(
+    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_ac_only_haar(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_mce_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
+    intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
+    intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
+    intel_sub_group_avc_mce_result_t result);
+uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
+    intel_sub_group_avc_mce_result_t result);
+uchar __ovld
+intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_mce_result_t result);
+
+// IME built-in functions
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_initialize(
+    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference(
+    short2 ref_offset, uchar search_window_config,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference(
+    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_max_motion_vector_count(
+    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_unidirectional_mix_disable(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_early_search_termination_threshold(
+    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_weighted_sad(
+    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
+
+__attribute__((deprecated("If you use the latest Intel driver, please use "
+                          "intel_sub_group_avc_ime_ref_window_size instead",
+                          "intel_sub_group_avc_ime_ref_window_size")))
+ushort2 __ovld
+intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
+ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
+    uchar search_window_config, char dual_ref);
+short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
+    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
+    ushort2 image_size);
+
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ime_payload_t payload,
+    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+
+intel_sub_group_avc_ime_single_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_single_reference_streamin(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_dual_reference_streamin(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_single_reference_streamout(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_dual_reference_streamout(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+    uchar major_shape);
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+    uchar major_shape, uchar direction);
+
+uchar __ovld intel_sub_group_avc_ime_get_border_reached(
+    uchar image_select, intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
+    intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
+    intel_sub_group_avc_ime_result_t result);
+
+// REF built-in functions
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_fme_initialize(
+    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+    uchar minor_shapes, uchar directions, uchar pixel_resolution,
+    uchar sad_adjustment);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_bme_initialize(
+    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+    uchar minor_shapes, uchar directions, uchar pixel_resolution,
+    uchar bidirectional_weight, uchar sad_adjustment);
+
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bidirectional_mix_disable(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bilinear_filter_enable(
+    intel_sub_group_avc_ref_payload_t payload);
+
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+    intel_sub_group_avc_ref_payload_t payload);
+
+// SIC built-in functions
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_initialize(
+    ushort2 src_coord);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_skc(
+    uint skip_block_partition_type, uint skip_motion_vector_mask,
+    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_ipe(
+    uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_ipe(
+    uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
+    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
+    intel_sub_group_avc_sic_payload_t payload);
+uint __ovld
+intel_sub_group_avc_sic_get_motion_vector_mask(
+    uint skip_block_partition_type, uchar direction);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
+    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
+    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
+    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
+    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_forward_transform_enable(
+    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
+    uchar block_based_skip_type,
+    intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_ipe(
+    read_only image2d_t src_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_single_reference(
+    read_only image2d_t src_image, read_only image2d_t ref_image,
+    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_dual_reference(
+    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+    read_only image2d_t src_image, uint packed_reference_ids,
+    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+    intel_sub_group_avc_sic_payload_t payload);
+
+uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
+    intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
+    intel_sub_group_avc_sic_result_t result);
+uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
+    intel_sub_group_avc_sic_result_t result);
+uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
+    intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
+    intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
+    intel_sub_group_avc_sic_result_t result);
+
+// Wrappers
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
+    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_shape_penalty(
+    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_direction_penalty(
+    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_motion_vector_cost_function(
+    ulong packed_cost_center_delta, uint2 packed_cost_table,
+    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
+    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
+    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
+    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+    intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_ac_only_haar(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_ac_only_haar(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_ac_only_haar(
+    intel_sub_group_avc_sic_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
+    intel_sub_group_avc_ime_result_t result);
+ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
+    intel_sub_group_avc_ref_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
+    intel_sub_group_avc_ref_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
+    intel_sub_group_avc_sic_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
+    intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
+    intel_sub_group_avc_ref_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
+    intel_sub_group_avc_ref_result_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
+    intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
+    intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld
+intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
+    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+    intel_sub_group_avc_ref_result_t result);
+
+// Type conversion functions
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_payload(
+    intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_payload(
+    intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_payload(
+    intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_payload(
+    intel_sub_group_avc_mce_payload_t payload);
+
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_result(
+    intel_sub_group_avc_ime_result_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_result(
+    intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_result(
+    intel_sub_group_avc_ref_result_t result);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_result(
+    intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_result(
+    intel_sub_group_avc_sic_result_t result);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_result(
+    intel_sub_group_avc_mce_result_t result);
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
+#endif // cl_intel_device_side_avc_motion_estimation
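
A rough sketch of the IME flow these declarations support, not part of the patch (hypothetical kernel; the CLK_AVC_ME_* constants come from the cl_intel_device_side_avc_motion_estimation extension specification, and the sampler must be a VME accelerator sampler created on the host):

#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : enable

__kernel void ime_16x16(read_only image2d_t src, read_only image2d_t ref,
                        sampler_t vme_sampler, __global ulong *mvs) {
    // One work-group handles one 16x16 macroblock.
    ushort2 coord = (ushort2)(get_group_id(0) * 16, get_group_id(1) * 16);
    intel_sub_group_avc_ime_payload_t p = intel_sub_group_avc_ime_initialize(
        coord, CLK_AVC_ME_PARTITION_MASK_ALL_INTEL,
        CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL);
    p = intel_sub_group_avc_ime_set_single_reference(
        (short2)(0, 0), CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL, p);
    intel_sub_group_avc_ime_result_t r =
        intel_sub_group_avc_ime_evaluate_with_single_reference(
            src, ref, vme_sampler, p);
    mvs[get_group_id(1) * get_num_groups(0) + get_group_id(0)] =
        intel_sub_group_avc_ime_get_motion_vectors(r);
}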
+
+#ifdef cl_amd_media_ops
+uint __ovld amd_bitalign(uint a, uint b, uint c);
+uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_bytealign(uint a, uint b, uint c);
+uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_lerp(uint a, uint b, uint c);
+uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_pack(float4 v);
+
+uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
+
+uint __ovld amd_sadhi(uint a, uint b, uint c);
+uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_sad(uint a, uint b, uint c);
+uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
+
+float __ovld amd_unpack0(uint a);
+float2 __ovld amd_unpack0(uint2 a);
+float3 __ovld amd_unpack0(uint3 a);
+float4 __ovld amd_unpack0(uint4 a);
+float8 __ovld amd_unpack0(uint8 a);
+float16 __ovld amd_unpack0(uint16 a);
+
+float __ovld amd_unpack1(uint a);
+float2 __ovld amd_unpack1(uint2 a);
+float3 __ovld amd_unpack1(uint3 a);
+float4 __ovld amd_unpack1(uint4 a);
+float8 __ovld amd_unpack1(uint8 a);
+float16 __ovld amd_unpack1(uint16 a);
+
+float __ovld amd_unpack2(uint a);
+float2 __ovld amd_unpack2(uint2 a);
+float3 __ovld amd_unpack2(uint3 a);
+float4 __ovld amd_unpack2(uint4 a);
+float8 __ovld amd_unpack2(uint8 a);
+float16 __ovld amd_unpack2(uint16 a);
+
+float __ovld amd_unpack3(uint a);
+float2 __ovld amd_unpack3(uint2 a);
+float3 __ovld amd_unpack3(uint3 a);
+float4 __ovld amd_unpack3(uint4 a);
+float8 __ovld amd_unpack3(uint8 a);
+float16 __ovld amd_unpack3(uint16 a);
+#endif // cl_amd_media_ops
+
+#ifdef cl_amd_media_ops2
+int __ovld amd_bfe(int src0, uint src1, uint src2);
+int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
+int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
+int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
+int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
+int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfe(uint src0, uint src1, uint src2);
+uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfm(uint src0, uint src1);
+uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
+uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
+uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
+uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
+uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
+
+float __ovld amd_max3(float src0, float src1, float src2);
+float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_max3(int src0, int src1, int src2);
+int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_max3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_median3(float src0, float src1, float src2);
+float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_median3(int src0, int src1, int src2);
+int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_median3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_min3(float src0, float src1, float src2);
+float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_min3(int src0, int src1, int src2);
+int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_min3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
+
+ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+uint __ovld amd_msad(uint src0, uint src1, uint src2);
+uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadd(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadw(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
+#endif // cl_amd_media_ops2
+
+#if defined(cl_arm_integer_dot_product_int8)
+uint __ovld arm_dot(uchar4 a, uchar4 b);
+int __ovld arm_dot(char4 a, char4 b);
+#endif // defined(cl_arm_integer_dot_product_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int8)
+uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
+int __ovld arm_dot_acc(char4 a, char4 b, int c);
+#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int16)
+uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
+int __ovld arm_dot_acc(short2 a, short2 b, int c);
+#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
+
+#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
+int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
+#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
+
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
+#undef __cnfn
+#undef __ovld
+#endif //_OPENCL_H_
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h
new file mode 100644
index 0000000..279fb26
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h
@@ -0,0 +1,106 @@
+/*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma omp begin declare variant match( \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+
+/// Include declarations for libdevice functions.
+#include <__clang_cuda_libdevice_declares.h>
+
+/// Provide definitions for these functions.
+#include <__clang_cuda_device_functions.h>
+
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#pragma omp end declare variant
+
+#ifdef __AMDGCN__
+#pragma omp begin declare variant match(device = {arch(amdgcn)})
+
+// Import types which will be used by __clang_hip_libdevice_declares.h
+#ifndef __cplusplus
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+#define __OPENMP_AMDGCN__
+#pragma push_macro("__device__")
+#define __device__
+
+/// Include declarations for libdevice functions.
+#include <__clang_hip_libdevice_declares.h>
+
+#pragma pop_macro("__device__")
+#undef __OPENMP_AMDGCN__
+
+#pragma omp end declare variant
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+// Ensure we make `_ZdlPv`, aka. `operator delete(void*)` available without the
+// need to `include <new>` in C++ mode.
+#ifdef __cplusplus
+
+// We require malloc/free.
+#include <cstdlib>
+
+#pragma push_macro("OPENMP_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define OPENMP_NOEXCEPT noexcept
+#else
+#define OPENMP_NOEXCEPT
+#endif
+
+// Device overrides for non-placement new and delete.
+inline void *operator new(__SIZE_TYPE__ size) {
+  if (size == 0)
+    size = 1;
+  return ::malloc(size);
+}
+
+inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }
+
+inline void operator delete(void *ptr)OPENMP_NOEXCEPT { ::free(ptr); }
+
+inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
+  ::operator delete(ptr);
+}
+
+// Sized delete, C++14 only.
+#if __cplusplus >= 201402L
+inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
+  ::operator delete(ptr);
+}
+inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
+  ::operator delete(ptr);
+}
+#endif
+
+#pragma pop_macro("OPENMP_NOEXCEPT")
+#endif
+
+#endif
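
What this wrapper buys in practice: math and allocation calls inside a target region resolve to device implementations without source changes. A minimal sketch, not part of the patch (hypothetical function; assumes an offload-enabled build such as clang -fopenmp -fopenmp-targets=nvptx64):

#include <math.h>

void rms_map(double *v, int n) {
  // sqrt() inside the target region binds to the device declaration
  // pulled in via __clang_openmp_device_functions.h when compiled for
  // the offload target; on the host it remains the libm call.
#pragma omp target teams distribute parallel for map(tofrom : v[:n])
  for (int i = 0; i < n; ++i)
    v[i] = sqrt(v[i]);
}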
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
+__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float log2(float __x) { return ::log2f(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
+__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+  return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+  return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbln(float __x, long int __y) {
+  return ::scalblnf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
+
+#ifdef __AMDGCN__
+#pragma omp begin declare variant match(device = {arch(amdgcn)})
+
+#pragma push_macro("__constant__")
+#define __constant__ __attribute__((constant))
+#define __OPENMP_AMDGCN__
+
+#include <__clang_hip_cmath.h>
+
+#pragma pop_macro("__constant__")
+#undef __OPENMP_AMDGCN__
+
+// Define overloads otherwise which are absent
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acos(float __x) { return ::acosf(__x); }
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asin(float __x) { return ::asinf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atan(float __x) { return ::atanf(__x); }
+__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float ldexp(float __arg, int __exp) {
+  return ::ldexpf(__arg, __exp);
+}
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+  return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+  return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
+__DEVICE__ float tan(float __x) { return ::tanf(__x); }
+__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
+#endif // __AMDGCN__
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex
new file mode 100644
index 0000000..dfd6193
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex
@@ -0,0 +1,46 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require std::math functions in the complex builtins below.
+#include <cmath>
+
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_NVPTX__
+#endif
+
+// Grab the host header too.
+#include_next <complex>
+
+// If we are compiling against libc++, the macro _LIBCPP_STD_VER should be set
+// after including <cmath> above. Since the complex header we use is a
+// simplified version of the libc++ <complex>, we don't need it in this case. If we
+// compile against libstdc++, or any other standard library, we will overload
+// the (hopefully template) functions in the <complex> header with the ones we
+// got from libc++ which decomposes math functions, like `std::sin`, into
+// arithmetic and calls to non-complex functions, all of which we can then
+// handle.
+#ifndef _LIBCPP_STD_VER
+
+#pragma omp begin declare variant match( \
+    device = {arch(nvptx, nvptx64)}, \
+    implementation = {extension(match_any, allow_templates)})
+
+#include <complex_cmath.h>
+
+#pragma omp end declare variant
+
+#endif
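As a usage sketch (illustrative only, assuming an offload-enabled clang invoked with -fopenmp -fopenmp-targets=nvptx64), this wrapper is what lets std::complex arithmetic run inside a target region, with calls such as std::abs decomposed into the non-complex math functions the device variants provide:

#include <complex>
#include <cstdio>

int main() {
  std::complex<double> z(3.0, 4.0);
  double r = 0.0;
#pragma omp target map(to : z) map(from : r)
  { r = std::abs(z * z); } // |z*z| == |z|^2 == 25
  std::printf("%f\n", r);
}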
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex.h
new file mode 100644
index 0000000..15dc415
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex.h
@@ -0,0 +1,26 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX_H__
+#define __CLANG_OPENMP_COMPLEX_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require math functions in the complex builtins below.
+#include <math.h>
+
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_NVPTX__
+#endif
+
+// Grab the host header too.
+#include_next <complex.h>
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex_cmath.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex_cmath.h
new file mode 100644
index 0000000..e3d9aeb
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/complex_cmath.h
@@ -0,0 +1,388 @@
+//===------------------------- __complex_cmath.h --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// std::complex header copied from the libcxx source and simplified for use in
+// OpenMP target offload regions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#ifndef __cplusplus
+#error "This file is for C++ compilation only."
+#endif
+
+#ifndef _LIBCPP_COMPLEX
+#define _LIBCPP_COMPLEX
+
+#include <cmath>
+#include <type_traits>
+
+#define __DEVICE__ static constexpr __attribute__((nothrow))
+
+namespace std {
+
+// abs
+
+template <class _Tp> __DEVICE__ _Tp abs(const std::complex<_Tp> &__c) {
+  return hypot(__c.real(), __c.imag());
+}
+
+// arg
+
+template <class _Tp> __DEVICE__ _Tp arg(const std::complex<_Tp> &__c) {
+  return atan2(__c.imag(), __c.real());
+}
+
+template <class _Tp>
+typename enable_if<is_integral<_Tp>::value || is_same<_Tp, double>::value,
+                   double>::type
+arg(_Tp __re) {
+  return atan2(0., __re);
+}
+
+template <class _Tp>
+typename enable_if<is_same<_Tp, float>::value, float>::type arg(_Tp __re) {
+  return atan2f(0.F, __re);
+}
+
+// norm
+
+template <class _Tp> __DEVICE__ _Tp norm(const std::complex<_Tp> &__c) {
+  if (std::isinf(__c.real()))
+    return abs(__c.real());
+  if (std::isinf(__c.imag()))
+    return abs(__c.imag());
+  return __c.real() * __c.real() + __c.imag() * __c.imag();
+}
+
+// conj
+
+template <class _Tp> std::complex<_Tp> conj(const std::complex<_Tp> &__c) {
+  return std::complex<_Tp>(__c.real(), -__c.imag());
+}
+
+// proj
+
+template <class _Tp> std::complex<_Tp> proj(const std::complex<_Tp> &__c) {
+  std::complex<_Tp> __r = __c;
+  if (std::isinf(__c.real()) || std::isinf(__c.imag()))
+    __r = std::complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag()));
+  return __r;
+}
+
+// polar
+
+template <class _Tp>
+complex<_Tp> polar(const _Tp &__rho, const _Tp &__theta = _Tp()) {
+  if (std::isnan(__rho) || signbit(__rho))
+    return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+  if (std::isnan(__theta)) {
+    if (std::isinf(__rho))
+      return std::complex<_Tp>(__rho, __theta);
+    return std::complex<_Tp>(__theta, __theta);
+  }
+  if (std::isinf(__theta)) {
+    if (std::isinf(__rho))
+      return std::complex<_Tp>(__rho, _Tp(NAN));
+    return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+  }
+  _Tp __x = __rho * cos(__theta);
+  if (std::isnan(__x))
+    __x = 0;
+  _Tp __y = __rho * sin(__theta);
+  if (std::isnan(__y))
+    __y = 0;
+  return std::complex<_Tp>(__x, __y);
+}
+
+// log
+
+template <class _Tp> std::complex<_Tp> log(const std::complex<_Tp> &__x) {
+  return std::complex<_Tp>(log(abs(__x)), arg(__x));
+}
+
+// log10
+
+template <class _Tp> std::complex<_Tp> log10(const std::complex<_Tp> &__x) {
+  return log(__x) / log(_Tp(10));
+}
+
+// sqrt
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sqrt(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(_Tp(INFINITY), __x.imag());
+  if (std::isinf(__x.real())) {
+    if (__x.real() > _Tp(0))
+      return std::complex<_Tp>(__x.real(), std::isnan(__x.imag())
+                                               ? __x.imag()
+                                               : copysign(_Tp(0), __x.imag()));
+    return std::complex<_Tp>(std::isnan(__x.imag()) ? __x.imag() : _Tp(0),
+                             copysign(__x.real(), __x.imag()));
+  }
+  return polar(sqrt(abs(__x)), arg(__x) / _Tp(2));
+}
+
+// exp
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> exp(const std::complex<_Tp> &__x) {
+  _Tp __i = __x.imag();
+  if (std::isinf(__x.real())) {
+    if (__x.real() < _Tp(0)) {
+      if (!std::isfinite(__i))
+        __i = _Tp(1);
+    } else if (__i == 0 || !std::isfinite(__i)) {
+      if (std::isinf(__i))
+        __i = _Tp(NAN);
+      return std::complex<_Tp>(__x.real(), __i);
+    }
+  } else if (std::isnan(__x.real()) && __x.imag() == 0)
+    return __x;
+  _Tp __e = exp(__x.real());
+  return std::complex<_Tp>(__e * cos(__i), __e * sin(__i));
+}
+
+// pow
+
+template <class _Tp>
+std::complex<_Tp> pow(const std::complex<_Tp> &__x,
+                      const std::complex<_Tp> &__y) {
+  return exp(__y * log(__x));
+}
+
+// __sqr, computes pow(x, 2)
+
+template <class _Tp> std::complex<_Tp> __sqr(const std::complex<_Tp> &__x) {
+  return std::complex<_Tp>((__x.real() - __x.imag()) *
+                               (__x.real() + __x.imag()),
+                           _Tp(2) * __x.real() * __x.imag());
+}
+
+// asinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asinh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return __x;
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.real(),
+                               copysign(__pi * _Tp(0.25), __x.imag()));
+    return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.imag(), __x.real());
+    if (__x.imag() == 0)
+      return __x;
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(copysign(__x.imag(), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) + _Tp(1)));
+  return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// acosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acosh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return std::complex<_Tp>(abs(__x.real()), __x.imag());
+    if (std::isinf(__x.imag())) {
+      if (__x.real() > 0)
+        return std::complex<_Tp>(__x.real(),
+                                 copysign(__pi * _Tp(0.25), __x.imag()));
+      else
+        return std::complex<_Tp>(-__x.real(),
+                                 copysign(__pi * _Tp(0.75), __x.imag()));
+    }
+    if (__x.real() < 0)
+      return std::complex<_Tp>(-__x.real(), copysign(__pi, __x.imag()));
+    return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(abs(__x.imag()), __x.real());
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(abs(__x.imag()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+  return std::complex<_Tp>(copysign(__z.real(), _Tp(0)),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// atanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atanh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.imag())) {
+    return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  }
+  if (std::isnan(__x.imag())) {
+    if (std::isinf(__x.real()) || __x.real() == 0)
+      return std::complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag());
+    return std::complex<_Tp>(__x.imag(), __x.imag());
+  }
+  if (std::isnan(__x.real())) {
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.real())) {
+    return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  }
+  if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) {
+    return std::complex<_Tp>(copysign(_Tp(INFINITY), __x.real()),
+                             copysign(_Tp(0), __x.imag()));
+  }
+  std::complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2);
+  return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// sinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sinh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(__x.real(), _Tp(NAN));
+  if (__x.real() == 0 && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(__x.real(), _Tp(NAN));
+  if (__x.imag() == 0 && !std::isfinite(__x.real()))
+    return __x;
+  return std::complex<_Tp>(sinh(__x.real()) * cos(__x.imag()),
+                           cosh(__x.real()) * sin(__x.imag()));
+}
+
+// cosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> cosh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(abs(__x.real()), _Tp(NAN));
+  if (__x.real() == 0 && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(_Tp(NAN), __x.real());
+  if (__x.real() == 0 && __x.imag() == 0)
+    return std::complex<_Tp>(_Tp(1), __x.imag());
+  if (__x.imag() == 0 && !std::isfinite(__x.real()))
+    return std::complex<_Tp>(abs(__x.real()), __x.imag());
+  return std::complex<_Tp>(cosh(__x.real()) * cos(__x.imag()),
+                           sinh(__x.real()) * sin(__x.imag()));
+}
+
+// tanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tanh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real())) {
+    if (!std::isfinite(__x.imag()))
+      return std::complex<_Tp>(_Tp(1), _Tp(0));
+    return std::complex<_Tp>(_Tp(1),
+                             copysign(_Tp(0), sin(_Tp(2) * __x.imag())));
+  }
+  if (std::isnan(__x.real()) && __x.imag() == 0)
+    return __x;
+  _Tp __2r(_Tp(2) * __x.real());
+  _Tp __2i(_Tp(2) * __x.imag());
+  _Tp __d(cosh(__2r) + cos(__2i));
+  _Tp __2rsh(sinh(__2r));
+  if (std::isinf(__2rsh) && std::isinf(__d))
+    return std::complex<_Tp>(__2rsh > _Tp(0) ? _Tp(1) : _Tp(-1),
+                             __2i > _Tp(0) ? _Tp(0) : _Tp(-0.));
+  return std::complex<_Tp>(__2rsh / __d, sin(__2i) / __d);
+}
+
+// asin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asin(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// acos
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acos(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return std::complex<_Tp>(__x.imag(), __x.real());
+    if (std::isinf(__x.imag())) {
+      if (__x.real() < _Tp(0))
+        return std::complex<_Tp>(_Tp(0.75) * __pi, -__x.imag());
+      return std::complex<_Tp>(_Tp(0.25) * __pi, -__x.imag());
+    }
+    if (__x.real() < _Tp(0))
+      return std::complex<_Tp>(__pi,
+                               signbit(__x.imag()) ? -__x.real() : __x.real());
+    return std::complex<_Tp>(_Tp(0),
+                             signbit(__x.imag()) ? __x.real() : -__x.real());
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.real(), -__x.imag());
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+  if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag())))
+    return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+  if (signbit(__x.imag()))
+    return std::complex<_Tp>(abs(__z.imag()), abs(__z.real()));
+  return std::complex<_Tp>(abs(__z.imag()), -abs(__z.real()));
+}
+
+// atan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atan(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// sin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sin(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// cos
+
+template <class _Tp> std::complex<_Tp> cos(const std::complex<_Tp> &__x) {
+  return cosh(complex<_Tp>(-__x.imag(), __x.real()));
+}
+
+// tan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tan(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = tanh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+} // namespace std
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/math.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/math.h
new file mode 100644
index 0000000..1e3c07c
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/math.h
@@ -0,0 +1,61 @@
+/*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+// If we are in C++ mode and include <math.h> (not <cmath>) first, we still need
+// to make sure <cmath> is read first. The problem otherwise is that we haven't
+// seen the declarations of the math.h functions when the system math.h includes
+// our cmath overlay. However, our cmath overlay, or better the underlying
+// overlay, e.g. CUDA, uses the math.h functions. Since we haven't declared them
+// yet we get errors. CUDA avoids this by eagerly declaring all math functions
+// (in the __device__ space) but we cannot do this. Instead we break the
+// dependence by forcing cmath to go first. While our cmath will in turn include
+// this file, the cmath guards will prevent recursion.
+#ifdef __cplusplus
+#include <cmath>
+#endif
+
+#ifndef __CLANG_OPENMP_MATH_H__
+#define __CLANG_OPENMP_MATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <math.h>
+
+// We need limits.h for __clang_cuda_math.h below and because it should not hurt
+// we include it eagerly here.
+#include <limits.h>
+
+// We need stdlib.h because (for now) __clang_cuda_math.h below declares `abs`
+// which should live in stdlib.h.
+#include <stdlib.h>
+
+#pragma omp begin declare variant match( \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_math.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#pragma omp end declare variant
+
+#ifdef __AMDGCN__
+#pragma omp begin declare variant match(device = {arch(amdgcn)})
+
+#define __OPENMP_AMDGCN__
+#include <__clang_hip_math.h>
+#undef __OPENMP_AMDGCN__
+
+#pragma omp end declare variant
+#endif
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/new b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/new
new file mode 100644
index 0000000..985ddc5
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/openmp_wrappers/new
@@ -0,0 +1,48 @@
+//===--------- new - OPENMP wrapper for <new> ------------------------------===
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===-----------------------------------------------------------------------===
+
+#ifndef __CLANG_OPENMP_WRAPPERS_NEW
+#define __CLANG_OPENMP_WRAPPERS_NEW
+
+// We need the system <new> for the std::nothrow_t. The new/delete operators
+// which do not use nothrow_t are provided without the <new> header.
+#include_next <new>
+
+#if defined(__NVPTX__) && defined(_OPENMP)
+
+#include <cstdlib>
+
+#pragma push_macro("OPENMP_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define OPENMP_NOEXCEPT noexcept
+#else
+#define OPENMP_NOEXCEPT
+#endif
+
+inline void *operator new(__SIZE_TYPE__ size,
+                          const std::nothrow_t &) OPENMP_NOEXCEPT {
+  return ::operator new(size);
+}
+
+inline void *operator new[](__SIZE_TYPE__ size, const std::nothrow_t &) {
+  return ::operator new(size);
+}
+
+inline void operator delete(void *ptr, const std::nothrow_t &)OPENMP_NOEXCEPT {
+  ::operator delete(ptr);
+}
+
+inline void operator delete[](void *ptr,
+                              const std::nothrow_t &) OPENMP_NOEXCEPT {
+  ::operator delete(ptr);
+}
+
+#pragma pop_macro("OPENMP_NOEXCEPT")
+#endif
+
+#endif // include guard
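A small sketch (illustrative only, assuming an NVPTX offload build) of what the nothrow overloads above enable inside a target region:

#include <new>
#include <cstdio>

int main() {
  int result = 0;
#pragma omp target map(from : result)
  {
    // Resolves to the device-side operator new/delete declared above,
    // which forward to the device malloc/free.
    int *p = new (std::nothrow) int(42);
    result = p ? *p : -1;
    delete p;
  }
  std::printf("%d\n", result);
}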
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pconfigintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pconfigintrin.h
new file mode 100644
index 0000000..d2014b0
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pconfigintrin.h
@@ -0,0 +1,40 @@
+/*===---- pconfigintrin.h - X86 platform configuration ---------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <pconfigintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __PCONFIGINTRIN_H
+#define __PCONFIGINTRIN_H
+
+#define __PCONFIG_KEY_PROGRAM 0x00000001
+
+#if __has_extension(gnu_asm)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("pconfig")))
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_pconfig_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+  unsigned int __result;
+  __asm__ ("pconfig"
+           : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+           : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+           : "cc");
+  return __result;
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __has_extension(gnu_asm) */
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pkuintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pkuintrin.h
new file mode 100644
index 0000000..c62080b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pkuintrin.h
@@ -0,0 +1,34 @@
+/*===---- pkuintrin.h - PKU intrinsics -------------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __PKUINTRIN_H
+#define __PKUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rdpkru_u32(void)
+{
+  return __builtin_ia32_rdpkru();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_wrpkru(unsigned int __val)
+{
+  __builtin_ia32_wrpkru(__val);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
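By way of illustration (a sketch, not part of the patch; assumes an x86-64 Linux system where the kernel has granted a protection key, e.g. via pkey_alloc, and compilation with -mpku), the two PKU intrinsics are typically paired to toggle access rights. In the PKRU register, key k owns two bits: access-disable at bit 2k and write-disable at bit 2k+1:

#include <immintrin.h>
#include <stdio.h>

void deny_writes_for_key1_temporarily(void) {
  unsigned int pkru = _rdpkru_u32();
  _wrpkru(pkru | (1u << 3)); /* set the WD bit of key 1: writes now fault */
  /* ... code that must not write pages tagged with key 1 ... */
  _wrpkru(pkru);             /* restore the previous rights */
  printf("PKRU restored to %#x\n", pkru);
}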
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pmmintrin.h
new file mode 100644
index 0000000..a83b2eb
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/pmmintrin.h
@@ -0,0 +1,290 @@
+/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PMMINTRIN_H
+#define __PMMINTRIN_H
+
+#include <emmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse3"), __min_vector_width__(128)))
+
+/// Loads data from an unaligned memory location to elements in a 128-bit
+/// vector.
+///
+/// If the address of the data is not 16-byte aligned, the instruction may
+/// read two adjacent aligned blocks of memory to retrieve the requested
+/// data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VLDDQU instruction.
+///
+/// \param __p
+///    A pointer to a 128-bit integer vector containing integer values.
+/// \returns A 128-bit vector containing the moved values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_lddqu_si128(__m128i const *__p)
+{
+  return (__m128i)__builtin_ia32_lddqu((char const *)__p);
+}
+
+/// Adds the even-indexed values and subtracts the odd-indexed values of
+/// two 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VADDSUBPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the left source operand.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the right source operand.
+/// \returns A 128-bit vector of [4 x float] containing the alternating sums and
+///    differences of both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_addsub_ps(__m128 __a, __m128 __b)
+{
+  return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// Horizontally adds the adjacent pairs of values contained in two
+/// 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VHADDPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal sums of the values are stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal sums of the values are stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of
+///    both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hadd_ps(__m128 __a, __m128 __b)
+{
+  return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// Horizontally subtracts the adjacent pairs of values contained in two
+/// 128-bit vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VHSUBPS instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal differences between the values are stored in the lower
+///    bits of the destination.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The horizontal differences between the values are stored in the upper
+///    bits of the destination.
+/// \returns A 128-bit vector of [4 x float] containing the horizontal
+///    differences of both operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_hsub_ps(__m128 __a, __m128 __b)
+{
+  return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);
+}
+
+/// Moves and duplicates odd-indexed values from a 128-bit vector
+/// of [4 x float] to float values stored in a 128-bit vector of
+/// [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VMOVSHDUP instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float]. \n
+///    Bits [127:96] of the source are written to bits [127:96] and [95:64] of
+///    the destination. \n
+///    Bits [63:32] of the source are written to bits [63:32] and [31:0] of the
+///    destination.
+/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
+///    values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_movehdup_ps(__m128 __a)
+{
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);
+}
+
+/// Duplicates even-indexed values from a 128-bit vector of
+/// [4 x float] to float values stored in a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VMOVSLDUP instruction.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] \n
+///    Bits [95:64] of the source are written to bits [127:96] and [95:64] of
+///    the destination. \n
+///    Bits [31:0] of the source are written to bits [63:32] and [31:0] of the
+///    destination.
+/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
+///    values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_moveldup_ps(__m128 __a)
+{
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2);
+}
+
+/// Adds the even-indexed values and subtracts the odd-indexed values of
+/// two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VADDSUBPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double] containing the left source operand.
+/// \param __b
+///    A 128-bit vector of [2 x double] containing the right source operand.
+/// \returns A 128-bit vector of [2 x double] containing the alternating sums
+///    and differences of both operands.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_addsub_pd(__m128d __a, __m128d __b)
+{
+  return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);
+}
+
+/// Horizontally adds the pairs of values contained in two 128-bit
+/// vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VHADDPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal sum of the values is stored in the lower bits of the
+///    destination.
+/// \param __b
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal sum of the values is stored in the upper bits of the
+///    destination.
+/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of
+///    both operands.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hadd_pd(__m128d __a, __m128d __b)
+{
+  return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);
+}
+
+/// Horizontally subtracts the pairs of values contained in two 128-bit
+/// vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VHSUBPD instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal difference of the values is stored in the lower bits of
+///    the destination.
+/// \param __b
+///    A 128-bit vector of [2 x double] containing one of the source operands.
+///    The horizontal difference of the values is stored in the upper bits of
+///    the destination.
+/// \returns A 128-bit vector of [2 x double] containing the horizontal
+///    differences of both operands.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_hsub_pd(__m128d __a, __m128d __b)
+{
+  return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);
+}
+
+/// Moves and duplicates one double-precision value to double-precision
+/// values stored in a 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_loaddup_pd(double const *dp);
+/// \endcode
+///
+/// This intrinsic corresponds to the VMOVDDUP instruction.
+///
+/// \param dp
+///    A pointer to a double-precision value to be moved and duplicated.
+/// \returns A 128-bit vector of [2 x double] containing the moved and
+///    duplicated values.
+#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
+
+/// Moves and duplicates the double-precision value in the lower bits of
+/// a 128-bit vector of [2 x double] to double-precision values stored in a
+/// 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VMOVDDUP instruction.
+///
+/// \param __a
+///    A 128-bit vector of [2 x double]. Bits [63:0] are written to bits
+///    [127:64] and [63:0] of the destination.
+/// \returns A 128-bit vector of [2 x double] containing the moved and
+///    duplicated values.
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_movedup_pd(__m128d __a)
+{
+  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+}
+
+/// Establishes a linear address memory range to be monitored and puts
+/// the processor in the monitor event pending state. Data stored in the
+/// monitored address range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MONITOR instruction.
+///
+/// \param __p
+///    The memory range to be monitored. The size of the range is determined by
+///    CPUID function 0000_0005h.
+/// \param __extensions
+///    Optional extensions for the monitoring state.
+/// \param __hints
+///    Optional hints for the monitoring state.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
+{
+  __builtin_ia32_monitor(__p, __extensions, __hints);
+}
+
+/// Used with the MONITOR instruction to wait while the processor is in
+/// the monitor event pending state. Data stored in the monitored address
+/// range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MWAIT instruction.
+///
+/// \param __extensions
+///    Optional extensions for the monitoring state, which may vary by
+///    processor.
+/// \param __hints
+///    Optional hints for the monitoring state, which may vary by processor.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mwait(unsigned __extensions, unsigned __hints)
+{
+  __builtin_ia32_mwait(__extensions, __hints);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __PMMINTRIN_H */
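As a quick illustration of the SSE3 horizontal operations declared above (a sketch, not part of the patch; compile with -msse3 on x86):

#include <pmmintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* lanes low..high: 1,2,3,4 */
  __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f); /* lanes low..high: 5,6,7,8 */
  float out[4];
  /* _mm_hadd_ps -> {1+2, 3+4, 5+6, 7+8} == {3, 7, 11, 15} */
  _mm_storeu_ps(out, _mm_hadd_ps(a, b));
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}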
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/popcntintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/popcntintrin.h
new file mode 100644
index 0000000..0aa94ae
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/popcntintrin.h
@@ -0,0 +1,59 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __POPCNTINTRIN_H
+#define __POPCNTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
+/// Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the POPCNT instruction.
+///
+/// \param __A
+///    An unsigned 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+///    source operand.
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_popcnt_u32(unsigned int __A)
+{
+  return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+/// Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the POPCNT instruction.
+///
+/// \param __A
+///    An unsigned 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+///    source operand.
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_popcnt_u64(unsigned long long __A)
+{
+  return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
+
+#endif /* __POPCNTINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/emmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/emmintrin.h
new file mode 100644
index 0000000..4dcb848
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/emmintrin.h
@@ -0,0 +1,2324 @@
+/*===---- emmintrin.h - Implementation of SSE2 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since X86 SSE2 intrinsics mainly handles __m128i and __m128d type,
+   PowerPC VMX/VSX ISA is a good match for vector float SIMD operations.
+   However scalar float operations in vector (XMM) registers require
+   the POWER8 VSX ISA (2.07) level. There are differences for data
+   format and placement of float scalars in the vector register, which
+   require extra steps to match SSE2 scalar float semantics on POWER.
+
+   It should be noted that there's much difference between X86_64's
+   MXSCR and PowerISA's FPSCR/VSCR registers. It's recommended to use
+   portable <fenv.h> instead of access MXSCR directly.
+
+   Most SSE2 scalar float intrinsic operations can be performed more
+   efficiently as C language float scalar operations or optimized to
+   use vector SIMD operations. We recommend this for new applications.
+*/
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef EMMINTRIN_H_
+#define EMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files. */
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef __vector double __v2df;
+typedef __vector long long __v2di;
+typedef __vector unsigned long long __v2du;
+typedef __vector int __v4si;
+typedef __vector unsigned int __v4su;
+typedef __vector short __v8hi;
+typedef __vector unsigned short __v8hu;
+typedef __vector signed char __v16qi;
+typedef __vector unsigned char __v16qu;
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components. */
+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Unaligned version of the same types. */
+typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+
+/* Define two value permute mask. */
+#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
+
+/* Create a vector with element 0 as F and the rest zero.
*/ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_sd (double __F) +{ + return __extension__ (__m128d){ __F, 0.0 }; +} + +/* Create a vector with both elements equal to F. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pd (double __F) +{ + return __extension__ (__m128d){ __F, __F }; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pd1 (double __F) +{ + return _mm_set1_pd (__F); +} + +/* Create a vector with the lower value X and upper value W. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __X, __W }; +} + +/* Create a vector with the lower value W and upper value X. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __W, __X }; +} + +/* Create an undefined vector. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_pd (void) +{ + __m128d __Y = __Y; + return __Y; +} + +/* Create a vector of zeros. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_pd (void) +{ + return (__m128d) vec_splats (0); +} + +/* Sets the low DPFP value of A from the low value of B. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_sd (__m128d __A, __m128d __B) +{ + __v2df result = (__v2df) __A; + result [0] = ((__v2df) __B)[0]; + return (__m128d) result; +} + +/* Load two DPFP values from P. The address must be 16-byte aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_pd (double const *__P) +{ + return ((__m128d)vec_ld(0, (__v16qu*)__P)); +} + +/* Load two DPFP values from P. The address need not be 16-byte aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_pd (double const *__P) +{ + return (vec_vsx_ld(0, __P)); +} + +/* Create a vector with all two elements equal to *P. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load1_pd (double const *__P) +{ + return (vec_splats (*__P)); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_sd (double const *__P) +{ + return _mm_set_sd (*__P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_pd1 (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* Load two DPFP values in reverse order. The address must be aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadr_pd (double const *__P) +{ + __v2df __tmp = _mm_load_pd (__P); + return (__m128d)vec_xxpermdi (__tmp, __tmp, 2); +} + +/* Store two DPFP values. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_pd (double *__P, __m128d __A) +{ + vec_st((__v16qu)__A, 0, (__v16qu*)__P); +} + +/* Store two DPFP values. The address need not be 16-byte aligned. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_pd (double *__P, __m128d __A) +{ + *(__m128d_u *)__P = __A; +} + +/* Stores the lower DPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_sd (double *__P, __m128d __A) +{ + *__P = ((__v2df)__A)[0]; +} + +extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_f64 (__m128d __A) +{ + return ((__v2df)__A)[0]; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_pd (double *__P, __m128d __A) +{ + _mm_store_sd (__P, __A); +} + +/* Stores the upper DPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeh_pd (double *__P, __m128d __A) +{ + *__P = ((__v2df)__A)[1]; +} +/* Store the lower DPFP value across two words. + The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store1_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, vec_splat (__A, 0)); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_pd1 (double *__P, __m128d __A) +{ + _mm_store1_pd (__P, __A); +} + +/* Store two DPFP values in reverse order. The address must be aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storer_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2)); +} + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si64 (__m128i __A) +{ + return ((__v2di)__A)[0]; +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si64x (__m128i __A) +{ + return ((__v2di)__A)[0]; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A + (__v2df)__B); +} + +/* Add the lower double-precision (64-bit) floating-point element in + a and b, store the result in the lower element of dst, and copy + the upper element from a to the upper element of dst. 
*/ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_sd (__m128d __A, __m128d __B) +{ + __A[0] = __A[0] + __B[0]; + return (__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A - (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_sd (__m128d __A, __m128d __B) +{ + __A[0] = __A[0] - __B[0]; + return (__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A * (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_sd (__m128d __A, __m128d __B) +{ + __A[0] = __A[0] * __B[0]; + return (__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A / (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_sd (__m128d __A, __m128d __B) +{ + __A[0] = __A[0] / __B[0]; + return (__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_pd (__m128d __A) +{ + return (vec_sqrt (__A)); +} + +/* Return pair {sqrt (B[0]), A[1]}. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_sd (__m128d __A, __m128d __B) +{ + __v2df c; + c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0])); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pd (__m128d __A, __m128d __B) +{ + return (vec_min (__A, __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = vec_min (a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pd (__m128d __A, __m128d __B) +{ + return (vec_max (__A, __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = vec_max (a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_pd 
(__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_pd (__m128d __A, __m128d __B) +{ + __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B); + return ((__m128d)vec_nor (temp, temp)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_pd (__m128d __A, __m128d __B) +{ + return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_pd (__m128d __A, __m128d __B) +{ +#if _ARCH_PWR8 + __v2du c, d; + /* Compare against self will return false (0's) if NAN. */ + c = (__v2du)vec_cmpeq (__A, __A); + d = (__v2du)vec_cmpeq (__B, __B); +#else + __v2du a, b; + __v2du c, d; + const __v2du double_exp_mask = {0x7ff0000000000000, 0x7ff0000000000000}; + a = (__v2du)vec_abs ((__v2df)__A); + b = (__v2du)vec_abs ((__v2df)__B); + c = (__v2du)vec_cmpgt (double_exp_mask, a); + d = (__v2du)vec_cmpgt (double_exp_mask, b); +#endif + /* A != NAN and B != NAN. */ + return ((__m128d)vec_and(c, d)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_pd (__m128d __A, __m128d __B) +{ +#if _ARCH_PWR8 + __v2du c, d; + /* Compare against self will return false (0's) if NAN. */ + c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A); + d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B); + /* A == NAN OR B == NAN converts too: + NOT(A != NAN) OR NOT(B != NAN). */ + c = vec_nor (c, c); + return ((__m128d)vec_orc(c, d)); +#else + __v2du c, d; + /* Compare against self will return false (0's) if NAN. */ + c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A); + d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B); + /* Convert the true ('1's) is NAN. */ + c = vec_nor (c, c); + d = vec_nor (d, d); + return ((__m128d)vec_or(c, d)); +#endif +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_sd(__m128d __A, __m128d __B) +{ + __v2df a, b, c; + /* PowerISA VSX does not allow partial (for just lower double) + results. So to insure we don't generate spurious exceptions + (from the upper double values) we splat the lower double + before we do the operation. */ + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = (__v2df) vec_cmpeq(a, b); + /* Then we merge the lower double result with the original upper + double from __A. 
*/ + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = (__v2df) vec_cmplt(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = (__v2df) vec_cmple(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = (__v2df) vec_cmpgt(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = (__v2df) vec_cmpge(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + c = (__v2df) vec_cmpeq(a, b); + c = vec_nor (c, c); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + /* Not less than is just greater than or equal. */ + c = (__v2df) vec_cmpge(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + /* Not less than or equal is just greater than. */ + c = (__v2df) vec_cmpge(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + /* Not greater than is just less than or equal. */ + c = (__v2df) vec_cmple(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_sd (__m128d __A, __m128d __B) +{ + __v2df a, b, c; + a = vec_splats (__A[0]); + b = vec_splats (__B[0]); + /* Not greater than or equal is just less than. 
*/ + c = (__v2df) vec_cmplt(a, b); + return (__m128d) _mm_setr_pd (c[0], __A[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_sd (__m128d __A, __m128d __B) +{ + __v2df r; + r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0])); + return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_sd (__m128d __A, __m128d __B) +{ + __v2df r; + r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0])); + return (__m128d) _mm_setr_pd (r[0], __A[1]); +} + +/* FIXME + The __mm_comi??_sd and __mm_ucomi??_sd implementations below are + exactly the same because GCC for PowerPC only generates unordered + compares (scalar and vector). + Technically __mm_comieq_sp et all should be using the ordered + compare and signal for QNaNs. The __mm_ucomieq_sd et all should + be OK. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_sd (__m128d __A, __m128d __B) +{ + return (__A[0] == __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_sd (__m128d __A, __m128d __B) +{ + return (__A[0] < __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_sd (__m128d __A, __m128d __B) +{ + return (__A[0] <= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_sd (__m128d __A, __m128d __B) +{ + return (__A[0] > __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_sd (__m128d __A, __m128d __B) +{ + return (__A[0] >= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_sd (__m128d __A, __m128d __B) +{ + return (__A[0] != __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_sd (__m128d __A, __m128d __B) +{ + return (__A[0] == __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_sd (__m128d __A, __m128d __B) +{ + return (__A[0] < __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_sd (__m128d __A, __m128d __B) +{ + return (__A[0] <= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_sd (__m128d __A, __m128d __B) +{ + return (__A[0] > __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_sd (__m128d __A, __m128d __B) +{ + return (__A[0] >= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_sd (__m128d __A, __m128d __B) +{ + return (__A[0] != __B[0]); +} + +/* Create a vector of Qi, where i is the element number. 
*/ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi64x (long long __q1, long long __q0) +{ + return __extension__ (__m128i)(__v2di){ __q0, __q1 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi64 (__m64 __q1, __m64 __q0) +{ + return _mm_set_epi64x ((long long)__q1, (long long)__q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0) +{ + return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4, + short __q3, short __q2, short __q1, short __q0) +{ + return __extension__ (__m128i)(__v8hi){ + __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m128i)(__v16qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +/* Set all of the elements of the vector to A. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi64x (long long __A) +{ + return _mm_set_epi64x (__A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi64 (__m64 __A) +{ + return _mm_set_epi64 (__A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi32 (int __A) +{ + return _mm_set_epi32 (__A, __A, __A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi16 (short __A) +{ + return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi8 (char __A) +{ + return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +/* Create a vector of Qi, where i is the element number. + The parameter order is reversed from the _mm_set_epi* functions. 
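+   (Added example, not upstream text: _mm_setr_epi32 (0, 1, 2, 3) is
+   equivalent to _mm_set_epi32 (3, 2, 1, 0); both place 0 in element [0].)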
*/ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi64 (__m64 __q0, __m64 __q1) +{ + return _mm_set_epi64 (__q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3) +{ + return _mm_set_epi32 (__q3, __q2, __q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3, + short __q4, short __q5, short __q6, short __q7) +{ + return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03, + char __q04, char __q05, char __q06, char __q07, + char __q08, char __q09, char __q10, char __q11, + char __q12, char __q13, char __q14, char __q15) +{ + return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08, + __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_si128 (__m128i const *__P) +{ + return *__P; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_si128 (__m128i_u const *__P) +{ + return (__m128i) (vec_vsx_ld(0, (signed int const *)__P)); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_epi64 (__m128i_u const *__P) +{ + return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_si128 (__m128i *__P, __m128i __B) +{ + vec_st ((__v16qu) __B, 0, (__v16qu*)__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_si128 (__m128i_u *__P, __m128i __B) +{ + *__P = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_epi64 (__m128i_u *__P, __m128i __B) +{ + *(long long *)__P = ((__v2di)__B)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi64_pi64 (__m128i_u __B) +{ + return (__m64) ((__v2di)__B)[0]; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movpi64_epi64 (__m64 __A) +{ + return _mm_set_epi64 ((__m64)0LL, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_epi64 (__m128i __A) +{ + return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]); +} + +/* Create an undefined vector. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_si128 (void) +{ + __m128i __Y = __Y; + return __Y; +} + +/* Create a vector of zeros. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_si128 (void) +{ + return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 }; +} + +#ifdef _ARCH_PWR8 +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_pd (__m128i __A) +{ + __v2di val; + /* For LE need to generate Vector Unpack Low Signed Word. + Which is generated from unpackh. 
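+     (Added note: vec_unpackh sign-extends two 32-bit elements to
+     64 bits; with little-endian element numbering those are the two
+     low words of __A, which is what this conversion needs.)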
*/ + val = (__v2di)vec_unpackh ((__v4si)__A); + + return (__m128d)vec_ctf (val, 0); +} +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_ps (__m128i __A) +{ + return ((__m128)vec_ctf((__v4si)__A, 0)); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_epi32 (__m128d __A) +{ + __v2df rounded = vec_rint (__A); + __v4si result, temp; + const __v4si vzero = + { 0, 0, 0, 0 }; + + /* VSX Vector truncate Double-Precision to integer and Convert to + Signed Integer Word format with Saturate. */ + __asm__( + "xvcvdpsxws %x0,%x1" + : "=wa" (temp) + : "wa" (rounded) + : ); + +#ifdef _ARCH_PWR8 + temp = vec_mergeo (temp, temp); + result = (__v4si) vec_vpkudum ((__vector long long) temp, + (__vector long long) vzero); +#else + { + const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b, + 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f }; + result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm); + } +#endif + return (__m128i) result; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_pi32 (__m128d __A) +{ + __m128i result = _mm_cvtpd_epi32(__A); + + return (__m64) result[0]; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_ps (__m128d __A) +{ + __v4sf result; + __v4si temp; + const __v4si vzero = { 0, 0, 0, 0 }; + + __asm__( + "xvcvdpsp %x0,%x1" + : "=wa" (temp) + : "wa" (__A) + : ); + +#ifdef _ARCH_PWR8 + temp = vec_mergeo (temp, temp); + result = (__v4sf) vec_vpkudum ((__vector long long) temp, + (__vector long long) vzero); +#else + { + const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b, + 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f }; + result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm); + } +#endif + return ((__m128)result); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_epi32 (__m128d __A) +{ + __v4si result; + __v4si temp; + const __v4si vzero = { 0, 0, 0, 0 }; + + /* VSX Vector truncate Double-Precision to integer and Convert to + Signed Integer Word format with Saturate. 
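+     (Added note: the truncating convert rounds toward zero, e.g.
+     {1.9, -2.9} becomes {1, -2}, and out-of-range values saturate
+     to the int32 limits.)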
*/
+  __asm__(
+      "xvcvdpsxws %x0,%x1"
+      : "=wa" (temp)
+      : "wa" (__A)
+      : );
+
+#ifdef _ARCH_PWR8
+  temp = vec_mergeo (temp, temp);
+  result = (__v4si) vec_vpkudum ((__vector long long) temp,
+                                 (__vector long long) vzero);
+#else
+  {
+    const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+                            0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
+    result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+  }
+#endif
+
+  return ((__m128i) result);
+}
+
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_pi32 (__m128d __A)
+{
+  __m128i result = _mm_cvttpd_epi32 (__A);
+
+  return (__m64) result[0];
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsi128_si32 (__m128i __A)
+{
+  return ((__v4si)__A)[0];
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpi32_pd (__m64 __A)
+{
+  __v4si temp;
+  __v2di tmp2;
+  __v2df result;
+
+  temp = (__v4si)vec_splats (__A);
+  tmp2 = (__v2di)vec_unpackl (temp);
+  result = vec_ctf ((__vector signed long long) tmp2, 0);
+  return (__m128d)result;
+}
+#endif
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_epi32 (__m128 __A)
+{
+  __v4sf rounded;
+  __v4si result;
+
+  rounded = vec_rint((__v4sf) __A);
+  result = vec_cts (rounded, 0);
+  return (__m128i) result;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_epi32 (__m128 __A)
+{
+  __v4si result;
+
+  result = vec_cts ((__v4sf) __A, 0);
+  return (__m128i) result;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_pd (__m128 __A)
+{
+  /* Check if vec_doubleh is defined by <altivec.h>.  If so use that.  */
+#ifdef vec_doubleh
+  return (__m128d) vec_doubleh ((__v4sf)__A);
+#else
+  /* Otherwise the compiler is not current and so need to generate the
+     equivalent code.  */
+  __v4sf a = (__v4sf)__A;
+  __v4sf temp;
+  __v2df result;
+#ifdef __LITTLE_ENDIAN__
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[1], [3]}, so we use two
+     shift left double vector word immediates to get the elements
+     lined up.  */
+  temp = __builtin_vsx_xxsldwi (a, a, 3);
+  temp = __builtin_vsx_xxsldwi (a, temp, 2);
+#else
+  /* The input float values are in elements {[0], [1]} but the convert
+     instruction needs them in elements {[0], [2]}, so we use two
+     shift left double vector word immediates to get the elements
+     lined up.  */
+  temp = vec_vmrghw (a, a);
+#endif
+  __asm__(
+      " xvcvspdp %x0,%x1"
+      : "=wa" (result)
+      : "wa" (temp)
+      : );
+  return (__m128d) result;
+#endif
+}
+
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si32 (__m128d __A)
+{
+  __v2df rounded = vec_rint((__v2df) __A);
+  int result = ((__v2df)rounded)[0];
+
+  return result;
+}
+/* Intel intrinsic.  */
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsd_si64 (__m128d __A)
+{
+  __v2df rounded = vec_rint ((__v2df) __A );
+  long long result = ((__v2df) rounded)[0];
+
+  return result;
+}
+
+/* Microsoft intrinsic.
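+   (Added note: the _si64x names below are MSVC-compatibility aliases
+   for the corresponding _si64 intrinsics.)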
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si64x (__m128d __A) +{ + return _mm_cvtsd_si64 ((__v2df)__A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si32 (__m128d __A) +{ + int result = ((__v2df)__A)[0]; + + return result; +} + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si64 (__m128d __A) +{ + long long result = ((__v2df)__A)[0]; + + return result; +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si64x (__m128d __A) +{ + return _mm_cvttsd_si64 (__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_ss (__m128 __A, __m128d __B) +{ + __v4sf result = (__v4sf)__A; + +#ifdef __LITTLE_ENDIAN__ + __v4sf temp_s; + /* Copy double element[0] to element [1] for conversion. */ + __v2df temp_b = vec_splat((__v2df)__B, 0); + + /* Pre-rotate __A left 3 (logically right 1) elements. */ + result = __builtin_vsx_xxsldwi (result, result, 3); + /* Convert double to single float scalar in a vector. */ + __asm__( + "xscvdpsp %x0,%x1" + : "=wa" (temp_s) + : "wa" (temp_b) + : ); + /* Shift the resulting scalar into vector element [0]. */ + result = __builtin_vsx_xxsldwi (result, temp_s, 1); +#else + result [0] = ((__v2df)__B)[0]; +#endif + return (__m128) result; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_sd (__m128d __A, int __B) +{ + __v2df result = (__v2df)__A; + double db = __B; + result [0] = db; + return (__m128d)result; +} + +/* Intel intrinsic. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_sd (__m128d __A, long long __B) +{ + __v2df result = (__v2df)__A; + double db = __B; + result [0] = db; + return (__m128d)result; +} + +/* Microsoft intrinsic. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_sd (__m128d __A, long long __B) +{ + return _mm_cvtsi64_sd (__A, __B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_sd (__m128d __A, __m128 __B) +{ +#ifdef __LITTLE_ENDIAN__ + /* Use splat to move element [0] into position for the convert. */ + __v4sf temp = vec_splat ((__v4sf)__B, 0); + __v2df res; + /* Convert single float scalar to double in a vector. 
*/ + __asm__( + "xscvspdp %x0,%x1" + : "=wa" (res) + : "wa" (temp) + : ); + return (__m128d) vec_mergel (res, (__v2df)__A); +#else + __v2df res = (__v2df)__A; + res [0] = ((__v4sf)__B) [0]; + return (__m128d) res; +#endif +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) +{ + __vector double result; + const int litmsk = __mask & 0x3; + + if (litmsk == 0) + result = vec_mergeh (__A, __B); +#if __GNUC__ < 6 + else if (litmsk == 1) + result = vec_xxpermdi (__B, __A, 2); + else if (litmsk == 2) + result = vec_xxpermdi (__B, __A, 1); +#else + else if (litmsk == 1) + result = vec_xxpermdi (__A, __B, 2); + else if (litmsk == 2) + result = vec_xxpermdi (__A, __B, 1); +#endif + else + result = vec_mergel (__A, __B); + + return result; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pd (__m128d __A, __m128d __B) +{ + return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pd (__m128d __A, __m128d __B) +{ + return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadh_pd (__m128d __A, double const *__B) +{ + __v2df result = (__v2df)__A; + result [1] = *__B; + return (__m128d)result; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_pd (__m128d __A, double const *__B) +{ + __v2df result = (__v2df)__A; + result [0] = *__B; + return (__m128d)result; +} + +#ifdef _ARCH_PWR8 +/* Intrinsic functions that require PowerISA 2.07 minimum. */ + +/* Creates a 2-bit mask from the most significant bits of the DPFP values. 
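+   (Added example, not upstream text: bit 0 is the sign bit of element
+   [0] and bit 1 that of element [1], so _mm_movemask_pd on
+   {-1.0, 2.0} returns 1.)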
*/ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_pd (__m128d __A) +{ + __vector unsigned long long result; + static const __vector unsigned int perm_mask = + { +#ifdef __LITTLE_ENDIAN__ + 0x80800040, 0x80808080, 0x80808080, 0x80808080 +#else + 0x80808080, 0x80808080, 0x80808080, 0x80804000 +#endif + }; + + result = ((__vector unsigned long long) + vec_vbpermq ((__vector unsigned char) __A, + (__vector unsigned char) perm_mask)); + +#ifdef __LITTLE_ENDIAN__ + return result[1]; +#else + return result[0]; +#endif +} +#endif /* _ARCH_PWR8 */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packus_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergel ((__vector long long) __A, + (__vector long long) __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_mergeh ((__vector long long) __A, + (__vector long long) __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qu)__A + (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hu)__A + (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4su)__A + (__v4su)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_add_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A + (__v2du)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qu)__A - (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hu)__A - (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4su)__A - (__v4su)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A - (__v2du)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd_epi16 (__m128i __A, __m128i __B) +{ + __vector signed int zero = {0, 0, 0, 0}; + + return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_epi16 (__m128i __A, __m128i __B) +{ + __vector signed int w0, w1; + + __vector unsigned char xform1 = { +#ifdef __LITTLE_ENDIAN__ + 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, + 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F +#else + 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, + 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D +#endif + }; + + w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B); + w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B); + return (__m128i) vec_perm (w0, w1, xform1); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hi)__A * (__v8hi)__B); +} + +extern __inline __m64 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_su32 (__m64 __A, __m64 __B) +{ + unsigned int a = __A; + unsigned int b = __B; + + return ((__m64)a * (__m64)b); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_epu32 (__m128i __A, __m128i __B) +{ +#if __GNUC__ < 8 + __v2du result; + +#ifdef __LITTLE_ENDIAN__ + /* VMX Vector Multiply Odd Unsigned Word. */ + __asm__( + "vmulouw %0,%1,%2" + : "=v" (result) + : "v" (__A), "v" (__B) + : ); +#else + /* VMX Vector Multiply Even Unsigned Word. */ + __asm__( + "vmuleuw %0,%1,%2" + : "=v" (result) + : "v" (__A), "v" (__B) + : ); +#endif + return (__m128i) result; +#else + return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B); +#endif +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi16 (__m128i __A, int __B) +{ + __v8hu lshift; + __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 }; + + if (__B >= 0 && __B < 16) + { + if (__builtin_constant_p(__B)) + lshift = (__v8hu) vec_splat_s16(__B); + else + lshift = vec_splats ((unsigned short) __B); + + result = vec_sl ((__v8hi) __A, lshift); + } + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi32 (__m128i __A, int __B) +{ + __v4su lshift; + __v4si result = { 0, 0, 0, 0 }; + + if (__B >= 0 && __B < 32) + { + if (__builtin_constant_p(__B) && __B < 16) + lshift = (__v4su) vec_splat_s32(__B); + else + lshift = vec_splats ((unsigned int) __B); + + result = vec_sl ((__v4si) __A, lshift); + } + + return (__m128i) result; +} + +#ifdef _ARCH_PWR8 +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi64 (__m128i __A, int __B) +{ + __v2du lshift; + __v2di result = { 0, 0 }; + + if (__B >= 0 && __B < 64) + { + if (__builtin_constant_p(__B) && __B < 16) + lshift = (__v2du) vec_splat_s32(__B); + else + lshift = (__v2du) vec_splats ((unsigned int) __B); + + result = vec_sl ((__v2di) __A, lshift); + } + + return (__m128i) result; +} +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi16 (__m128i __A, int __B) +{ + __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 }; + __v8hi result; + + if (__B < 16) + { + if (__builtin_constant_p(__B)) + rshift = (__v8hu) vec_splat_s16(__B); + else + rshift = vec_splats ((unsigned short) __B); + } + result = vec_sra ((__v8hi) __A, rshift); + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi32 (__m128i __A, int __B) +{ + __v4su rshift = { 31, 31, 31, 31 }; + __v4si result; + + if (__B < 32) + { + if (__builtin_constant_p(__B)) + { + if (__B < 16) + rshift = (__v4su) vec_splat_s32(__B); + else + rshift = (__v4su) vec_splats((unsigned int)__B); + } + else + rshift = vec_splats ((unsigned int) __B); + } + result = vec_sra ((__v4si) __A, rshift); + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_bslli_si128 (__m128i __A, const int __N) +{ + __v16qu result; + const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + if (__N < 16) + result = vec_sld ((__v16qu) __A, zeros, __N); + else + result = zeros; + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_bsrli_si128 (__m128i __A, const int __N) +{ + 
__v16qu result; + const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + if (__N < 16) +#ifdef __LITTLE_ENDIAN__ + if (__builtin_constant_p(__N)) + /* Would like to use Vector Shift Left Double by Octet + Immediate here to use the immediate form and avoid + load of __N * 8 value into a separate VR. */ + result = vec_sld (zeros, (__v16qu) __A, (16 - __N)); + else +#endif + { + __v16qu shift = vec_splats((unsigned char)(__N*8)); +#ifdef __LITTLE_ENDIAN__ + result = vec_sro ((__v16qu)__A, shift); +#else + result = vec_slo ((__v16qu)__A, shift); +#endif + } + else + result = zeros; + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_si128 (__m128i __A, const int __N) +{ + return _mm_bsrli_si128 (__A, __N); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_si128 (__m128i __A, const int _imm5) +{ + __v16qu result; + const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + if (_imm5 < 16) +#ifdef __LITTLE_ENDIAN__ + result = vec_sld ((__v16qu) __A, zeros, _imm5); +#else + result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5)); +#endif + else + result = zeros; + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + +_mm_srli_epi16 (__m128i __A, int __B) +{ + __v8hu rshift; + __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 }; + + if (__B < 16) + { + if (__builtin_constant_p(__B)) + rshift = (__v8hu) vec_splat_s16(__B); + else + rshift = vec_splats ((unsigned short) __B); + + result = vec_sr ((__v8hi) __A, rshift); + } + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi32 (__m128i __A, int __B) +{ + __v4su rshift; + __v4si result = { 0, 0, 0, 0 }; + + if (__B < 32) + { + if (__builtin_constant_p(__B)) + { + if (__B < 16) + rshift = (__v4su) vec_splat_s32(__B); + else + rshift = (__v4su) vec_splats((unsigned int)__B); + } + else + rshift = vec_splats ((unsigned int) __B); + + result = vec_sr ((__v4si) __A, rshift); + } + + return (__m128i) result; +} + +#ifdef _ARCH_PWR8 +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi64 (__m128i __A, int __B) +{ + __v2du rshift; + __v2di result = { 0, 0 }; + + if (__B < 64) + { + if (__builtin_constant_p(__B)) + { + if (__B < 16) + rshift = (__v2du) vec_splat_s32(__B); + else + rshift = (__v2du) vec_splats((unsigned long long)__B); + } + else + rshift = (__v2du) vec_splats ((unsigned int) __B); + + result = vec_sr ((__v2di) __A, rshift); + } + + return (__m128i) result; +} +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi16 (__m128i __A, __m128i __B) +{ + __v8hu lshift; + __vector __bool short shmask; + const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 }; + __v8hu result; + +#ifdef __LITTLE_ENDIAN__ + lshift = vec_splat ((__v8hu) __B, 0); +#else + lshift = vec_splat ((__v8hu) __B, 3); +#endif + shmask = vec_cmple (lshift, shmax); + result = vec_sl ((__v8hu) __A, lshift); + result = vec_sel ((__v8hu) shmask, result, shmask); + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi32 (__m128i __A, __m128i __B) +{ + __v4su lshift; + __vector __bool int shmask; + const __v4su shmax = { 32, 32, 32, 32 }; + __v4su result; +#ifdef 
__LITTLE_ENDIAN__ + lshift = vec_splat ((__v4su) __B, 0); +#else + lshift = vec_splat ((__v4su) __B, 1); +#endif + shmask = vec_cmplt (lshift, shmax); + result = vec_sl ((__v4su) __A, lshift); + result = vec_sel ((__v4su) shmask, result, shmask); + + return (__m128i) result; +} + +#ifdef _ARCH_PWR8 +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi64 (__m128i __A, __m128i __B) +{ + __v2du lshift; + __vector __bool long long shmask; + const __v2du shmax = { 64, 64 }; + __v2du result; + + lshift = vec_splat ((__v2du) __B, 0); + shmask = vec_cmplt (lshift, shmax); + result = vec_sl ((__v2du) __A, lshift); + result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask); + + return (__m128i) result; +} +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi16 (__m128i __A, __m128i __B) +{ + const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 }; + __v8hu rshift; + __v8hi result; + +#ifdef __LITTLE_ENDIAN__ + rshift = vec_splat ((__v8hu)__B, 0); +#else + rshift = vec_splat ((__v8hu)__B, 3); +#endif + rshift = vec_min (rshift, rshmax); + result = vec_sra ((__v8hi) __A, rshift); + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi32 (__m128i __A, __m128i __B) +{ + const __v4su rshmax = { 31, 31, 31, 31 }; + __v4su rshift; + __v4si result; + +#ifdef __LITTLE_ENDIAN__ + rshift = vec_splat ((__v4su)__B, 0); +#else + rshift = vec_splat ((__v4su)__B, 1); +#endif + rshift = vec_min (rshift, rshmax); + result = vec_sra ((__v4si) __A, rshift); + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi16 (__m128i __A, __m128i __B) +{ + __v8hu rshift; + __vector __bool short shmask; + const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 }; + __v8hu result; + +#ifdef __LITTLE_ENDIAN__ + rshift = vec_splat ((__v8hu) __B, 0); +#else + rshift = vec_splat ((__v8hu) __B, 3); +#endif + shmask = vec_cmple (rshift, shmax); + result = vec_sr ((__v8hu) __A, rshift); + result = vec_sel ((__v8hu) shmask, result, shmask); + + return (__m128i) result; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi32 (__m128i __A, __m128i __B) +{ + __v4su rshift; + __vector __bool int shmask; + const __v4su shmax = { 32, 32, 32, 32 }; + __v4su result; + +#ifdef __LITTLE_ENDIAN__ + rshift = vec_splat ((__v4su) __B, 0); +#else + rshift = vec_splat ((__v4su) __B, 1); +#endif + shmask = vec_cmplt (rshift, shmax); + result = vec_sr ((__v4su) __A, rshift); + result = vec_sel ((__v4su) shmask, result, shmask); + + return (__m128i) result; +} + +#ifdef _ARCH_PWR8 +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi64 (__m128i __A, __m128i __B) +{ + __v2du rshift; + __vector __bool long long shmask; + const __v2du shmax = { 64, 64 }; + __v2du result; + + rshift = vec_splat ((__v2du) __B, 0); + shmask = vec_cmplt (rshift, shmax); + result = vec_sr ((__v2du) __A, rshift); + result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask); + + return (__m128i) result; +} +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_pd (__m128d __A, __m128d __B) +{ + return (vec_and ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_andnot_pd (__m128d __A, __m128d __B) +{ + return (vec_andc ((__v2df) __B, (__v2df) __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_pd (__m128d __A, __m128d __B) +{ + return (vec_or ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_pd (__m128d __A, __m128d __B) +{ + return (vec_xor ((__v2df) __A, (__v2df) __B)); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)vec_and ((__v2di) __A, (__v2di) __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)vec_or ((__v2di) __A, (__v2di) __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi16 (__m128i const __A, int const __N) +{ + return (unsigned short) ((__v8hi)__A)[__N & 7]; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi16 (__m128i const __A, int const __D, int const __N) +{ + __v8hi result = (__v8hi)__A; + + result [(__N & 7)] = __D; + + return (__m128i) result; +} + +extern 
__inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epu8 (__m128i __A, __m128i __B)
+{
+  return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epu8 (__m128i __A, __m128i __B)
+{
+  return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
+}
+
+
+#ifdef _ARCH_PWR8
+/* Intrinsic functions that require PowerISA 2.07 minimum.  */
+
+/* Creates a 16-bit mask from the most significant bit of each byte.  */
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movemask_epi8 (__m128i __A)
+{
+  __vector unsigned long long result;
+  static const __vector unsigned char perm_mask =
+    {
+      0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+      0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
+    };
+
+  result = ((__vector unsigned long long)
+            vec_vbpermq ((__vector unsigned char) __A,
+                         (__vector unsigned char) perm_mask));
+
+#ifdef __LITTLE_ENDIAN__
+  return result[1];
+#else
+  return result[0];
+#endif
+}
+#endif /* _ARCH_PWR8 */
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mulhi_epu16 (__m128i __A, __m128i __B)
+{
+  __v4su w0, w1;
+  __v16qu xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
+      0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
+      0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+#endif
+    };
+
+  w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
+  w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
+  return (__m128i) vec_perm (w0, w1, xform1);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shufflehi_epi16 (__m128i __A, const int __mask)
+{
+  unsigned long element_selector_98 = __mask & 0x03;
+  unsigned long element_selector_BA = (__mask >> 2) & 0x03;
+  unsigned long element_selector_DC = (__mask >> 4) & 0x03;
+  unsigned long element_selector_FE = (__mask >> 6) & 0x03;
+  static const unsigned short permute_selectors[4] =
+    {
+#ifdef __LITTLE_ENDIAN__
+      0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+#else
+      0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+#endif
+    };
+  __v2du pmask =
+#ifdef __LITTLE_ENDIAN__
+      { 0x1716151413121110UL, 0UL};
+#else
+      { 0x1011121314151617UL, 0UL};
+#endif
+  __m64_union t;
+  __v2du a, r;
+
+  t.as_short[0] = permute_selectors[element_selector_98];
+  t.as_short[1] = permute_selectors[element_selector_BA];
+  t.as_short[2] = permute_selectors[element_selector_DC];
+  t.as_short[3] = permute_selectors[element_selector_FE];
+  pmask[1] = t.as_m64;
+  a = (__v2du)__A;
+  r = vec_perm (a, a, (__vector unsigned char)pmask);
+  return (__m128i) r;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shufflelo_epi16 (__m128i __A, const int __mask)
+{
+  unsigned long element_selector_10 = __mask & 0x03;
+  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned short permute_selectors[4] =
+    {
+#ifdef __LITTLE_ENDIAN__
+      0x0100, 0x0302, 0x0504, 0x0706
+#else
+      0x0001, 0x0203, 0x0405, 0x0607
+#endif
+    };
+  __v2du pmask =
+#ifdef __LITTLE_ENDIAN__
+      { 0UL, 0x1f1e1d1c1b1a1918UL};
+#else
+      { 0UL, 0x18191a1b1c1d1e1fUL};
+#endif
+  __m64_union t;
+  __v2du a, r;
+  t.as_short[0] = permute_selectors[element_selector_10];
+  t.as_short[1] = permute_selectors[element_selector_32];
+  t.as_short[2] = permute_selectors[element_selector_54];
+  t.as_short[3] = permute_selectors[element_selector_76];
+  pmask[0] = t.as_m64;
+  a = (__v2du)__A;
+  r = vec_perm (a, a, (__vector unsigned char)pmask);
+  return (__m128i) r;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_shuffle_epi32 (__m128i __A, const int __mask)
+{
+  unsigned long element_selector_10 = __mask & 0x03;
+  unsigned long element_selector_32 = (__mask >> 2) & 0x03;
+  unsigned long element_selector_54 = (__mask >> 4) & 0x03;
+  unsigned long element_selector_76 = (__mask >> 6) & 0x03;
+  static const unsigned int permute_selectors[4] =
+    {
+#ifdef __LITTLE_ENDIAN__
+      0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+#else
+      0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
+#endif
+    };
+  __v4su t;
+
+  t[0] = permute_selectors[element_selector_10];
+  t[1] = permute_selectors[element_selector_32];
+  t[2] = permute_selectors[element_selector_54] + 0x10101010;
+  t[3] = permute_selectors[element_selector_76] + 0x10101010;
+  return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
+{
+  __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+  __v16qu mask, tmp;
+  __m128i_u *p = (__m128i_u*)__C;
+
+  tmp = (__v16qu)_mm_loadu_si128(p);
+  mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
+  tmp = vec_sel (tmp, (__v16qu)__A, mask);
+  _mm_storeu_si128 (p, (__m128i)tmp);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_epu8 (__m128i __A, __m128i __B)
+{
+  return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_avg_epu16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
+}
+
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sad_epu8 (__m128i __A, __m128i __B)
+{
+  __v16qu a, b;
+  __v16qu vmin, vmax, vabsdiff;
+  __v4si vsum;
+  const __v4su zero = { 0, 0, 0, 0 };
+  __v4si result;
+
+  a = (__v16qu) __A;
+  b = (__v16qu) __B;
+  vmin = vec_min (a, b);
+  vmax = vec_max (a, b);
+  vabsdiff = vec_sub (vmax, vmin);
+  /* Sum four groups of bytes into integers.  */
+  vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+  /* Sum across four integers with two integer results.  */
+  result = vec_sum2s (vsum, (__vector signed int) zero);
+  /* Rotate the sums into the correct position.  */
+#ifdef __LITTLE_ENDIAN__
+  result = vec_sld (result, result, 4);
+#else
+  result = vec_sld (result, result, 6);
+#endif
+  return (__m128i) result;
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_stream_si32 (int *__A, int __B)
+{
+  /* Use the data cache block touch for store transient.
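+     (Added note: dcbtstt is a transient-store cache hint, telling the
+     core the block is unlikely to be reused soon; it roughly stands in
+     for the non-temporal intent of the x86 MOVNT* stores.)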
*/ + __asm__ ( + "dcbtstt 0,%0" + : + : "b" (__A) + : "memory" + ); + *__A = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si64 (long long int *__A, long long int __B) +{ + /* Use the data cache block touch for store transient. */ + __asm__ ( + " dcbtstt 0,%0" + : + : "b" (__A) + : "memory" + ); + *__A = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si128 (__m128i *__A, __m128i __B) +{ + /* Use the data cache block touch for store transient. */ + __asm__ ( + "dcbtstt 0,%0" + : + : "b" (__A) + : "memory" + ); + *__A = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_pd (double *__A, __m128d __B) +{ + /* Use the data cache block touch for store transient. */ + __asm__ ( + "dcbtstt 0,%0" + : + : "b" (__A) + : "memory" + ); + *(__m128d*)__A = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clflush (void const *__A) +{ + /* Use the data cache block flush. */ + __asm__ ( + "dcbf 0,%0" + : + : "b" (__A) + : "memory" + ); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lfence (void) +{ + /* Use light weight sync for load to load ordering. */ + __atomic_thread_fence (__ATOMIC_RELEASE); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mfence (void) +{ + /* Use heavy weight sync for any to any ordering. */ + __atomic_thread_fence (__ATOMIC_SEQ_CST); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_si128 (int __A) +{ + return _mm_set_epi32 (0, 0, 0, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si128 (long long __A) +{ + return __extension__ (__m128i)(__v2di){ __A, 0LL }; +} + +/* Microsoft intrinsic. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_si128 (long long __A) +{ + return __extension__ (__m128i)(__v2di){ __A, 0LL }; +} + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. 
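+   (Added example, not upstream text:
+     __m128i bits = _mm_castpd_si128 (_mm_set1_pd (1.0));
+   leaves the IEEE-754 bit pattern of 1.0 in each 64-bit lane unchanged.)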
*/
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_ps(__m128d __A)
+{
+  return (__m128) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castpd_si128(__m128d __A)
+{
+  return (__m128i) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_pd(__m128 __A)
+{
+  return (__m128d) __A;
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castps_si128(__m128 __A)
+{
+  return (__m128i) __A;
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_ps(__m128i __A)
+{
+  return (__m128) __A;
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_castsi128_pd(__m128i __A)
+{
+  return (__m128d) __A;
+}
+
+#else
+#include_next <emmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* EMMINTRIN_H_ */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/mm_malloc.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/mm_malloc.h
new file mode 100644
index 0000000..24b14c8
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/mm_malloc.h
@@ -0,0 +1,50 @@
+/*===---- mm_malloc.h - Implementation of _mm_malloc and _mm_free ----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM_MALLOC_H_INCLUDED
+#define _MM_MALLOC_H_INCLUDED
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <stdlib.h>
+
+/* We can't depend on <stdlib.h> since the prototype of posix_memalign
+   may not be visible.  */
+#ifndef __cplusplus
+extern int posix_memalign (void **, size_t, size_t);
+#else
+extern "C" int posix_memalign (void **, size_t, size_t) throw ();
+#endif
+
+static __inline void *
+_mm_malloc (size_t size, size_t alignment)
+{
+  /* PowerPC64 ELF V2 ABI requires quadword alignment.  */
+  size_t vec_align = sizeof (__vector float);
+  void *ptr;
+
+  if (alignment < vec_align)
+    alignment = vec_align;
+  if (posix_memalign (&ptr, alignment, size) == 0)
+    return ptr;
+  else
+    return NULL;
+}
+
+static __inline void
+_mm_free (void * ptr)
+{
+  free (ptr);
+}
+
+#else
+#include_next <mm_malloc.h>
+#endif
+
+#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/mmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/mmintrin.h
new file mode 100644
index 0000000..c55c447
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/mmintrin.h
@@ -0,0 +1,1450 @@
+/*===---- mmintrin.h - Implementation of MMX intrinsics on PowerPC ---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.  */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the PowerPC target doesn't support a native 64-bit vector type, we
+   typedef __m64 to 64-bit unsigned long long in MMX intrinsics, which
+   works well for _si64 and some _pi32 operations.
+
+   For _pi16 and _pi8 operations, it's better to transfer __m64 into
+   a 128-bit PowerPC vector first.  Power8 introduced direct register
+   move instructions, which help with a more efficient implementation.
+
+   It's the user's responsibility to determine whether the results of such
+   a port are acceptable or further changes are needed.  Please note that
+   much code using Intel intrinsics CAN BE REWRITTEN in more portable and
+   efficient standard C or GNU C extensions with 64-bit scalar
+   operations, or 128-bit SSE/Altivec operations, which are more
+   recommended. */
+#error \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components. */
+typedef __attribute__((__aligned__(8))) unsigned long long __m64;
+
+typedef __attribute__((__aligned__(8))) union {
+  __m64 as_m64;
+  char as_char[8];
+  signed char as_signed_char[8];
+  short as_short[4];
+  int as_int[2];
+  long long as_long_long;
+  float as_float[2];
+  double as_double;
+} __m64_union;
+
+/* Empty the multimedia state. */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_empty(void) {
+  /* nothing to do on PowerPC. */
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_empty(void) {
+  /* nothing to do on PowerPC. */
+}
+
+/* Convert I to a __m64 object.  The integer is zero-extended to 64-bits. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_si64(int __i) {
+  return (__m64)(unsigned int)__i;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_from_int(int __i) {
+  return _mm_cvtsi32_si64(__i);
+}
+
+/* Convert the lower 32 bits of the __m64 object into an integer. */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_si32(__m64 __i) {
+  return ((int)__i);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_to_int(__m64 __i) {
+  return _mm_cvtsi64_si32(__i);
+}
+
+/* Convert I to a __m64 object. */
+
+/* Intel intrinsic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_from_int64(long long __i) {
+  return (__m64)__i;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_m64(long long __i) {
+  return (__m64)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_si64(long long __i) {
+  return (__m64)__i;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pi64x(long long __i) {
+  return (__m64)__i;
+}
+
+/* Convert the __m64 object to a 64-bit integer. */
+
+/* Intel intrinsic.
*/ +extern __inline long long + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_to_int64(__m64 __i) { + return (long long)__i; +} + +extern __inline long long + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cvtm64_si64(__m64 __i) { + return (long long)__i; +} + +/* Microsoft intrinsic. */ +extern __inline long long + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cvtsi64_si64x(__m64 __i) { + return (long long)__i; +} + +#ifdef _ARCH_PWR8 +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with signed saturation. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_packs_pi16(__m64 __m1, __m64 __m2) { + __vector signed short vm1; + __vector signed char vresult; + + vm1 = (__vector signed short)(__vector unsigned long long) +#ifdef __LITTLE_ENDIAN__ + {__m1, __m2}; +#else + {__m2, __m1}; +#endif + vresult = vec_packs(vm1, vm1); + return (__m64)((__vector long long)vresult)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_packsswb(__m64 __m1, __m64 __m2) { + return _mm_packs_pi16(__m1, __m2); +} + +/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with signed saturation. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_packs_pi32(__m64 __m1, __m64 __m2) { + __vector signed int vm1; + __vector signed short vresult; + + vm1 = (__vector signed int)(__vector unsigned long long) +#ifdef __LITTLE_ENDIAN__ + {__m1, __m2}; +#else + {__m2, __m1}; +#endif + vresult = vec_packs(vm1, vm1); + return (__m64)((__vector long long)vresult)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_packssdw(__m64 __m1, __m64 __m2) { + return _mm_packs_pi32(__m1, __m2); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with unsigned saturation. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_packs_pu16(__m64 __m1, __m64 __m2) { + __vector unsigned char r; + __vector signed short vm1 = (__vector signed short)(__vector long long) +#ifdef __LITTLE_ENDIAN__ + {__m1, __m2}; +#else + {__m2, __m1}; +#endif + const __vector signed short __zero = {0}; + __vector __bool short __select = vec_cmplt(vm1, __zero); + r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1); + __vector __bool char packsel = vec_pack(__select, __select); + r = vec_sel(r, (const __vector unsigned char)__zero, packsel); + return (__m64)((__vector long long)r)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_packuswb(__m64 __m1, __m64 __m2) { + return _mm_packs_pu16(__m1, __m2); +} +#endif /* end ARCH_PWR8 */ + +/* Interleave the four 8-bit values from the high half of M1 with the four + 8-bit values from the high half of M2. 
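+   (Added example, not upstream text: with M1 bytes {a0..a7} and M2
+   bytes {b0..b7}, the result is {a4, b4, a5, b5, a6, b6, a7, b7}.)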
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector unsigned char a, b, c; + + a = (__vector unsigned char)vec_splats(__m1); + b = (__vector unsigned char)vec_splats(__m2); + c = vec_mergel(a, b); + return (__m64)((__vector long long)c)[1]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_char[0] = m1.as_char[4]; + res.as_char[1] = m2.as_char[4]; + res.as_char[2] = m1.as_char[5]; + res.as_char[3] = m2.as_char[5]; + res.as_char[4] = m1.as_char[6]; + res.as_char[5] = m2.as_char[6]; + res.as_char[6] = m1.as_char[7]; + res.as_char[7] = m2.as_char[7]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_punpckhbw(__m64 __m1, __m64 __m2) { + return _mm_unpackhi_pi8(__m1, __m2); +} + +/* Interleave the two 16-bit values from the high half of M1 with the two + 16-bit values from the high half of M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_short[0] = m1.as_short[2]; + res.as_short[1] = m2.as_short[2]; + res.as_short[2] = m1.as_short[3]; + res.as_short[3] = m2.as_short[3]; + + return (__m64)res.as_m64; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_punpckhwd(__m64 __m1, __m64 __m2) { + return _mm_unpackhi_pi16(__m1, __m2); +} +/* Interleave the 32-bit value from the high half of M1 with the 32-bit + value from the high half of M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_int[0] = m1.as_int[1]; + res.as_int[1] = m2.as_int[1]; + + return (__m64)res.as_m64; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_punpckhdq(__m64 __m1, __m64 __m2) { + return _mm_unpackhi_pi32(__m1, __m2); +} +/* Interleave the four 8-bit values from the low half of M1 with the four + 8-bit values from the low half of M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector unsigned char a, b, c; + + a = (__vector unsigned char)vec_splats(__m1); + b = (__vector unsigned char)vec_splats(__m2); + c = vec_mergel(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_char[0] = m1.as_char[0]; + res.as_char[1] = m2.as_char[0]; + res.as_char[2] = m1.as_char[1]; + res.as_char[3] = m2.as_char[1]; + res.as_char[4] = m1.as_char[2]; + res.as_char[5] = m2.as_char[2]; + res.as_char[6] = m1.as_char[3]; + res.as_char[7] = m2.as_char[3]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_punpcklbw(__m64 __m1, __m64 __m2) { + return _mm_unpacklo_pi8(__m1, __m2); +} +/* Interleave the two 16-bit values from the low half of M1 with the two + 16-bit values from the low half of M2. 
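+   (Added example, not upstream text: {a0, a1, a2, a3} and
+   {b0, b1, b2, b3} interleave to {a0, b0, a1, b1}.)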
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_short[0] = m1.as_short[0]; + res.as_short[1] = m2.as_short[0]; + res.as_short[2] = m1.as_short[1]; + res.as_short[3] = m2.as_short[1]; + + return (__m64)res.as_m64; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_punpcklwd(__m64 __m1, __m64 __m2) { + return _mm_unpacklo_pi16(__m1, __m2); +} + +/* Interleave the 32-bit value from the low half of M1 with the 32-bit + value from the low half of M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) { + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_int[0] = m1.as_int[0]; + res.as_int[1] = m2.as_int[0]; + + return (__m64)res.as_m64; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_punpckldq(__m64 __m1, __m64 __m2) { + return _mm_unpacklo_pi32(__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_add_pi8(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed char a, b, c; + + a = (__vector signed char)vec_splats(__m1); + b = (__vector signed char)vec_splats(__m2); + c = vec_add(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_char[0] = m1.as_char[0] + m2.as_char[0]; + res.as_char[1] = m1.as_char[1] + m2.as_char[1]; + res.as_char[2] = m1.as_char[2] + m2.as_char[2]; + res.as_char[3] = m1.as_char[3] + m2.as_char[3]; + res.as_char[4] = m1.as_char[4] + m2.as_char[4]; + res.as_char[5] = m1.as_char[5] + m2.as_char[5]; + res.as_char[6] = m1.as_char[6] + m2.as_char[6]; + res.as_char[7] = m1.as_char[7] + m2.as_char[7]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddb(__m64 __m1, __m64 __m2) { + return _mm_add_pi8(__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_add_pi16(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = vec_add(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_short[0] = m1.as_short[0] + m2.as_short[0]; + res.as_short[1] = m1.as_short[1] + m2.as_short[1]; + res.as_short[2] = m1.as_short[2] + m2.as_short[2]; + res.as_short[3] = m1.as_short[3] + m2.as_short[3]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddw(__m64 __m1, __m64 __m2) { + return _mm_add_pi16(__m1, __m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2. 
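+
+   Editorial sketch (not part of the upstream header): these element-wise
+   adds wrap modulo 2^32, e.g.
+
+     __m64 a = _mm_set_pi32(1, 0x7FFFFFFF);
+     __m64 r = _mm_add_pi32(a, _mm_set_pi32(1, 1));
+
+   the low lane wraps to 0x80000000 (INT_MIN) and the high lane becomes 2;
+   contrast the saturating _mm_adds_* family further below.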
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_add_pi32(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR9 + __vector signed int a, b, c; + + a = (__vector signed int)vec_splats(__m1); + b = (__vector signed int)vec_splats(__m2); + c = vec_add(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_int[0] = m1.as_int[0] + m2.as_int[0]; + res.as_int[1] = m1.as_int[1] + m2.as_int[1]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddd(__m64 __m1, __m64 __m2) { + return _mm_add_pi32(__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_sub_pi8(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed char a, b, c; + + a = (__vector signed char)vec_splats(__m1); + b = (__vector signed char)vec_splats(__m2); + c = vec_sub(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_char[0] = m1.as_char[0] - m2.as_char[0]; + res.as_char[1] = m1.as_char[1] - m2.as_char[1]; + res.as_char[2] = m1.as_char[2] - m2.as_char[2]; + res.as_char[3] = m1.as_char[3] - m2.as_char[3]; + res.as_char[4] = m1.as_char[4] - m2.as_char[4]; + res.as_char[5] = m1.as_char[5] - m2.as_char[5]; + res.as_char[6] = m1.as_char[6] - m2.as_char[6]; + res.as_char[7] = m1.as_char[7] - m2.as_char[7]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psubb(__m64 __m1, __m64 __m2) { + return _mm_sub_pi8(__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_sub_pi16(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = vec_sub(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_short[0] = m1.as_short[0] - m2.as_short[0]; + res.as_short[1] = m1.as_short[1] - m2.as_short[1]; + res.as_short[2] = m1.as_short[2] - m2.as_short[2]; + res.as_short[3] = m1.as_short[3] - m2.as_short[3]; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psubw(__m64 __m1, __m64 __m2) { + return _mm_sub_pi16(__m1, __m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1. 
 */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+  __vector signed int a, b, c;
+
+  a = (__vector signed int)vec_splats(__m1);
+  b = (__vector signed int)vec_splats(__m2);
+  c = vec_sub(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = m1.as_int[0] - m2.as_int[0];
+  res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubd(__m64 __m1, __m64 __m2) {
+  return _mm_sub_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 + __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 - __m2);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_si64(__m64 __m, __m64 __count) {
+  return (__m << __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psllq(__m64 __m, __m64 __count) {
+  return _mm_sll_si64(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_si64(__m64 __m, const int __count) {
+  return (__m << __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psllqi(__m64 __m, const int __count) {
+  return _mm_slli_si64(__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_si64(__m64 __m, __m64 __count) {
+  return (__m >> __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlq(__m64 __m, __m64 __count) {
+  return _mm_srl_si64(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_si64(__m64 __m, const int __count) {
+  return (__m >> __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlqi(__m64 __m, const int __count) {
+  return _mm_srli_si64(__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 & __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pand(__m64 __m1, __m64 __m2) {
+  return _mm_and_si64(__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+   64-bit value in M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_si64(__m64 __m1, __m64 __m2) {
+  return (~__m1 & __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pandn(__m64 __m1, __m64 __m2) {
+  return _mm_andnot_si64(__m1, __m2);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2.
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_or_si64(__m64 __m1, __m64 __m2) { + return (__m1 | __m2); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_por(__m64 __m1, __m64 __m2) { + return _mm_or_si64(__m1, __m2); +} + +/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_xor_si64(__m64 __m1, __m64 __m2) { + return (__m1 ^ __m2); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pxor(__m64 __m1, __m64 __m2) { + return _mm_xor_si64(__m1, __m2); +} + +/* Creates a 64-bit zero. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_setzero_si64(void) { + return (__m64)0; +} + +/* Compare eight 8-bit values. The result of the comparison is 0xFF if the + test is true and zero if false. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) { +#if defined(_ARCH_PWR6) && defined(__powerpc64__) + __m64 res; + __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :); + return (res); +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0; + res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0; + res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0; + res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0; + res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0; + res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0; + res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0; + res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pcmpeqb(__m64 __m1, __m64 __m2) { + return _mm_cmpeq_pi8(__m1, __m2); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed char a, b, c; + + a = (__vector signed char)vec_splats(__m1); + b = (__vector signed char)vec_splats(__m2); + c = (__vector signed char)vec_cmpgt(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0; + res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0; + res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0; + res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0; + res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0; + res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0; + res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0; + res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pcmpgtb(__m64 __m1, __m64 __m2) { + return _mm_cmpgt_pi8(__m1, __m2); +} + +/* Compare four 16-bit values. The result of the comparison is 0xFFFF if + the test is true and zero if false. 
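+
+   Editorial sketch (not part of the upstream header) of the usual
+   mask-and-select idiom built on these comparisons:
+
+     __m64 a = _mm_set_pi16(1, 2, 3, 4);
+     __m64 b = _mm_set_pi16(1, 0, 3, 0);
+     __m64 m = _mm_cmpeq_pi16(a, b);   0xFFFF where lanes match, else 0
+     __m64 r = _mm_and_si64(m, a);     zeroes the non-matching lanes of a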
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = (__vector signed short)vec_cmpeq(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0; + res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0; + res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0; + res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pcmpeqw(__m64 __m1, __m64 __m2) { + return _mm_cmpeq_pi16(__m1, __m2); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR8 + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = (__vector signed short)vec_cmpgt(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0; + res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0; + res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0; + res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pcmpgtw(__m64 __m1, __m64 __m2) { + return _mm_cmpgt_pi16(__m1, __m2); +} + +/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if + the test is true and zero if false. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR9 + __vector signed int a, b, c; + + a = (__vector signed int)vec_splats(__m1); + b = (__vector signed int)vec_splats(__m2); + c = (__vector signed int)vec_cmpeq(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0; + res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pcmpeqd(__m64 __m1, __m64 __m2) { + return _mm_cmpeq_pi32(__m1, __m2); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) { +#if _ARCH_PWR9 + __vector signed int a, b, c; + + a = (__vector signed int)vec_splats(__m1); + b = (__vector signed int)vec_splats(__m2); + c = (__vector signed int)vec_cmpgt(a, b); + return (__m64)((__vector long long)c)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __m1; + m2.as_m64 = __m2; + + res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0; + res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? 
-1 : 0; + + return (__m64)res.as_m64; +#endif +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pcmpgtd(__m64 __m1, __m64 __m2) { + return _mm_cmpgt_pi32(__m1, __m2); +} + +#if _ARCH_PWR8 +/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed + saturated arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_adds_pi8(__m64 __m1, __m64 __m2) { + __vector signed char a, b, c; + + a = (__vector signed char)vec_splats(__m1); + b = (__vector signed char)vec_splats(__m2); + c = vec_adds(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddsb(__m64 __m1, __m64 __m2) { + return _mm_adds_pi8(__m1, __m2); +} +/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed + saturated arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_adds_pi16(__m64 __m1, __m64 __m2) { + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = vec_adds(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddsw(__m64 __m1, __m64 __m2) { + return _mm_adds_pi16(__m1, __m2); +} +/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned + saturated arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_adds_pu8(__m64 __m1, __m64 __m2) { + __vector unsigned char a, b, c; + + a = (__vector unsigned char)vec_splats(__m1); + b = (__vector unsigned char)vec_splats(__m2); + c = vec_adds(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddusb(__m64 __m1, __m64 __m2) { + return _mm_adds_pu8(__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned + saturated arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_adds_pu16(__m64 __m1, __m64 __m2) { + __vector unsigned short a, b, c; + + a = (__vector unsigned short)vec_splats(__m1); + b = (__vector unsigned short)vec_splats(__m2); + c = vec_adds(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_paddusw(__m64 __m1, __m64 __m2) { + return _mm_adds_pu16(__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed + saturating arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_subs_pi8(__m64 __m1, __m64 __m2) { + __vector signed char a, b, c; + + a = (__vector signed char)vec_splats(__m1); + b = (__vector signed char)vec_splats(__m2); + c = vec_subs(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psubsb(__m64 __m1, __m64 __m2) { + return _mm_subs_pi8(__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + signed saturating arithmetic. 
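+
+   Editorial sketch (not part of the upstream header) of the saturating
+   behaviour:
+
+     __m64 a = _mm_set1_pi16(-32768);             every lane at SHRT_MIN
+     __m64 r = _mm_subs_pi16(a, _mm_set1_pi16(1));
+
+   every lane of r stays clamped at -32768 instead of wrapping to 32767.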
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_subs_pi16(__m64 __m1, __m64 __m2) { + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = vec_subs(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psubsw(__m64 __m1, __m64 __m2) { + return _mm_subs_pi16(__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using + unsigned saturating arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_subs_pu8(__m64 __m1, __m64 __m2) { + __vector unsigned char a, b, c; + + a = (__vector unsigned char)vec_splats(__m1); + b = (__vector unsigned char)vec_splats(__m2); + c = vec_subs(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psubusb(__m64 __m1, __m64 __m2) { + return _mm_subs_pu8(__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + unsigned saturating arithmetic. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_subs_pu16(__m64 __m1, __m64 __m2) { + __vector unsigned short a, b, c; + + a = (__vector unsigned short)vec_splats(__m1); + b = (__vector unsigned short)vec_splats(__m2); + c = vec_subs(a, b); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psubusw(__m64 __m1, __m64 __m2) { + return _mm_subs_pu16(__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_madd_pi16(__m64 __m1, __m64 __m2) { + __vector signed short a, b; + __vector signed int c; + __vector signed int zero = {0, 0, 0, 0}; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = vec_vmsumshm(a, b, zero); + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pmaddwd(__m64 __m1, __m64 __m2) { + return _mm_madd_pi16(__m1, __m2); +} +/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. 
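+
+   Editorial sketch (not part of the upstream header):
+
+     __m64 a = _mm_set1_pi16(0x4000);   16384 in every lane
+     __m64 r = _mm_mulhi_pi16(a, a);
+
+   16384 * 16384 = 0x10000000, so every lane of r holds 0x1000.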
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_mulhi_pi16(__m64 __m1, __m64 __m2) { + __vector signed short a, b; + __vector signed short c; + __vector signed int w0, w1; + __vector unsigned char xform1 = { +#ifdef __LITTLE_ENDIAN__ + 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A, + 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F +#else + 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00, + 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15 +#endif + }; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + + w0 = vec_vmulesh(a, b); + w1 = vec_vmulosh(a, b); + c = (__vector signed short)vec_perm(w0, w1, xform1); + + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pmulhw(__m64 __m1, __m64 __m2) { + return _mm_mulhi_pi16(__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce + the low 16 bits of the results. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_mullo_pi16(__m64 __m1, __m64 __m2) { + __vector signed short a, b, c; + + a = (__vector signed short)vec_splats(__m1); + b = (__vector signed short)vec_splats(__m2); + c = a * b; + return (__m64)((__vector long long)c)[0]; +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pmullw(__m64 __m1, __m64 __m2) { + return _mm_mullo_pi16(__m1, __m2); +} + +/* Shift four 16-bit values in M left by COUNT. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_sll_pi16(__m64 __m, __m64 __count) { + __vector signed short m, r; + __vector unsigned short c; + + if (__count <= 15) { + m = (__vector signed short)vec_splats(__m); + c = (__vector unsigned short)vec_splats((unsigned short)__count); + r = vec_sl(m, (__vector unsigned short)c); + return (__m64)((__vector long long)r)[0]; + } else + return (0); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psllw(__m64 __m, __m64 __count) { + return _mm_sll_pi16(__m, __count); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_slli_pi16(__m64 __m, int __count) { + /* Promote int to long then invoke mm_sll_pi16. */ + return _mm_sll_pi16(__m, __count); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psllwi(__m64 __m, int __count) { + return _mm_slli_pi16(__m, __count); +} + +/* Shift two 32-bit values in M left by COUNT. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_sll_pi32(__m64 __m, __m64 __count) { + __m64_union m, res; + + m.as_m64 = __m; + + res.as_int[0] = m.as_int[0] << __count; + res.as_int[1] = m.as_int[1] << __count; + return (res.as_m64); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_pslld(__m64 __m, __m64 __count) { + return _mm_sll_pi32(__m, __count); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_slli_pi32(__m64 __m, int __count) { + /* Promote int to long then invoke mm_sll_pi32. 
 */
+  return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pslldi(__m64 __m, int __count) {
+  return _mm_slli_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_pi16(__m64 __m, __m64 __count) {
+  __vector signed short m, r;
+  __vector unsigned short c;
+
+  if (__count <= 15) {
+    m = (__vector signed short)vec_splats(__m);
+    c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    r = vec_sra(m, (__vector unsigned short)c);
+    return (__m64)((__vector long long)r)[0];
+  } else
+    return (0);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psraw(__m64 __m, __m64 __count) {
+  return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sra_pi16. */
+  return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrawi(__m64 __m, int __count) {
+  return _mm_srai_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_pi32(__m64 __m, __m64 __count) {
+  __m64_union m, res;
+
+  m.as_m64 = __m;
+
+  res.as_int[0] = m.as_int[0] >> __count;
+  res.as_int[1] = m.as_int[1] >> __count;
+  return (res.as_m64);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrad(__m64 __m, __m64 __count) {
+  return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_pi32(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sra_pi32. */
+  return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psradi(__m64 __m, int __count) {
+  return _mm_srai_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_pi16(__m64 __m, __m64 __count) {
+  __vector unsigned short m, r;
+  __vector unsigned short c;
+
+  if (__count <= 15) {
+    m = (__vector unsigned short)vec_splats(__m);
+    c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    r = vec_sr(m, (__vector unsigned short)c);
+    return (__m64)((__vector long long)r)[0];
+  } else
+    return (0);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlw(__m64 __m, __m64 __count) {
+  return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_srl_pi16. */
+  return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlwi(__m64 __m, int __count) {
+  return _mm_srli_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros.
*/ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_srl_pi32(__m64 __m, __m64 __count) { + __m64_union m, res; + + m.as_m64 = __m; + + res.as_int[0] = (unsigned int)m.as_int[0] >> __count; + res.as_int[1] = (unsigned int)m.as_int[1] >> __count; + return (res.as_m64); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psrld(__m64 __m, __m64 __count) { + return _mm_srl_pi32(__m, __count); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_srli_pi32(__m64 __m, int __count) { + /* Promote int to long then invoke mm_srl_pi32. */ + return _mm_srl_pi32(__m, __count); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _m_psrldi(__m64 __m, int __count) { + return _mm_srli_pi32(__m, __count); +} +#endif /* _ARCH_PWR8 */ + +/* Creates a vector of two 32-bit values; I0 is least significant. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_set_pi32(int __i1, int __i0) { + __m64_union res; + + res.as_int[0] = __i0; + res.as_int[1] = __i1; + return (res.as_m64); +} + +/* Creates a vector of four 16-bit values; W0 is least significant. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) { + __m64_union res; + + res.as_short[0] = __w0; + res.as_short[1] = __w1; + res.as_short[2] = __w2; + res.as_short[3] = __w3; + return (res.as_m64); +} + +/* Creates a vector of eight 8-bit values; B0 is least significant. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, + char __b2, char __b1, char __b0) { + __m64_union res; + + res.as_char[0] = __b0; + res.as_char[1] = __b1; + res.as_char[2] = __b2; + res.as_char[3] = __b3; + res.as_char[4] = __b4; + res.as_char[5] = __b5; + res.as_char[6] = __b6; + res.as_char[7] = __b7; + return (res.as_m64); +} + +/* Similar, but with the arguments in reverse order. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_setr_pi32(int __i0, int __i1) { + __m64_union res; + + res.as_int[0] = __i0; + res.as_int[1] = __i1; + return (res.as_m64); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) { + return _mm_set_pi16(__w3, __w2, __w1, __w0); +} + +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, + char __b5, char __b6, char __b7) { + return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/* Creates a vector of two 32-bit values, both elements containing I. */ +extern __inline __m64 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_set1_pi32(int __i) { + __m64_union res; + + res.as_int[0] = __i; + res.as_int[1] = __i; + return (res.as_m64); +} + +/* Creates a vector of four 16-bit values, all elements containing W. 
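+
+   (Editorial aside, not from the upstream header: the _mm_set_* forms
+   above take their arguments most-significant lane first, while the
+   _mm_setr_* forms take them in memory order, so for example
+   _mm_set_pi16(3, 2, 1, 0) == _mm_setr_pi16(0, 1, 2, 3).)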
 */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pi16(short __w) {
+#if _ARCH_PWR9
+  __vector signed short w;
+
+  w = (__vector signed short)vec_splats(__w);
+  return (__m64)((__vector long long)w)[0];
+#else
+  __m64_union res;
+
+  res.as_short[0] = __w;
+  res.as_short[1] = __w;
+  res.as_short[2] = __w;
+  res.as_short[3] = __w;
+  return (res.as_m64);
+#endif
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pi8(signed char __b) {
+#if _ARCH_PWR8
+  __vector signed char b;
+
+  b = (__vector signed char)vec_splats(__b);
+  return (__m64)((__vector long long)b)[0];
+#else
+  __m64_union res;
+
+  res.as_char[0] = __b;
+  res.as_char[1] = __b;
+  res.as_char[2] = __b;
+  res.as_char[3] = __b;
+  res.as_char[4] = __b;
+  res.as_char[5] = __b;
+  res.as_char[6] = __b;
+  res.as_char[7] = __b;
+  return (res.as_m64);
+#endif
+}
+
+#else
+#include_next <mmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/pmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/pmmintrin.h
new file mode 100644
index 0000000..6d93383
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/pmmintrin.h
@@ -0,0 +1,150 @@
+/*===---- pmmintrin.h - Implementation of SSE3 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets.
+
+   In the specific case of X86 SSE3 intrinsics, the PowerPC VMX/VSX ISA
+   is a good match for most SIMD operations. However the Horizontal
+   add/sub requires the data pairs be permuted into separate
+   registers with vertical even/odd alignment for the operation.
+   And the addsub operation requires the sign of only the even numbered
+   elements be flipped (xored with -0.0).
+   For larger blocks of code using these intrinsic implementations,
+   the compiler should be able to schedule instructions to avoid
+   additional latency.
+
+   In the specific case of the monitor and mwait instructions there is
+   no direct equivalent in the PowerISA at this time. So those
+   intrinsics are not implemented. */
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
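+
+/* Editorial illustration of the addsub semantics described above (a
+   sketch, not part of the upstream header): with
+
+     __m128 x = _mm_set_ps(8.0f, 6.0f, 4.0f, 2.0f);   elements {2, 4, 6, 8}
+     __m128 y = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   elements {1, 2, 3, 4}
+
+   _mm_addsub_ps(x, y) computes {2-1, 4+2, 6-3, 8+4} = {1, 6, 3, 12}: only
+   the even-numbered elements have the sign of Y flipped, which is exactly
+   the XOR-with-{-0.0, 0.0, -0.0, 0.0} trick used below. */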
+#endif
+
+#ifndef PMMINTRIN_H_
+#define PMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+/* We need definitions from the SSE2 and SSE header files. */
+#include <emmintrin.h>
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_addsub_ps (__m128 __X, __m128 __Y)
+{
+  const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
+  __v4sf even_neg_Y = vec_xor(__Y, even_n0);
+  return (__m128) vec_add (__X, even_neg_Y);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_addsub_pd (__m128d __X, __m128d __Y)
+{
+  const __v2df even_n0 = {-0.0, 0.0};
+  __v2df even_neg_Y = vec_xor(__Y, even_n0);
+  return (__m128d) vec_add (__X, even_neg_Y);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_ps (__m128 __X, __m128 __Y)
+{
+  __vector unsigned char xform2 = {
+      0x00, 0x01, 0x02, 0x03,
+      0x08, 0x09, 0x0A, 0x0B,
+      0x10, 0x11, 0x12, 0x13,
+      0x18, 0x19, 0x1A, 0x1B
+    };
+  __vector unsigned char xform1 = {
+      0x04, 0x05, 0x06, 0x07,
+      0x0C, 0x0D, 0x0E, 0x0F,
+      0x14, 0x15, 0x16, 0x17,
+      0x1C, 0x1D, 0x1E, 0x1F
+    };
+  return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
+                           vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_ps (__m128 __X, __m128 __Y)
+{
+  __vector unsigned char xform2 = {
+      0x00, 0x01, 0x02, 0x03,
+      0x08, 0x09, 0x0A, 0x0B,
+      0x10, 0x11, 0x12, 0x13,
+      0x18, 0x19, 0x1A, 0x1B
+    };
+  __vector unsigned char xform1 = {
+      0x04, 0x05, 0x06, 0x07,
+      0x0C, 0x0D, 0x0E, 0x0F,
+      0x14, 0x15, 0x16, 0x17,
+      0x1C, 0x1D, 0x1E, 0x1F
+    };
+  return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
+                           vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_pd (__m128d __X, __m128d __Y)
+{
+  return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
+                            vec_mergel ((__v2df) __X, (__v2df)__Y));
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hsub_pd (__m128d __X, __m128d __Y)
+{
+  return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
+                            vec_mergel ((__v2df) __X, (__v2df)__Y));
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movehdup_ps (__m128 __X)
+{
+  return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
+}
+
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_moveldup_ps (__m128 __X)
+{
+  return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_loaddup_pd (double const *__P)
+{
+  return (__m128d) vec_splats (*__P);
+}
+
+extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movedup_pd (__m128d __X)
+{
+  return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
+}
+
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_lddqu_si128 (__m128i const *__P)
+{
+  return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+}
+
+/* POWER8 / POWER9 have no equivalent for _mm_monitor nor _mm_wait.
 */
+
+#else
+#include_next <pmmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* PMMINTRIN_H_ */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/smmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/smmintrin.h
new file mode 100644
index 0000000..64f0c76
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/smmintrin.h
@@ -0,0 +1,109 @@
+/*===---- smmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0.
+
+   NOTE: This is NOT a complete implementation of the SSE4 intrinsics! */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
+
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets. */
+#error \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef SMMINTRIN_H_
+#define SMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+#include <emmintrin.h>
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi8(__m128i __X, const int __N) {
+  return (unsigned char)((__v16qi)__X)[__N & 15];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi32(__m128i __X, const int __N) {
+  return ((__v4si)__X)[__N & 3];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_epi64(__m128i __X, const int __N) {
+  return ((__v2di)__X)[__N & 1];
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_extract_ps(__m128 __X, const int __N) {
+  return ((__v4si)__X)[__N & 3];
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
+  __v16qi __charmask = vec_splats((signed char)__imm8);
+  __charmask = vec_gb(__charmask);
+  __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
+#ifdef __BIG_ENDIAN__
+  __shortmask = vec_reve(__shortmask);
+#endif
+  return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
+  const __v16qu __seven = vec_splats((unsigned char)0x07);
+  __v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
+  return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+  __v16qi result = (__v16qi)__A;
+  result[__N & 0xf] = __D;
+  return (__m128i)result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+  __v4si result = (__v4si)__A;
+  result[__N & 3] = __D;
+  return (__m128i)result;
+}
+
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+  __v2di result = (__v2di)__A;
+  result[__N & 1] = __D;
+  return (__m128i)result;
+}
+
+#else
+#include_next <smmintrin.h>
+#endif /* defined(__linux__) && defined(__ppc64__) */
+
+#endif /* SMMINTRIN_H_ */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/tmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/tmmintrin.h
new file mode 100644
index 0000000..b5a935d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/tmmintrin.h
@@ -0,0 +1,495 @@
+/*===---- tmmintrin.h - Implementation of SSSE3 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+   makes explicit use of Intel intrinsics to powerpc64le.
+
+   It is the user's responsibility to determine if the results are
+   acceptable and make additional changes as necessary.
+
+   Note that much code that uses Intel intrinsics can be rewritten in
+   standard C or GNU C extensions, which are more portable and better
+   optimized across multiple targets. */
+#error \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef TMMINTRIN_H_
+#define TMMINTRIN_H_
+
+#if defined(__linux__) && defined(__ppc64__)
+
+#include <altivec.h>
+
+/* We need definitions from the SSE header files.
 */
+#include <emmintrin.h>
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi16 (__m128i __A)
+{
+  return (__m128i) vec_abs ((__v8hi) __A);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi32 (__m128i __A)
+{
+  return (__m128i) vec_abs ((__v4si) __A);
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi8 (__m128i __A)
+{
+  return (__m128i) vec_abs ((__v16qi) __A);
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_pi16 (__m64 __A)
+{
+  __v8hi __B = (__v8hi) (__v2du) { __A, __A };
+  return (__m64) ((__v2du) vec_abs (__B))[0];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_pi32 (__m64 __A)
+{
+  __v4si __B = (__v4si) (__v2du) { __A, __A };
+  return (__m64) ((__v2du) vec_abs (__B))[0];
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_pi8 (__m64 __A)
+{
+  __v16qi __B = (__v16qi) (__v2du) { __A, __A };
+  return (__m64) ((__v2du) vec_abs (__B))[0];
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_alignr_epi8 (__m128i __A, __m128i __B, const unsigned int __count)
+{
+  if (__builtin_constant_p (__count) && __count < 16)
+    {
+#ifdef __LITTLE_ENDIAN__
+      __A = (__m128i) vec_reve ((__v16qu) __A);
+      __B = (__m128i) vec_reve ((__v16qu) __B);
+#endif
+      __A = (__m128i) vec_sld ((__v16qu) __B, (__v16qu) __A, __count);
+#ifdef __LITTLE_ENDIAN__
+      __A = (__m128i) vec_reve ((__v16qu) __A);
+#endif
+      return __A;
+    }
+
+  if (__count == 0)
+    return __B;
+
+  if (__count >= 16)
+    {
+      if (__count >= 32)
+        {
+          const __v16qu zero = { 0 };
+          return (__m128i) zero;
+        }
+      else
+        {
+          const __v16qu __shift =
+            vec_splats ((unsigned char) ((__count - 16) * 8));
+#ifdef __LITTLE_ENDIAN__
+          return (__m128i) vec_sro ((__v16qu) __A, __shift);
+#else
+          return (__m128i) vec_slo ((__v16qu) __A, __shift);
+#endif
+        }
+    }
+  else
+    {
+      const __v16qu __shiftA =
+        vec_splats ((unsigned char) ((16 - __count) * 8));
+      const __v16qu __shiftB = vec_splats ((unsigned char) (__count * 8));
+#ifdef __LITTLE_ENDIAN__
+      __A = (__m128i) vec_slo ((__v16qu) __A, __shiftA);
+      __B = (__m128i) vec_sro ((__v16qu) __B, __shiftB);
+#else
+      __A = (__m128i) vec_sro ((__v16qu) __A, __shiftA);
+      __B = (__m128i) vec_slo ((__v16qu) __B, __shiftB);
+#endif
+      return (__m128i) vec_or ((__v16qu) __A, (__v16qu) __B);
+    }
+}
+
+extern __inline __m64
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_alignr_pi8 (__m64 __A, __m64 __B, unsigned int __count)
+{
+  if (__count < 16)
+    {
+      __v2du __C = { __B, __A };
+#ifdef __LITTLE_ENDIAN__
+      const __v4su __shift = { __count << 3, 0, 0, 0 };
+      __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift);
+#else
+      const __v4su __shift = { 0, 0, 0, __count << 3 };
+      __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift);
+#endif
+      return (__m64) __C[0];
+    }
+  else
+    {
+      const __m64 __zero = { 0 };
+      return __zero;
+    }
+}
+
+extern __inline __m128i
+__attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_hadd_epi16 (__m128i __A, __m128i __B)
+{
+  const __v16qu __P =
+    { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
+  const __v16qu __Q =
+    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
+  __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
+  __v8hi __D = vec_perm ((__v8hi)
__A, (__v8hi) __B, __Q); + return (__m128i) vec_add (__C, __D); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_epi32 (__m128i __A, __m128i __B) +{ + const __v16qu __P = + { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 }; + const __v16qu __Q = + { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 }; + __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P); + __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q); + return (__m128i) vec_add (__C, __D); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pi16 (__m64 __A, __m64 __B) +{ + __v8hi __C = (__v8hi) (__v2du) { __A, __B }; + const __v16qu __P = + { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 }; + const __v16qu __Q = + { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 }; + __v8hi __D = vec_perm (__C, __C, __Q); + __C = vec_perm (__C, __C, __P); + __C = vec_add (__C, __D); + return (__m64) ((__v2du) __C)[1]; +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pi32 (__m64 __A, __m64 __B) +{ + __v4si __C = (__v4si) (__v2du) { __A, __B }; + const __v16qu __P = + { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 }; + const __v16qu __Q = + { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 }; + __v4si __D = vec_perm (__C, __C, __Q); + __C = vec_perm (__C, __C, __P); + __C = vec_add (__C, __D); + return (__m64) ((__v2du) __C)[1]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadds_epi16 (__m128i __A, __m128i __B) +{ + __v4si __C = { 0 }, __D = { 0 }; + __C = vec_sum4s ((__v8hi) __A, __C); + __D = vec_sum4s ((__v8hi) __B, __D); + __C = (__v4si) vec_packs (__C, __D); + return (__m128i) __C; +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadds_pi16 (__m64 __A, __m64 __B) +{ + const __v4si __zero = { 0 }; + __v8hi __C = (__v8hi) (__v2du) { __A, __B }; + __v4si __D = vec_sum4s (__C, __zero); + __C = vec_packs (__D, __D); + return (__m64) ((__v2du) __C)[1]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_epi16 (__m128i __A, __m128i __B) +{ + const __v16qu __P = + { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 }; + const __v16qu __Q = + { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 }; + __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P); + __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q); + return (__m128i) vec_sub (__C, __D); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_epi32 (__m128i __A, __m128i __B) +{ + const __v16qu __P = + { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 }; + const __v16qu __Q = + { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 }; + __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P); + __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q); + return (__m128i) vec_sub (__C, __D); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pi16 (__m64 __A, __m64 __B) +{ + const __v16qu __P = + { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 }; + const __v16qu __Q = + { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 }; + __v8hi __C = (__v8hi) (__v2du) { __A, __B }; + __v8hi __D = vec_perm (__C, __C, __Q); + __C = vec_perm (__C, __C, __P); + __C = vec_sub (__C, 
__D); + return (__m64) ((__v2du) __C)[1]; +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pi32 (__m64 __A, __m64 __B) +{ + const __v16qu __P = + { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 }; + const __v16qu __Q = + { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 }; + __v4si __C = (__v4si) (__v2du) { __A, __B }; + __v4si __D = vec_perm (__C, __C, __Q); + __C = vec_perm (__C, __C, __P); + __C = vec_sub (__C, __D); + return (__m64) ((__v2du) __C)[1]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubs_epi16 (__m128i __A, __m128i __B) +{ + const __v16qu __P = + { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 }; + const __v16qu __Q = + { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 }; + __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P); + __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q); + return (__m128i) vec_subs (__C, __D); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubs_pi16 (__m64 __A, __m64 __B) +{ + const __v16qu __P = + { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 }; + const __v16qu __Q = + { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 }; + __v8hi __C = (__v8hi) (__v2du) { __A, __B }; + __v8hi __D = vec_perm (__C, __C, __P); + __v8hi __E = vec_perm (__C, __C, __Q); + __C = vec_subs (__D, __E); + return (__m64) ((__v2du) __C)[1]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_epi8 (__m128i __A, __m128i __B) +{ + const __v16qi __zero = { 0 }; + __vector __bool char __select = vec_cmplt ((__v16qi) __B, __zero); + __v16qi __C = vec_perm ((__v16qi) __A, (__v16qi) __A, (__v16qu) __B); + return (__m128i) vec_sel (__C, __zero, __select); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pi8 (__m64 __A, __m64 __B) +{ + const __v16qi __zero = { 0 }; + __v16qi __C = (__v16qi) (__v2du) { __A, __A }; + __v16qi __D = (__v16qi) (__v2du) { __B, __B }; + __vector __bool char __select = vec_cmplt ((__v16qi) __D, __zero); + __C = vec_perm ((__v16qi) __C, (__v16qi) __C, (__v16qu) __D); + __C = vec_sel (__C, __zero, __select); + return (__m64) ((__v2du) (__C))[0]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi8 (__m128i __A, __m128i __B) +{ + const __v16qi __zero = { 0 }; + __v16qi __selectneg = (__v16qi) vec_cmplt ((__v16qi) __B, __zero); + __v16qi __selectpos = + (__v16qi) vec_neg ((__v16qi) vec_cmpgt ((__v16qi) __B, __zero)); + __v16qi __conv = vec_add (__selectneg, __selectpos); + return (__m128i) vec_mul ((__v16qi) __A, (__v16qi) __conv); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi16 (__m128i __A, __m128i __B) +{ + const __v8hi __zero = { 0 }; + __v8hi __selectneg = (__v8hi) vec_cmplt ((__v8hi) __B, __zero); + __v8hi __selectpos = + (__v8hi) vec_neg ((__v8hi) vec_cmpgt ((__v8hi) __B, __zero)); + __v8hi __conv = vec_add (__selectneg, __selectpos); + return (__m128i) vec_mul ((__v8hi) __A, (__v8hi) __conv); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi32 (__m128i __A, __m128i __B) +{ + const __v4si __zero = { 0 }; + __v4si __selectneg = (__v4si) vec_cmplt ((__v4si) __B, __zero); + __v4si __selectpos = + (__v4si) vec_neg ((__v4si) vec_cmpgt 
((__v4si) __B, __zero)); + __v4si __conv = vec_add (__selectneg, __selectpos); + return (__m128i) vec_mul ((__v4si) __A, (__v4si) __conv); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi8 (__m64 __A, __m64 __B) +{ + const __v16qi __zero = { 0 }; + __v16qi __C = (__v16qi) (__v2du) { __A, __A }; + __v16qi __D = (__v16qi) (__v2du) { __B, __B }; + __C = (__v16qi) _mm_sign_epi8 ((__m128i) __C, (__m128i) __D); + return (__m64) ((__v2du) (__C))[0]; +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi16 (__m64 __A, __m64 __B) +{ + const __v8hi __zero = { 0 }; + __v8hi __C = (__v8hi) (__v2du) { __A, __A }; + __v8hi __D = (__v8hi) (__v2du) { __B, __B }; + __C = (__v8hi) _mm_sign_epi16 ((__m128i) __C, (__m128i) __D); + return (__m64) ((__v2du) (__C))[0]; +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi32 (__m64 __A, __m64 __B) +{ + const __v4si __zero = { 0 }; + __v4si __C = (__v4si) (__v2du) { __A, __A }; + __v4si __D = (__v4si) (__v2du) { __B, __B }; + __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D); + return (__m64) ((__v2du) (__C))[0]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddubs_epi16 (__m128i __A, __m128i __B) +{ + __v8hi __unsigned = vec_splats ((signed short) 0x00ff); + __v8hi __C = vec_and (vec_unpackh ((__v16qi) __A), __unsigned); + __v8hi __D = vec_and (vec_unpackl ((__v16qi) __A), __unsigned); + __v8hi __E = vec_unpackh ((__v16qi) __B); + __v8hi __F = vec_unpackl ((__v16qi) __B); + __C = vec_mul (__C, __E); + __D = vec_mul (__D, __F); + const __v16qu __odds = + { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 }; + const __v16qu __evens = + { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 }; + __E = vec_perm (__C, __D, __odds); + __F = vec_perm (__C, __D, __evens); + return (__m128i) vec_adds (__E, __F); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddubs_pi16 (__m64 __A, __m64 __B) +{ + __v8hi __C = (__v8hi) (__v2du) { __A, __A }; + __C = vec_unpackl ((__v16qi) __C); + const __v8hi __unsigned = vec_splats ((signed short) 0x00ff); + __C = vec_and (__C, __unsigned); + __v8hi __D = (__v8hi) (__v2du) { __B, __B }; + __D = vec_unpackl ((__v16qi) __D); + __D = vec_mul (__C, __D); + const __v16qu __odds = + { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 }; + const __v16qu __evens = + { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 }; + __C = vec_perm (__D, __D, __odds); + __D = vec_perm (__D, __D, __evens); + __C = vec_adds (__C, __D); + return (__m64) ((__v2du) (__C))[0]; +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhrs_epi16 (__m128i __A, __m128i __B) +{ + __v4si __C = vec_unpackh ((__v8hi) __A); + __v4si __D = vec_unpackh ((__v8hi) __B); + __C = vec_mul (__C, __D); + __D = vec_unpackl ((__v8hi) __A); + __v4si __E = vec_unpackl ((__v8hi) __B); + __D = vec_mul (__D, __E); + const __v4su __shift = vec_splats ((unsigned int) 14); + __C = vec_sr (__C, __shift); + __D = vec_sr (__D, __shift); + const __v4si __ones = vec_splats ((signed int) 1); + __C = vec_add (__C, __ones); + __C = vec_sr (__C, (__v4su) __ones); + __D = vec_add (__D, __ones); + __D = vec_sr (__D, (__v4su) __ones); + return (__m128i) vec_pack (__C, __D); +} + +extern __inline __m64 +__attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mulhrs_pi16 (__m64 __A, __m64 __B) +{ + __v4si __C = (__v4si) (__v2du) { __A, __A }; + __C = vec_unpackh ((__v8hi) __C); + __v4si __D = (__v4si) (__v2du) { __B, __B }; + __D = vec_unpackh ((__v8hi) __D); + __C = vec_mul (__C, __D); + const __v4su __shift = vec_splats ((unsigned int) 14); + __C = vec_sr (__C, __shift); + const __v4si __ones = vec_splats ((signed int) 1); + __C = vec_add (__C, __ones); + __C = vec_sr (__C, (__v4su) __ones); + __v8hi __E = vec_pack (__C, __D); + return (__m64) ((__v2du) (__E))[0]; +} + +#else +#include_next <tmmintrin.h> +#endif /* defined(__linux__) && defined(__ppc64__) */ + +#endif /* TMMINTRIN_H_ */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/xmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/xmmintrin.h new file mode 100644 index 0000000..0e45b96 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ppc_wrappers/xmmintrin.h @@ -0,0 +1,1843 @@ +/*===---- xmmintrin.h - Implementation of SSE intrinsics on PowerPC --------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef NO_WARN_X86_INTRINSICS +/* This header file is to help porting code using Intel intrinsics + explicitly from x86_64 to powerpc64/powerpc64le. + + Since X86 SSE intrinsics mainly handle the __m128 type, PowerPC + VMX/VSX ISA is a good match for vector float SIMD operations. + However, scalar float operations in vector (XMM) registers require + the POWER8 VSX ISA (2.07) level. There are differences for data + format and placement of float scalars in the vector register, which + require extra steps to match SSE scalar float semantics on POWER. + + It should be noted that there are significant differences between X86_64's + MXSCR and PowerISA's FPSCR/VSCR registers. It's recommended to use + portable <fenv.h> instead of accessing MXSCR directly. + + Most SSE scalar float intrinsic operations can be performed more + efficiently as C language float scalar operations or optimized to + use vector SIMD operations. We recommend this for new applications. */ +#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error." +#endif + +#ifndef _XMMINTRIN_H_INCLUDED +#define _XMMINTRIN_H_INCLUDED + +#if defined(__linux__) && defined(__ppc64__) + +/* Define four value permute mask */ +#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z)) + +#include <altivec.h> + +/* Avoid collisions between altivec.h and strict adherence to C++ and + C11 standards. This should eventually be done inside altivec.h itself, + but only after testing a full distro build. */ +#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && \ + __STDC_VERSION__ >= 201112L)) +#undef vector +#undef pixel +#undef bool +#endif + +/* We need type definitions from the MMX header file. */ +#include <mmintrin.h> + +/* Get _mm_malloc () and _mm_free (). */ +#if __STDC_HOSTED__ +#include <mm_malloc.h> +#endif + +/* The Intel API is flexible enough that we must allow aliasing with other vector types, and their scalar components. 
*/ +typedef vector float __m128 __attribute__((__may_alias__)); + +/* Unaligned version of the same type. */ +typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1))); + +/* Internal data types for implementing the intrinsics. */ +typedef vector float __v4sf; + +/* Create an undefined vector. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_ps (void) +{ + __m128 __Y = __Y; + return __Y; +} + +/* Create a vector of zeros. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_ps (void) +{ + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/* Load four SPFP values from P. The address must be 16-byte aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ps (float const *__P) +{ + return ((__m128)vec_ld(0, (__v4sf*)__P)); +} + +/* Load four SPFP values from P. The address need not be 16-byte aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_ps (float const *__P) +{ + return (vec_vsx_ld(0, __P)); +} + +/* Load four SPFP values in reverse order. The address must be aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadr_ps (float const *__P) +{ + __v4sf __tmp; + __m128 result; + static const __vector unsigned char permute_vector = + { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16, + 0x17, 0x10, 0x11, 0x12, 0x13 }; + + __tmp = vec_ld (0, (__v4sf *) __P); + result = (__m128) vec_perm (__tmp, __tmp, permute_vector); + return result; +} + +/* Create a vector with all four elements equal to F. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_ps (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F }; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ps1 (float __F) +{ + return _mm_set1_ps (__F); +} + +/* Create the vector [Z Y X W]. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W) +{ + return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z }; +} + +/* Create the vector [W X Y Z]. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_ps (float __Z, float __Y, float __X, float __W) +{ + return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W }; +} + +/* Store four SPFP values. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ps (float *__P, __m128 __A) +{ + vec_st((__v4sf)__A, 0, (__v4sf*)__P); +} + +/* Store four SPFP values. The address need not be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_ps (float *__P, __m128 __A) +{ + *(__m128_u *)__P = __A; +} + +/* Store four SPFP values in reverse order. The address must be aligned. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storer_ps (float *__P, __m128 __A) +{ + __v4sf __tmp; + static const __vector unsigned char permute_vector = + { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16, + 0x17, 0x10, 0x11, 0x12, 0x13 }; + + __tmp = (__m128) vec_perm (__A, __A, permute_vector); + + _mm_store_ps (__P, __tmp); +} + +/* Store the lower SPFP value across four words. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store1_ps (float *__P, __m128 __A) +{ + __v4sf __va = vec_splat((__v4sf)__A, 0); + _mm_store_ps (__P, __va); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ps1 (float *__P, __m128 __A) +{ + _mm_store1_ps (__P, __A); +} + +/* Create a vector with element 0 as F and the rest zero. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ss (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f }; +} + +/* Sets the low SPFP value of A from the low value of B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + + return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask)); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ss (float const *__P) +{ + return _mm_set_ss (*__P); +} + +/* Stores the lower SPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ss (float *__P, __m128 __A) +{ + *__P = ((__v4sf)__A)[0]; +} + +/* Perform the respective operation on the lower SPFP (single-precision + floating-point) values of A and B; the upper three SPFP values are + passed through from A. */ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ss (__m128 __A, __m128 __B) +{ +#ifdef _ARCH_PWR7 + __m128 a, b, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + results. So to ensure we don't generate spurious exceptions + (from the upper float values) we splat the lower float + before we do the operation. */ + a = vec_splat (__A, 0); + b = vec_splat (__B, 0); + c = a + b; + /* Then we merge the lower float result with the original upper + float elements from __A. */ + return (vec_sel (__A, c, mask)); +#else + __A[0] = __A[0] + __B[0]; + return (__A); +#endif +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ss (__m128 __A, __m128 __B) +{ +#ifdef _ARCH_PWR7 + __m128 a, b, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + results. So to ensure we don't generate spurious exceptions + (from the upper float values) we splat the lower float + before we do the operation. */ + a = vec_splat (__A, 0); + b = vec_splat (__B, 0); + c = a - b; + /* Then we merge the lower float result with the original upper + float elements from __A. 
*/ + return (vec_sel (__A, c, mask)); +#else + __A[0] = __A[0] - __B[0]; + return (__A); +#endif +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ss (__m128 __A, __m128 __B) +{ +#ifdef _ARCH_PWR7 + __m128 a, b, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + results. So to ensure we don't generate spurious exceptions + (from the upper float values) we splat the lower float + before we do the operation. */ + a = vec_splat (__A, 0); + b = vec_splat (__B, 0); + c = a * b; + /* Then we merge the lower float result with the original upper + float elements from __A. */ + return (vec_sel (__A, c, mask)); +#else + __A[0] = __A[0] * __B[0]; + return (__A); +#endif +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ss (__m128 __A, __m128 __B) +{ +#ifdef _ARCH_PWR7 + __m128 a, b, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + results. So to ensure we don't generate spurious exceptions + (from the upper float values) we splat the lower float + before we do the operation. */ + a = vec_splat (__A, 0); + b = vec_splat (__B, 0); + c = a / b; + /* Then we merge the lower float result with the original upper + float elements from __A. */ + return (vec_sel (__A, c, mask)); +#else + __A[0] = __A[0] / __B[0]; + return (__A); +#endif +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ss (__m128 __A) +{ + __m128 a, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + * results. So to ensure we don't generate spurious exceptions + * (from the upper float values) we splat the lower float + * before we do the operation. */ + a = vec_splat (__A, 0); + c = vec_sqrt (a); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return (vec_sel (__A, c, mask)); +} + +/* Perform the respective operation on the four SPFP values in A and B. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A + (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A - (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A * (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A / (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ps (__m128 __A) +{ + return (vec_sqrt ((__v4sf)__A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ps (__m128 __A) +{ + return (vec_re ((__v4sf)__A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ps (__m128 __A) +{ + return (vec_rsqrte (__A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ss (__m128 __A) +{ + __m128 a, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + * results. So to ensure we don't generate spurious exceptions + * (from the upper float values) we splat the lower float + * before we do the operation. */ + a = vec_splat (__A, 0); + c = _mm_rcp_ps (a); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return (vec_sel (__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ss (__m128 __A) +{ + __m128 a, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + * results. So to ensure we don't generate spurious exceptions + * (from the upper float values) we splat the lower float + * before we do the operation. */ + a = vec_splat (__A, 0); + c = vec_rsqrte (a); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return (vec_sel (__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ss (__m128 __A, __m128 __B) +{ + __v4sf a, b, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + * results. So to ensure we don't generate spurious exceptions + * (from the upper float values) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf)__A, 0); + b = vec_splat ((__v4sf)__B, 0); + c = vec_min (a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return (vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ss (__m128 __A, __m128 __B) +{ + __v4sf a, b, c; + static const __vector unsigned int mask = {0xffffffff, 0, 0, 0}; + /* PowerISA VSX does not allow partial (for just lower float) + * results. So to ensure we don't generate spurious exceptions + * (from the upper float values) we splat the lower float + * before we do the operation. 
*/ + a = vec_splat (__A, 0); + b = vec_splat (__B, 0); + c = vec_max (a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return (vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ps (__m128 __A, __m128 __B) +{ + __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A); + return vec_sel (__B, __A, m); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ps (__m128 __A, __m128 __B) +{ + __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B); + return vec_sel (__B, __A, m); +} + +/* Perform logical bit-wise operations on 128-bit values. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B)); +// return __builtin_ia32_andps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B)); +} + +/* Perform a comparison on the four SPFP values of A and B. For each + element, if the comparison is true, place a mask of all ones in the + result, otherwise a mask of zeros. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_ps (__m128 __A, __m128 __B) +{ + __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B); + return ((__m128)vec_nor (temp, temp)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B)); +} + 
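The splat/operate/select sequence above is the recurring idiom for every scalar (_ss) operation in this header: both inputs are splatted so the arithmetic cannot raise spurious exceptions from garbage in the upper lanes, and vec_sel then merges only lane 0 of the result back into __A. A minimal caller-side sketch of the observable semantics (illustrative values only; assumes a powerpc64le build with this wrapper on the include path and -DNO_WARN_X86_INTRINSICS defined):

#define NO_WARN_X86_INTRINSICS 1
#include <xmmintrin.h>
#include <stdio.h>

int main (void)
{
  __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f); /* a = {1, 2, 3, 4} */
  __m128 b = _mm_set1_ps (10.0f);                 /* b = {10, 10, 10, 10} */
  float out[4];
  /* Only lane 0 is combined; lanes 1-3 pass through from a.  */
  _mm_storeu_ps (out, _mm_add_ss (a, b));
  printf ("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 11 2 3 4 */
  return 0;
}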
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_ps (__m128 __A, __m128 __B) +{ + return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_ps (__m128 __A, __m128 __B) +{ + __vector unsigned int a, b; + __vector unsigned int c, d; + static const __vector unsigned int float_exp_mask = + { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 }; + + a = (__vector unsigned int) vec_abs ((__v4sf)__A); + b = (__vector unsigned int) vec_abs ((__v4sf)__B); + c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a); + d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b); + return ((__m128 ) vec_and (c, d)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_ps (__m128 __A, __m128 __B) +{ + __vector unsigned int a, b; + __vector unsigned int c, d; + static const __vector unsigned int float_exp_mask = + { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 }; + + a = (__vector unsigned int) vec_abs ((__v4sf)__A); + b = (__vector unsigned int) vec_abs ((__v4sf)__B); + c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask); + d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask); + return ((__m128 ) vec_or (c, d)); +} + +/* Perform a comparison on the lower SPFP values of A and B. If the + comparison is true, place a mask of all ones in the result, otherwise a + mask of zeros. The upper three SPFP values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmpeq(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmplt(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. 
*/ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmple(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmpgt(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmpge(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmpeq(a, b); + c = vec_nor (c, c); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmpge(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. 
*/ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmpgt(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmple(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_ss (__m128 __A, __m128 __B) +{ + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + __v4sf a, b, c; + /* PowerISA VMX does not allow partial (for just element 0) + * results. So to ensure we don't generate spurious exceptions + * (from the upper elements) we splat the lower float + * before we do the operation. */ + a = vec_splat ((__v4sf) __A, 0); + b = vec_splat ((__v4sf) __B, 0); + c = (__v4sf) vec_cmplt(a, b); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_ss (__m128 __A, __m128 __B) +{ + __vector unsigned int a, b; + __vector unsigned int c, d; + static const __vector unsigned int float_exp_mask = + { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 }; + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + + a = (__vector unsigned int) vec_abs ((__v4sf)__A); + b = (__vector unsigned int) vec_abs ((__v4sf)__B); + c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a); + d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b); + c = vec_and (c, d); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_ss (__m128 __A, __m128 __B) +{ + __vector unsigned int a, b; + __vector unsigned int c, d; + static const __vector unsigned int float_exp_mask = + { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 }; + static const __vector unsigned int mask = + { 0xffffffff, 0, 0, 0 }; + + a = (__vector unsigned int) vec_abs ((__v4sf)__A); + b = (__vector unsigned int) vec_abs ((__v4sf)__B); + c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask); + d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask); + c = vec_or (c, d); + /* Then we merge the lower float result with the original upper + * float elements from __A. */ + return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask)); +} + +/* Compare the lower SPFP values of A and B and return 1 if true + and 0 if false. 
*/ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_ss (__m128 __A, __m128 __B) +{ + return (__A[0] == __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_ss (__m128 __A, __m128 __B) +{ + return (__A[0] < __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_ss (__m128 __A, __m128 __B) +{ + return (__A[0] <= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_ss (__m128 __A, __m128 __B) +{ + return (__A[0] > __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_ss (__m128 __A, __m128 __B) +{ + return (__A[0] >= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_ss (__m128 __A, __m128 __B) +{ + return (__A[0] != __B[0]); +} + +/* FIXME + * The _mm_ucomi??_ss implementations below are exactly the same as + * _mm_comi??_ss because GCC for PowerPC only generates unordered + * compares (scalar and vector). + * Technically _mm_comieq_ss et al. should be using the ordered + * compare and signal for QNaNs. + * The _mm_ucomieq_sd et al. should be OK, as is. + */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_ss (__m128 __A, __m128 __B) +{ + return (__A[0] == __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_ss (__m128 __A, __m128 __B) +{ + return (__A[0] < __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_ss (__m128 __A, __m128 __B) +{ + return (__A[0] <= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_ss (__m128 __A, __m128 __B) +{ + return (__A[0] > __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_ss (__m128 __A, __m128 __B) +{ + return (__A[0] >= __B[0]); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_ss (__m128 __A, __m128 __B) +{ + return (__A[0] != __B[0]); +} + +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_f32 (__m128 __A) +{ + return ((__v4sf)__A)[0]; +} + +/* Convert the lower SPFP value to a 32-bit integer according to the current + rounding mode. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si32 (__m128 __A) +{ + __m64 res = 0; +#ifdef _ARCH_PWR8 + double dtmp; + __asm__( +#ifdef __LITTLE_ENDIAN__ + "xxsldwi %x0,%x0,%x0,3;\n" +#endif + "xscvspdp %x2,%x0;\n" + "fctiw %2,%2;\n" + "mfvsrd %1,%x2;\n" + : "+wa" (__A), + "=r" (res), + "=f" (dtmp) + : ); +#else + res = __builtin_rint(__A[0]); +#endif + return (res); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_ss2si (__m128 __A) +{ + return _mm_cvtss_si32 (__A); +} + +/* Convert the lower SPFP value to a 64-bit integer according to the + current rounding mode. */ + +/* Intel intrinsic. 
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si64 (__m128 __A) +{ + __m64 res = 0; +#ifdef _ARCH_PWR8 + double dtmp; + __asm__( +#ifdef __LITTLE_ENDIAN__ + "xxsldwi %x0,%x0,%x0,3;\n" +#endif + "xscvspdp %x2,%x0;\n" + "fctid %2,%2;\n" + "mfvsrd %1,%x2;\n" + : "+wa" (__A), + "=r" (res), + "=f" (dtmp) + : ); +#else + res = __builtin_llrint(__A[0]); +#endif + return (res); +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si64x (__m128 __A) +{ + return _mm_cvtss_si64 ((__v4sf) __A); +} + +/* Constants for use with _mm_prefetch. */ +enum _mm_hint +{ + /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set. */ + _MM_HINT_ET0 = 7, + _MM_HINT_ET1 = 6, + _MM_HINT_T0 = 3, + _MM_HINT_T1 = 2, + _MM_HINT_T2 = 1, + _MM_HINT_NTA = 0 +}; + +/* Loads one cache line from address P to a location "closer" to the + processor. The selector I specifies the type of prefetch operation. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_prefetch (const void *__P, enum _mm_hint __I) +{ + /* Current PowerPC ignores the hint parameters. */ + __builtin_prefetch (__P); +} + +/* Convert the two lower SPFP values to 32-bit integers according to the + current rounding mode. Return the integers in packed form. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi32 (__m128 __A) +{ + /* Splat two lower SPFP values to both halves. */ + __v4sf temp, rounded; + __vector unsigned long long result; + + /* Splat two lower SPFP values to both halves. */ + temp = (__v4sf) vec_splat ((__vector long long)__A, 0); + rounded = vec_rint(temp); + result = (__vector unsigned long long) vec_cts (rounded, 0); + + return (__m64) ((__vector long long) result)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_ps2pi (__m128 __A) +{ + return _mm_cvtps_pi32 (__A); +} + +/* Truncate the lower SPFP value to a 32-bit integer. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si32 (__m128 __A) +{ + /* Extract the lower float element. */ + float temp = __A[0]; + /* Truncate to a 32-bit integer and return. */ + return temp; +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_ss2si (__m128 __A) +{ + return _mm_cvttss_si32 (__A); +} + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si64 (__m128 __A) +{ + /* Extract the lower float element. */ + float temp = __A[0]; + /* Truncate to a 64-bit integer and return. */ + return temp; +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si64x (__m128 __A) +{ + /* Extract the lower float element. */ + float temp = __A[0]; + /* Truncate to a 64-bit integer and return. */ + return temp; +} + +/* Truncate the two lower SPFP values to 32-bit integers. Return the + integers in packed form. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_pi32 (__m128 __A) +{ + __v4sf temp; + __vector unsigned long long result; + + /* Splat two lower SPFP values to both halves. 
*/ + temp = (__v4sf) vec_splat ((__vector long long)__A, 0); + result = (__vector unsigned long long) vec_cts (temp, 0); + + return (__m64) ((__vector long long) result)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_ps2pi (__m128 __A) +{ + return _mm_cvttps_pi32 (__A); +} + +/* Convert B to a SPFP value and insert it as element zero in A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_ss (__m128 __A, int __B) +{ + float temp = __B; + __A[0] = temp; + + return __A; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_si2ss (__m128 __A, int __B) +{ + return _mm_cvtsi32_ss (__A, __B); +} + +/* Convert B to a SPFP value and insert it as element zero in A. */ +/* Intel intrinsic. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_ss (__m128 __A, long long __B) +{ + float temp = __B; + __A[0] = temp; + + return __A; +} + +/* Microsoft intrinsic. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_ss (__m128 __A, long long __B) +{ + return _mm_cvtsi64_ss (__A, __B); +} + +/* Convert the two 32-bit values in B to SPFP form and insert them + as the two lower elements in A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32_ps (__m128 __A, __m64 __B) +{ + __vector signed int vm1; + __vector float vf1; + + vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B}; + vf1 = (__vector float) vec_ctf (vm1, 0); + + return ((__m128) (__vector unsigned long long) + { ((__vector unsigned long long)vf1) [0], + ((__vector unsigned long long)__A) [1]}); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_pi2ps (__m128 __A, __m64 __B) +{ + return _mm_cvtpi32_ps (__A, __B); +} + +/* Convert the four signed 16-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi16_ps (__m64 __A) +{ + __vector signed short vs8; + __vector signed int vi4; + __vector float vf1; + + vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A }; + vi4 = vec_vupklsh (vs8); + vf1 = (__vector float) vec_ctf (vi4, 0); + + return (__m128) vf1; +} + +/* Convert the four unsigned 16-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpu16_ps (__m64 __A) +{ + const __vector unsigned short zero = + { 0, 0, 0, 0, 0, 0, 0, 0 }; + __vector unsigned short vs8; + __vector unsigned int vi4; + __vector float vf1; + + vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A }; + vi4 = (__vector unsigned int) vec_mergel +#ifdef __LITTLE_ENDIAN__ + (vs8, zero); +#else + (zero, vs8); +#endif + vf1 = (__vector float) vec_ctf (vi4, 0); + + return (__m128) vf1; +} + +/* Convert the low four signed 8-bit values in A to SPFP form. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi8_ps (__m64 __A) +{ + __vector signed char vc16; + __vector signed short vs8; + __vector signed int vi4; + __vector float vf1; + + vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A }; + vs8 = vec_vupkhsb (vc16); + vi4 = vec_vupkhsh (vs8); + vf1 = (__vector float) vec_ctf (vi4, 0); + + return (__m128) vf1; +} + +/* Convert the low four unsigned 8-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + +_mm_cvtpu8_ps (__m64 __A) +{ + const __vector unsigned char zero = + { 0, 0, 0, 0, 0, 0, 0, 0 }; + __vector unsigned char vc16; + __vector unsigned short vs8; + __vector unsigned int vi4; + __vector float vf1; + + vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A }; +#ifdef __LITTLE_ENDIAN__ + vs8 = (__vector unsigned short) vec_mergel (vc16, zero); + vi4 = (__vector unsigned int) vec_mergeh (vs8, + (__vector unsigned short) zero); +#else + vs8 = (__vector unsigned short) vec_mergel (zero, vc16); + vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero, + vs8); +#endif + vf1 = (__vector float) vec_ctf (vi4, 0); + + return (__m128) vf1; +} + +/* Convert the four signed 32-bit values in A and B to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32x2_ps (__m64 __A, __m64 __B) +{ + __vector signed int vi4; + __vector float vf4; + + vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B }; + vf4 = (__vector float) vec_ctf (vi4, 0); + return (__m128) vf4; +} + +/* Convert the four SPFP values in A to four signed 16-bit integers. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi16 (__m128 __A) +{ + __v4sf rounded; + __vector signed int temp; + __vector unsigned long long result; + + rounded = vec_rint(__A); + temp = vec_cts (rounded, 0); + result = (__vector unsigned long long) vec_pack (temp, temp); + + return (__m64) ((__vector long long) result)[0]; +} + +/* Convert the four SPFP values in A to four signed 8-bit integers. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi8 (__m128 __A) +{ + __v4sf rounded; + __vector signed int tmp_i; + static const __vector signed int zero = {0, 0, 0, 0}; + __vector signed short tmp_s; + __vector signed char res_v; + + rounded = vec_rint(__A); + tmp_i = vec_cts (rounded, 0); + tmp_s = vec_pack (tmp_i, zero); + res_v = vec_pack (tmp_s, tmp_s); + return (__m64) ((__vector long long) res_v)[0]; +} + +/* Selects four specific SPFP values from A and B based on MASK. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + +_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask) +{ + unsigned long element_selector_10 = __mask & 0x03; + unsigned long element_selector_32 = (__mask >> 2) & 0x03; + unsigned long element_selector_54 = (__mask >> 4) & 0x03; + unsigned long element_selector_76 = (__mask >> 6) & 0x03; + static const unsigned int permute_selectors[4] = + { +#ifdef __LITTLE_ENDIAN__ + 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C +#else + 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F +#endif + }; + __vector unsigned int t; + + t[0] = permute_selectors[element_selector_10]; + t[1] = permute_selectors[element_selector_32]; + t[2] = permute_selectors[element_selector_54] + 0x10101010; + t[3] = permute_selectors[element_selector_76] + 0x10101010; + return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t); +} + +/* Selects and interleaves the upper two SPFP values from A and B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_ps (__m128 __A, __m128 __B) +{ + return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B); +} + +/* Selects and interleaves the lower two SPFP values from A and B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_ps (__m128 __A, __m128 __B) +{ + return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B); +} + +/* Sets the upper two SPFP values with 64-bits of data loaded from P; + the lower two values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadh_pi (__m128 __A, __m64 const *__P) +{ + __vector unsigned long long __a = (__vector unsigned long long)__A; + __vector unsigned long long __p = vec_splats(*__P); + __a [1] = __p [1]; + + return (__m128)__a; +} + +/* Stores the upper two SPFP values of A into P. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeh_pi (__m64 *__P, __m128 __A) +{ + __vector unsigned long long __a = (__vector unsigned long long) __A; + + *__P = __a[1]; +} + +/* Moves the upper two values of B into the lower two values of A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movehl_ps (__m128 __A, __m128 __B) +{ + return (__m128) vec_mergel ((__vector unsigned long long)__B, + (__vector unsigned long long)__A); +} + +/* Moves the lower two values of B into the upper two values of A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movelh_ps (__m128 __A, __m128 __B) +{ + return (__m128) vec_mergeh ((__vector unsigned long long)__A, + (__vector unsigned long long)__B); +} + +/* Sets the lower two SPFP values with 64-bits of data loaded from P; + the upper two values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_pi (__m128 __A, __m64 const *__P) +{ + __vector unsigned long long __a = (__vector unsigned long long)__A; + __vector unsigned long long __p = vec_splats(*__P); + __a [0] = __p [0]; + + return (__m128)__a; +} + +/* Stores the lower two SPFP values of A into P. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_pi (__m64 *__P, __m128 __A) +{ + __vector unsigned long long __a = (__vector unsigned long long) __A; + + *__P = __a[0]; +} + +#ifdef _ARCH_PWR8 +/* Intrinsic functions that require PowerISA 2.07 minimum. */ + +/* Creates a 4-bit mask from the most significant bits of the SPFP values. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_ps (__m128 __A) +{ + __vector unsigned long long result; + static const __vector unsigned int perm_mask = + { +#ifdef __LITTLE_ENDIAN__ + 0x00204060, 0x80808080, 0x80808080, 0x80808080 +#else + 0x80808080, 0x80808080, 0x80808080, 0x00204060 +#endif + }; + + result = ((__vector unsigned long long) + vec_vbpermq ((__vector unsigned char) __A, + (__vector unsigned char) perm_mask)); + +#ifdef __LITTLE_ENDIAN__ + return result[1]; +#else + return result[0]; +#endif +} +#endif /* _ARCH_PWR8 */ + +/* Create a vector with all four elements equal to *P. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load1_ps (float const *__P) +{ + return _mm_set1_ps (*__P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ps1 (float const *__P) +{ + return _mm_load1_ps (__P); +} + +/* Extracts one of the four words of A. The selector N must be immediate. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_pi16 (__m64 const __A, int const __N) +{ + unsigned int shiftr = __N & 3; +#ifdef __BIG_ENDIAN__ + shiftr = 3 - shiftr; +#endif + + return ((__A >> (shiftr * 16)) & 0xffff); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pextrw (__m64 const __A, int const __N) +{ + return _mm_extract_pi16 (__A, __N); +} + +/* Inserts word D into one of four words of A. The selector N must be + immediate. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_pi16 (__m64 const __A, int const __D, int const __N) +{ + const int shiftl = (__N & 3) * 16; + const __m64 shiftD = (const __m64) __D << shiftl; + const __m64 mask = 0xffffUL << shiftl; + __m64 result = (__A & (~mask)) | (shiftD & mask); + + return (result); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pinsrw (__m64 const __A, int const __D, int const __N) +{ + return _mm_insert_pi16 (__A, __D, __N); +} + +/* Compute the element-wise maximum of signed 16-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + +_mm_max_pi16 (__m64 __A, __m64 __B) +{ +#if _ARCH_PWR8 + __vector signed short a, b, r; + __vector __bool short c; + + a = (__vector signed short)vec_splats (__A); + b = (__vector signed short)vec_splats (__B); + c = (__vector __bool short)vec_cmpgt (a, b); + r = vec_sel (b, a, c); + return (__m64) ((__vector long long) r)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __A; + m2.as_m64 = __B; + + res.as_short[0] = + (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0]; + res.as_short[1] = + (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1]; + res.as_short[2] = + (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2]; + res.as_short[3] = + (m1.as_short[3] > m2.as_short[3]) ? 
m1.as_short[3] : m2.as_short[3]; + + return (__m64) res.as_m64; +#endif +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaxsw (__m64 __A, __m64 __B) +{ + return _mm_max_pi16 (__A, __B); +} + +/* Compute the element-wise maximum of unsigned 8-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pu8 (__m64 __A, __m64 __B) +{ +#if _ARCH_PWR8 + __vector unsigned char a, b, r; + __vector __bool char c; + + a = (__vector unsigned char)vec_splats (__A); + b = (__vector unsigned char)vec_splats (__B); + c = (__vector __bool char)vec_cmpgt (a, b); + r = vec_sel (b, a, c); + return (__m64) ((__vector long long) r)[0]; +#else + __m64_union m1, m2, res; + long i; + + m1.as_m64 = __A; + m2.as_m64 = __B; + + + for (i = 0; i < 8; i++) + res.as_char[i] = + ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ? + m1.as_char[i] : m2.as_char[i]; + + return (__m64) res.as_m64; +#endif +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaxub (__m64 __A, __m64 __B) +{ + return _mm_max_pu8 (__A, __B); +} + +/* Compute the element-wise minimum of signed 16-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pi16 (__m64 __A, __m64 __B) +{ +#if _ARCH_PWR8 + __vector signed short a, b, r; + __vector __bool short c; + + a = (__vector signed short)vec_splats (__A); + b = (__vector signed short)vec_splats (__B); + c = (__vector __bool short)vec_cmplt (a, b); + r = vec_sel (b, a, c); + return (__m64) ((__vector long long) r)[0]; +#else + __m64_union m1, m2, res; + + m1.as_m64 = __A; + m2.as_m64 = __B; + + res.as_short[0] = + (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0]; + res.as_short[1] = + (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1]; + res.as_short[2] = + (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2]; + res.as_short[3] = + (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3]; + + return (__m64) res.as_m64; +#endif +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pminsw (__m64 __A, __m64 __B) +{ + return _mm_min_pi16 (__A, __B); +} + +/* Compute the element-wise minimum of unsigned 8-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pu8 (__m64 __A, __m64 __B) +{ +#if _ARCH_PWR8 + __vector unsigned char a, b, r; + __vector __bool char c; + + a = (__vector unsigned char)vec_splats (__A); + b = (__vector unsigned char)vec_splats (__B); + c = (__vector __bool char)vec_cmplt (a, b); + r = vec_sel (b, a, c); + return (__m64) ((__vector long long) r)[0]; +#else + __m64_union m1, m2, res; + long i; + + m1.as_m64 = __A; + m2.as_m64 = __B; + + + for (i = 0; i < 8; i++) + res.as_char[i] = + ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ? + m1.as_char[i] : m2.as_char[i]; + + return (__m64) res.as_m64; +#endif +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pminub (__m64 __A, __m64 __B) +{ + return _mm_min_pu8 (__A, __B); +} + +/* Create an 8-bit mask of the signs of 8-bit values. 
*/ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_pi8 (__m64 __A) +{ + unsigned long long p = +#ifdef __LITTLE_ENDIAN__ + 0x0008101820283038UL; // permute control for sign bits +#else + 0x3830282018100800UL; // permute control for sign bits +#endif + return __builtin_bpermd (p, __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmovmskb (__m64 __A) +{ + return _mm_movemask_pi8 (__A); +} + +/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values + in B and produce the high 16 bits of the 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_pu16 (__m64 __A, __m64 __B) +{ + __vector unsigned short a, b; + __vector unsigned short c; + __vector unsigned int w0, w1; + __vector unsigned char xform1 = { +#ifdef __LITTLE_ENDIAN__ + 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, + 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F +#else + 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, + 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D +#endif + }; + + a = (__vector unsigned short)vec_splats (__A); + b = (__vector unsigned short)vec_splats (__B); + + w0 = vec_vmuleuh (a, b); + w1 = vec_vmulouh (a, b); + c = (__vector unsigned short)vec_perm (w0, w1, xform1); + + return (__m64) ((__vector long long) c)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmulhuw (__m64 __A, __m64 __B) +{ + return _mm_mulhi_pu16 (__A, __B); +} + +/* Return a combination of the four 16-bit values in A. The selector + must be an immediate. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pi16 (__m64 __A, int const __N) +{ + unsigned long element_selector_10 = __N & 0x03; + unsigned long element_selector_32 = (__N >> 2) & 0x03; + unsigned long element_selector_54 = (__N >> 4) & 0x03; + unsigned long element_selector_76 = (__N >> 6) & 0x03; + static const unsigned short permute_selectors[4] = + { +#ifdef __LITTLE_ENDIAN__ + 0x0908, 0x0B0A, 0x0D0C, 0x0F0E +#else + 0x0607, 0x0405, 0x0203, 0x0001 +#endif + }; + __m64_union t; + __vector unsigned long long a, p, r; + +#ifdef __LITTLE_ENDIAN__ + t.as_short[0] = permute_selectors[element_selector_10]; + t.as_short[1] = permute_selectors[element_selector_32]; + t.as_short[2] = permute_selectors[element_selector_54]; + t.as_short[3] = permute_selectors[element_selector_76]; +#else + t.as_short[3] = permute_selectors[element_selector_10]; + t.as_short[2] = permute_selectors[element_selector_32]; + t.as_short[1] = permute_selectors[element_selector_54]; + t.as_short[0] = permute_selectors[element_selector_76]; +#endif + p = vec_splats (t.as_m64); + a = vec_splats (__A); + r = vec_perm (a, a, (__vector unsigned char)p); + return (__m64) ((__vector long long) r)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pshufw (__m64 __A, int const __N) +{ + return _mm_shuffle_pi16 (__A, __N); +} + +/* Conditionally store byte elements of A into P. The high bit of each + byte in the selector N determines whether the corresponding byte from + A is stored. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P) +{ + __m64 hibit = 0x8080808080808080UL; + __m64 mask, tmp; + __m64 *p = (__m64*)__P; + + tmp = *p; + mask = _mm_cmpeq_pi8 ((__N & hibit), hibit); + tmp = (tmp & (~mask)) | (__A & mask); + *p = tmp; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_maskmovq (__m64 __A, __m64 __N, char *__P) +{ + _mm_maskmove_si64 (__A, __N, __P); +} + +/* Compute the rounded averages of the unsigned 8-bit values in A and B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_pu8 (__m64 __A, __m64 __B) +{ + __vector unsigned char a, b, c; + + a = (__vector unsigned char)vec_splats (__A); + b = (__vector unsigned char)vec_splats (__B); + c = vec_avg (a, b); + return (__m64) ((__vector long long) c)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgb (__m64 __A, __m64 __B) +{ + return _mm_avg_pu8 (__A, __B); +} + +/* Compute the rounded averages of the unsigned 16-bit values in A and B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_pu16 (__m64 __A, __m64 __B) +{ + __vector unsigned short a, b, c; + + a = (__vector unsigned short)vec_splats (__A); + b = (__vector unsigned short)vec_splats (__B); + c = vec_avg (a, b); + return (__m64) ((__vector long long) c)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgw (__m64 __A, __m64 __B) +{ + return _mm_avg_pu16 (__A, __B); +} + +/* Compute the sum of the absolute differences of the unsigned 8-bit + values in A and B. Return the value in the lower 16-bit word; the + upper words are cleared. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sad_pu8 (__m64 __A, __m64 __B) +{ + __vector unsigned char a, b; + __vector unsigned char vmin, vmax, vabsdiff; + __vector signed int vsum; + const __vector unsigned int zero = + { 0, 0, 0, 0 }; + __m64_union result = {0}; + + a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A }; + b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B }; + vmin = vec_min (a, b); + vmax = vec_max (a, b); + vabsdiff = vec_sub (vmax, vmin); + /* Sum four groups of bytes into integers. */ + vsum = (__vector signed int) vec_sum4s (vabsdiff, zero); + /* Sum across four integers with integer result. */ + vsum = vec_sums (vsum, (__vector signed int) zero); + /* The sum is in the rightmost 32 bits of the vector result. + Transfer to a GPR and truncate to 16 bits. */ + result.as_short[0] = vsum[3]; + return result.as_m64; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psadbw (__m64 __A, __m64 __B) +{ + return _mm_sad_pu8 (__A, __B); +} + +/* Stores the data in A to the address P without polluting the caches. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_pi (__m64 *__P, __m64 __A) +{ + /* Use the data cache block touch for store transient. */ + __asm__ ( + " dcbtstt 0,%0" + : + : "b" (__P) + : "memory" + ); + *__P = __A; +} + +/* Likewise. The address must be 16-byte aligned. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_ps (float *__P, __m128 __A) +{ + /* Use the data cache block touch for store transient. */ + __asm__ ( + " dcbtstt 0,%0" + : + : "b" (__P) + : "memory" + ); + _mm_store_ps (__P, __A); +} + +/* Guarantees that every preceding store is globally visible before + any subsequent store. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sfence (void) +{ + /* Generate a lightweight sync. */ + __atomic_thread_fence (__ATOMIC_RELEASE); +} + +/* The execution of the next instruction is delayed by an implementation- + specific amount of time. The instruction does not modify the + architectural state. This is after the pop_options pragma because + it does not require SSE support in the processor--the encoding is a + nop on processors that do not support it. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_pause (void) +{ + /* There is no exact match with this construct, but the following is + close to the desired effect. */ +#if _ARCH_PWR8 + /* On power8 and later processors we can depend on Program Priority + (PRI) and associated "very low" PPI setting. Since we don't know + what PPI this thread is running at we: 1) save the current PRI + from the PPR SPR into a local GPR, 2) set the PRI to "very low" + via the special or 31,31,31 encoding. 3) issue an "isync" to + ensure the PRI change takes effect before we execute any more + instructions. + Now we can execute a lwsync (release barrier) while we execute + this thread at "very low" PRI. Finally we restore the original + PRI and continue execution. */ + unsigned long __PPR; + + __asm__ volatile ( + " mfppr %0;" + " or 31,31,31;" + " isync;" + " lwsync;" + " isync;" + " mtppr %0;" + : "=r" (__PPR) + : + : "memory" + ); +#else + /* For older processors where we may not even have Program Priority + controls we can only depend on Heavy Weight Sync. */ + __atomic_thread_fence (__ATOMIC_SEQ_CST); +#endif +} + +/* Transpose the 4x4 matrix composed of row[0-3]. */ +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ +do { \ + __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \ + __v4sf __t0 = vec_vmrghw (__r0, __r1); \ + __v4sf __t1 = vec_vmrghw (__r2, __r3); \ + __v4sf __t2 = vec_vmrglw (__r0, __r1); \ + __v4sf __t3 = vec_vmrglw (__r2, __r3); \ + (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, \ + (__vector long long)__t1); \ + (row1) = (__v4sf)vec_mergel ((__vector long long)__t0, \ + (__vector long long)__t1); \ + (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2, \ + (__vector long long)__t3); \ + (row3) = (__v4sf)vec_mergel ((__vector long long)__t2, \ + (__vector long long)__t3); \ +} while (0) + +/* For backward source compatibility. */ +//# include <emmintrin.h> + +#else +#include_next <xmmintrin.h> +#endif /* defined(__linux__) && defined(__ppc64__) */ + +#endif /* _XMMINTRIN_H_INCLUDED */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/prfchwintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/prfchwintrin.h new file mode 100644 index 0000000..d2f91aa --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/prfchwintrin.h @@ -0,0 +1,58 @@ +/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED) +#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead." +#endif + +#ifndef __PRFCHWINTRIN_H +#define __PRFCHWINTRIN_H + +/// Loads a memory sequence containing the specified memory address into +/// all data cache levels. The cache-coherency state is set to exclusive. +/// Data can be read from and written to the cache line without additional +/// delay. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the \c PREFETCHT0 instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_m_prefetch(void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + +/// Loads a memory sequence containing the specified memory address into +/// the L1 data cache and sets the cache-coherency state to modified. This +/// provides a hint to the processor that the cache line will be modified. +/// It is intended for use when the cache line will be written to shortly +/// after the prefetch is performed. +/// +/// Note that the effect of this intrinsic is dependent on the processor +/// implementation. +/// +/// \headerfile <x86intrin.h> +/// +/// This intrinsic corresponds to the \c PREFETCHW instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_m_prefetchw(volatile const void *__P) +{ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" + __builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */); +#pragma clang diagnostic pop +} + +#endif /* __PRFCHWINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/profile/InstrProfData.inc b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/profile/InstrProfData.inc new file mode 100644 index 0000000..cb2d6a6 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/profile/InstrProfData.inc @@ -0,0 +1,896 @@ +/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\ +|* +|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +|* See https://llvm.org/LICENSE.txt for license information. +|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +|* +\*===----------------------------------------------------------------------===*/ +/* + * This is the main file that defines all the data structures, signatures, and + * constant literals that are shared across the profiling runtime library, + * compiler (instrumentation), and host tools (reader/writer). The entities + * defined in this file affect the profile runtime ABI, the raw profile format, + * or both. + * + * The file has two identical copies. The primary copy lives in LLVM and + * the other one sits in the compiler-rt/lib/profile directory. To make changes + * in this file, first modify the primary copy and copy it over to compiler-rt. + * Testing of any change in this file can start only after the two copies are + * synced up. + * + * The first part of the file includes macros that define types, names, and + * initializers for the member fields of the core data structures. The field + * declarations for one structure are enabled by defining the field activation + * macro associated with that structure.
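+ * (For instance, defining INSTR_PROF_DATA before including this file + * activates the per-function control structure fields listed below.)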
Only one field activation record + * can be defined at a time, and the remaining definitions will be filtered out + * by the preprocessor. + * + * Examples of how the template is used to instantiate structure definitions: + * 1. To declare a structure: + * + * struct ProfData { + * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \ + * Type Name; + * #include "llvm/ProfileData/InstrProfData.inc" + * }; + * + * 2. To construct LLVM type arrays for the struct type: + * + * Type *DataTypes[] = { + * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \ + * LLVMType, + * #include "llvm/ProfileData/InstrProfData.inc" + * }; + * + * 3. To construct a constant array for the initializers: + * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \ + * Initializer, + * Constant *ConstantVals[] = { + * #include "llvm/ProfileData/InstrProfData.inc" + * }; + * + * + * The second part of the file includes definitions of all other entities that + * are related to the runtime ABI and format. When no field activation macro is + * defined, this file can be included to introduce the definitions. + * +\*===----------------------------------------------------------------------===*/ + +/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in + * the compiler runtime. */ +#ifndef INSTR_PROF_VISIBILITY +#define INSTR_PROF_VISIBILITY +#endif + +/* INSTR_PROF_DATA start. */ +/* Definition of member fields of the per-function control structure. */ +#ifndef INSTR_PROF_DATA +#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) +#else +#define INSTR_PROF_DATA_DEFINED +#endif +INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \ + ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \ + IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName())))) +INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \ + ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \ + Inc->getHash()->getZExtValue())) +INSTR_PROF_DATA(const IntPtrT, IntPtrTy, CounterPtr, RelativeCounterPtr) +/* This is used to map function pointers for the indirect call targets to + * function name hashes during the conversion from raw to merged profile + * data. + */ +INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \ + FunctionAddr) +INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \ + ValuesPtrExpr) +INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \ + ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters)) +INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \ + ConstantArray::get(Int16ArrayTy, Int16ArrayVals)) +#undef INSTR_PROF_DATA +/* INSTR_PROF_DATA end. */ + + +/* This is an internal data structure used by the value profiler. It + * is defined here to allow serialization code sharing by LLVM + * to be used in unit tests. + * + * typedef struct ValueProfNode { + * // InstrProfValueData VData; + * uint64_t Value; + * uint64_t Count; + * struct ValueProfNode *Next; + * } ValueProfNode; + */ +/* INSTR_PROF_VALUE_NODE start.
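+   (At run time each profiled value site owns a linked list of these nodes, +   one node per distinct observed value; they live in the section named by +   INSTR_PROF_VNODES_SECT_NAME, defined further below.)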
*/ +#ifndef INSTR_PROF_VALUE_NODE +#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer) +#else +#define INSTR_PROF_DATA_DEFINED +#endif +INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \ + ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0)) +INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \ + ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0)) +INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \ + ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0)) +#undef INSTR_PROF_VALUE_NODE +/* INSTR_PROF_VALUE_NODE end. */ + +/* INSTR_PROF_RAW_HEADER start */ +/* Definition of member fields of the raw profile header data structure. */ +#ifndef INSTR_PROF_RAW_HEADER +#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer) +#else +#define INSTR_PROF_DATA_DEFINED +#endif +INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic()) +INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version()) +INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL)) +INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize) +INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters) +INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize) +INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters) +INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize) +INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, + (uintptr_t)CountersBegin - (uintptr_t)DataBegin) +INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin) +INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last) +#undef INSTR_PROF_RAW_HEADER +/* INSTR_PROF_RAW_HEADER end */ + +/* VALUE_PROF_FUNC_PARAM start */ +/* Definition of parameter types of the runtime API used to do value profiling + * for a given value site. + */ +#ifndef VALUE_PROF_FUNC_PARAM +#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType) +#define INSTR_PROF_COMMA +#else +#define INSTR_PROF_DATA_DEFINED +#define INSTR_PROF_COMMA , +#endif +VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \ + INSTR_PROF_COMMA +VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA +VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) +#undef VALUE_PROF_FUNC_PARAM +#undef INSTR_PROF_COMMA +/* VALUE_PROF_FUNC_PARAM end */ + +/* VALUE_PROF_KIND start */ +#ifndef VALUE_PROF_KIND +#define VALUE_PROF_KIND(Enumerator, Value, Descr) +#else +#define INSTR_PROF_DATA_DEFINED +#endif +/* For indirect function call value profiling, the addresses of the target + * functions are profiled by the instrumented code. The target addresses are + * written in the raw profile data and converted to the target function name's + * MD5 hash by the profile reader during deserialization. Typically, this + * happens when the raw profile data is read during profile merging. + * + * For this remapping the ProfData is used. ProfData contains both the function + * name hash and the function address. + */ +VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target") +/* For memory intrinsic functions size profiling. */ +VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size") +/* These two kinds must be the last to be + * declared. This is to make sure the string + * array created with the template can be + * indexed with the kind value.
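+ * (This also lets code walk every kind with a plain loop such as + * for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++), as the + * closure-based serialization helpers below do.)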
+ */ +VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first") +VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last") + +#undef VALUE_PROF_KIND +/* VALUE_PROF_KIND end */ + +#undef COVMAP_V2_OR_V3 +#ifdef COVMAP_V2 +#define COVMAP_V2_OR_V3 +#endif +#ifdef COVMAP_V3 +#define COVMAP_V2_OR_V3 +#endif + +/* COVMAP_FUNC_RECORD start */ +/* Definition of member fields of the function record structure in coverage + * map. + */ +#ifndef COVMAP_FUNC_RECORD +#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer) +#else +#define INSTR_PROF_DATA_DEFINED +#endif +#ifdef COVMAP_V1 +COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \ + NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \ + llvm::Type::getInt8PtrTy(Ctx))) +COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \ + llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \ + NameValue.size())) +#endif +#ifdef COVMAP_V2_OR_V3 +COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \ + llvm::ConstantInt::get( \ + llvm::Type::getInt64Ty(Ctx), NameHash)) +#endif +COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \ + llvm::ConstantInt::get( \ + llvm::Type::getInt32Ty(Ctx), CoverageMapping.size())) +COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \ + llvm::ConstantInt::get( \ + llvm::Type::getInt64Ty(Ctx), FuncHash)) +#ifdef COVMAP_V3 +COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \ + llvm::ConstantInt::get( \ + llvm::Type::getInt64Ty(Ctx), FilenamesRef)) +COVMAP_FUNC_RECORD(const char, \ + llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \ + CoverageMapping.size()), \ + CoverageMapping, + llvm::ConstantDataArray::getRaw( \ + CoverageMapping, CoverageMapping.size(), \ + llvm::Type::getInt8Ty(Ctx))) +#endif +#undef COVMAP_FUNC_RECORD +/* COVMAP_FUNC_RECORD end. */ + +/* COVMAP_HEADER start */ +/* Definition of member fields of coverage map header. + */ +#ifndef COVMAP_HEADER +#define COVMAP_HEADER(Type, LLVMType, Name, Initializer) +#else +#define INSTR_PROF_DATA_DEFINED +#endif +COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \ + llvm::ConstantInt::get(Int32Ty, NRecords)) +COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \ + llvm::ConstantInt::get(Int32Ty, FilenamesSize)) +COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \ + llvm::ConstantInt::get(Int32Ty, CoverageMappingSize)) +COVMAP_HEADER(uint32_t, Int32Ty, Version, \ + llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion)) +#undef COVMAP_HEADER +/* COVMAP_HEADER end. 
*/ + + +#ifdef INSTR_PROF_SECT_ENTRY +#define INSTR_PROF_DATA_DEFINED +INSTR_PROF_SECT_ENTRY(IPSK_data, \ + INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \ + INSTR_PROF_DATA_COFF, "__DATA,") +INSTR_PROF_SECT_ENTRY(IPSK_cnts, \ + INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \ + INSTR_PROF_CNTS_COFF, "__DATA,") +INSTR_PROF_SECT_ENTRY(IPSK_name, \ + INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \ + INSTR_PROF_NAME_COFF, "__DATA,") +INSTR_PROF_SECT_ENTRY(IPSK_vals, \ + INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \ + INSTR_PROF_VALS_COFF, "__DATA,") +INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \ + INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \ + INSTR_PROF_VNODES_COFF, "__DATA,") +INSTR_PROF_SECT_ENTRY(IPSK_covmap, \ + INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \ + INSTR_PROF_COVMAP_COFF, "__LLVM_COV,") +INSTR_PROF_SECT_ENTRY(IPSK_covfun, \ + INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \ + INSTR_PROF_COVFUN_COFF, "__LLVM_COV,") +INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \ + INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \ + INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,") + +#undef INSTR_PROF_SECT_ENTRY +#endif + + +#ifdef INSTR_PROF_VALUE_PROF_DATA +#define INSTR_PROF_DATA_DEFINED + +#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255 +/*! + * This is the header of the data structure that defines the on-disk + * layout of the value profile data of a particular kind for one function. + */ +typedef struct ValueProfRecord { + /* The kind of the value profile record. */ + uint32_t Kind; + /* + * The number of value profile sites. It is guaranteed to be non-zero; + * otherwise the record for this kind won't be emitted. + */ + uint32_t NumValueSites; + /* + * The first element of the array that stores the number of profiled + * values for each value site. The size of the array is NumValueSites. + * Since NumValueSites is greater than zero, there is at least one + * element in the array. + */ + uint8_t SiteCountArray[1]; + + /* + * The fake declaration is for documentation purposes only. + * Align the start of the next field to an 8-byte boundary. + uint8_t Padding[X]; + */ + + /* The array of value profile data. The size of the array is the sum + * of all elements in SiteCountArray[]. + InstrProfValueData ValueData[]; + */ + +#ifdef __cplusplus + /*! + * Return the number of value sites. + */ + uint32_t getNumValueSites() const { return NumValueSites; } + /*! + * Read data from this record and save it to Record. + */ + void deserializeTo(InstrProfRecord &Record, + InstrProfSymtab *SymTab); + /* + * In-place byte swap: + * Do byte swap for this instance. \c Old is the original order before + * the swap, and \c New is the new byte order. + */ + void swapBytes(support::endianness Old, support::endianness New); +#endif +} ValueProfRecord; + +/*! + * Per-function header/control data structure for value profiling + * data in indexed format. + */ +typedef struct ValueProfData { + /* + * Total size in bytes including this field. It must be a multiple + * of sizeof(uint64_t). + */ + uint32_t TotalSize; + /* + * The number of value profile kinds that have value profile data. + * In this implementation, a value profile kind is considered to + * have profile data if the number of value profile sites for the + * kind is not zero. More aggressively, the implementation can + * choose to check the actual data value: if none of the value sites + * has any profiled values, the kind can be skipped. + */ + uint32_t NumValueKinds; + + /* + * Following are a sequence of variable length records.
The prefix/header + * of each record is defined by the ValueProfRecord type. The number of + * records is NumValueKinds. + * ValueProfRecord Record_1; + * ValueProfRecord Record_N; + */ + +#if __cplusplus + /*! + * Return the total size in bytes of the on-disk value profile data + * given the data stored in Record. + */ + static uint32_t getSize(const InstrProfRecord &Record); + /*! + * Return a pointer to \c ValueProfData instance ready to be streamed. + */ + static std::unique_ptr<ValueProfData> + serializeFrom(const InstrProfRecord &Record); + /*! + * Check the integrity of the record. + */ + Error checkIntegrity(); + /*! + * Return a pointer to \c ValueProfData instance ready to be read. + * All data in the instance are properly byte swapped. The input + * data is assumed to be in little endian order. + */ + static Expected<std::unique_ptr<ValueProfData>> + getValueProfData(const unsigned char *SrcBuffer, + const unsigned char *const SrcBufferEnd, + support::endianness SrcDataEndianness); + /*! + * Swap byte order from \c Endianness order to host byte order. + */ + void swapBytesToHost(support::endianness Endianness); + /*! + * Swap byte order from host byte order to \c Endianness order. + */ + void swapBytesFromHost(support::endianness Endianness); + /*! + * Return the total size of \c ValueProfData. + */ + uint32_t getSize() const { return TotalSize; } + /*! + * Read data from this instance and save it to \c Record. + */ + void deserializeTo(InstrProfRecord &Record, + InstrProfSymtab *SymTab); + void operator delete(void *ptr) { ::operator delete(ptr); } +#endif +} ValueProfData; + +/* + * The closure is designed to abstract away two types of value profile data: + * - InstrProfRecord which is the primary data structure used to + * represent profile data in host tools (reader, writer, and profile-use) + * - value profile runtime data structure suitable to be used by C + * runtime library. + * + * Both sources of data need to serialize to disk/memory-buffer in a common + * format: ValueProfData. The abstraction allows compiler-rt's raw profiler + * writer to share the same format and code with the indexed profile writer. + * + * For documentation of the member methods below, refer to the corresponding + * methods in class InstrProfRecord. + */ +typedef struct ValueProfRecordClosure { + const void *Record; + uint32_t (*GetNumValueKinds)(const void *Record); + uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind); + uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind); + uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S); + + /* + * After extracting the value profile data from the value profile record, + * this method is used to map the in-memory value to on-disk value. If + * the method is null, the value will be written out untranslated.
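+ * (For example, for IPVK_IndirectCallTarget the remapping converts a raw + * target function address into the MD5 hash of the target's name, as + * described for that kind above.)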
+ */ + uint64_t (*RemapValueData)(uint32_t, uint64_t Value); + void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K, + uint32_t S); + ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes); +} ValueProfRecordClosure; + +INSTR_PROF_VISIBILITY ValueProfRecord * +getFirstValueProfRecord(ValueProfData *VPD); +INSTR_PROF_VISIBILITY ValueProfRecord * +getValueProfRecordNext(ValueProfRecord *VPR); +INSTR_PROF_VISIBILITY InstrProfValueData * +getValueProfRecordValueData(ValueProfRecord *VPR); +INSTR_PROF_VISIBILITY uint32_t +getValueProfRecordHeaderSize(uint32_t NumValueSites); + +#undef INSTR_PROF_VALUE_PROF_DATA +#endif /* INSTR_PROF_VALUE_PROF_DATA */ + + +#ifdef INSTR_PROF_COMMON_API_IMPL +#define INSTR_PROF_DATA_DEFINED +#ifdef __cplusplus +#define INSTR_PROF_INLINE inline +#define INSTR_PROF_NULLPTR nullptr +#else +#define INSTR_PROF_INLINE +#define INSTR_PROF_NULLPTR NULL +#endif + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif + +/*! + * Return the \c ValueProfRecord header size including the + * padding bytes. + */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) { + uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) + + sizeof(uint8_t) * NumValueSites; + /* Round the size to a multiple of 8 bytes. */ + Size = (Size + 7) & ~7; + return Size; +} + +/*! + * Return the total size of the value profile record including the + * header and the value data. + */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +uint32_t getValueProfRecordSize(uint32_t NumValueSites, + uint32_t NumValueData) { + return getValueProfRecordHeaderSize(NumValueSites) + + sizeof(InstrProfValueData) * NumValueData; +} + +/*! + * Return the pointer to the start of the value data array. + */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) { + return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize( + This->NumValueSites)); +} + +/*! + * Return the total number of value data for \c This record. + */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) { + uint32_t NumValueData = 0; + uint32_t I; + for (I = 0; I < This->NumValueSites; I++) + NumValueData += This->SiteCountArray[I]; + return NumValueData; +} + +/*! + * Use this method to advance from \c This to the next \c ValueProfRecord. + */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) { + uint32_t NumValueData = getValueProfRecordNumValueData(This); + return (ValueProfRecord *)((char *)This + + getValueProfRecordSize(This->NumValueSites, + NumValueData)); +} + +/*! + * Return the first \c ValueProfRecord instance. + */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) { + return (ValueProfRecord *)((char *)This + sizeof(ValueProfData)); +} + +/* Closure-based interfaces. */ + +/*! + * Return the total size in bytes of the on-disk value profile data + * given the data stored in Record.
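+ * (Worked example: one active kind with NumValueSites == 3 and 5 values in + * total contributes getValueProfRecordSize(3, 5) bytes, i.e. a header of + * offsetof 8 + 3 site-count bytes rounded up to 16, plus + * 5 * sizeof(InstrProfValueData) == 80, for 96 bytes on top of + * sizeof(ValueProfData).)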
+ */ +INSTR_PROF_VISIBILITY uint32_t +getValueProfDataSize(ValueProfRecordClosure *Closure) { + uint32_t Kind; + uint32_t TotalSize = sizeof(ValueProfData); + const void *Record = Closure->Record; + + for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) { + uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind); + if (!NumValueSites) + continue; + TotalSize += getValueProfRecordSize(NumValueSites, + Closure->GetNumValueData(Record, Kind)); + } + return TotalSize; +} + +/*! + * Extract value profile data of a function for the profile kind \c ValueKind + * from the \c Closure and serialize the data into \c This record instance. + */ +INSTR_PROF_VISIBILITY void +serializeValueProfRecordFrom(ValueProfRecord *This, + ValueProfRecordClosure *Closure, + uint32_t ValueKind, uint32_t NumValueSites) { + uint32_t S; + const void *Record = Closure->Record; + This->Kind = ValueKind; + This->NumValueSites = NumValueSites; + InstrProfValueData *DstVD = getValueProfRecordValueData(This); + + for (S = 0; S < NumValueSites; S++) { + uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S); + This->SiteCountArray[S] = ND; + Closure->GetValueForSite(Record, DstVD, ValueKind, S); + DstVD += ND; + } +} + +/*! + * Extract value profile data of a function from the \c Closure + * and serialize the data into \c DstData if it is not NULL or heap + * memory allocated by the \c Closure's allocator method. If \c + * DstData is not null, the caller is expected to set the TotalSize + * in DstData. + */ +INSTR_PROF_VISIBILITY ValueProfData * +serializeValueProfDataFrom(ValueProfRecordClosure *Closure, + ValueProfData *DstData) { + uint32_t Kind; + uint32_t TotalSize = + DstData ? DstData->TotalSize : getValueProfDataSize(Closure); + + ValueProfData *VPD = + DstData ? DstData : Closure->AllocValueProfData(TotalSize); + + VPD->TotalSize = TotalSize; + VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record); + ValueProfRecord *VR = getFirstValueProfRecord(VPD); + for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) { + uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind); + if (!NumValueSites) + continue; + serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites); + VR = getValueProfRecordNext(VR); + } + return VPD; +} + +#undef INSTR_PROF_COMMON_API_IMPL +#endif /* INSTR_PROF_COMMON_API_IMPL */ + +/*============================================================================*/ + +#ifndef INSTR_PROF_DATA_DEFINED + +#ifndef INSTR_PROF_DATA_INC +#define INSTR_PROF_DATA_INC + +/* Helper macros. */ +#define INSTR_PROF_SIMPLE_QUOTE(x) #x +#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x) +#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y +#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y) + +/* Magic number to detect file format and endianness. + * Use 255 at one end, since no UTF-8 file can use that character. Avoid 0, + * so that utilities, like strings, don't grab it as a string. 129 is also + * invalid UTF-8, and high enough to be interesting. + * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR" + * for 32-bit platforms. 
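+ * Laid out byte by byte from the most significant end, the 64-bit magic + * below therefore reads 0xFF 'l' 'p' 'r' 'o' 'f' 'r' 0x81, so a reader can + * also infer the endianness of a raw profile from which end of the file + * magic the 0xFF byte appears at.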
+ */ +#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \ + (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \ + (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129 +#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \ + (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \ + (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129 + +/* Raw profile format version (start from 1). */ +#define INSTR_PROF_RAW_VERSION 7 +/* Indexed profile format version (start from 1). */ +#define INSTR_PROF_INDEX_VERSION 7 +/* Coverage mapping format version (start from 0). */ +#define INSTR_PROF_COVMAP_VERSION 5 + +/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the + * version for other variants of the profile. We set the lowest bit of the upper + * 8 bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation + * generated profile, and 0 if this is a Clang FE generated profile. + * 1 in bit 57 indicates there are context-sensitive records in the profile. + */ +#define VARIANT_MASKS_ALL 0xff00000000000000ULL +#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL) +#define VARIANT_MASK_IR_PROF (0x1ULL << 56) +#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57) +#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58) +#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version +#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime +#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias + +/* The variable that holds the name of the profile data + * specified via the command line. */ +#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename + +/* section name strings common to all targets other + than WIN32 */ +#define INSTR_PROF_DATA_COMMON __llvm_prf_data +#define INSTR_PROF_NAME_COMMON __llvm_prf_names +#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts +#define INSTR_PROF_VALS_COMMON __llvm_prf_vals +#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds +#define INSTR_PROF_COVMAP_COMMON __llvm_covmap +#define INSTR_PROF_COVFUN_COMMON __llvm_covfun +#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile +/* Windows section names. Because these section names contain dollar characters, + * they must be quoted. + */ +#define INSTR_PROF_DATA_COFF ".lprfd$M" +#define INSTR_PROF_NAME_COFF ".lprfn$M" +#define INSTR_PROF_CNTS_COFF ".lprfc$M" +#define INSTR_PROF_VALS_COFF ".lprfv$M" +#define INSTR_PROF_VNODES_COFF ".lprfnd$M" +#define INSTR_PROF_COVMAP_COFF ".lcovmap$M" +#define INSTR_PROF_COVFUN_COFF ".lcovfun$M" +#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M" + +#ifdef _WIN32 +/* Runtime section names and name strings. */ +#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF +#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF +#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF +/* Array of pointers. Each pointer points to a list + * of value nodes associated with one value site. + */ +#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF +/* Value profile nodes section. */ +#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF +#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF +#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF +#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF +#else +/* Runtime section names and name strings.
*/ +#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON) +#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON) +#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON) +/* Array of pointers. Each pointer points to a list + * of value nodes associated with one value site. + */ +#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON) +/* Value profile nodes section. */ +#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON) +#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON) +#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON) +/* Order file instrumentation. */ +#define INSTR_PROF_ORDERFILE_SECT_NAME \ + INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON) +#endif + +#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer +#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR \ + INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME) +#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx +#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR \ + INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME) + +/* Macros to define start/stop section symbol for a given + * section on Linux. For instance + * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will + * expand to __start___llvm_prf_data + */ +#define INSTR_PROF_SECT_START(Sect) \ + INSTR_PROF_CONCAT(__start_,Sect) +#define INSTR_PROF_SECT_STOP(Sect) \ + INSTR_PROF_CONCAT(__stop_,Sect) + +/* Value Profiling API linkage name. */ +#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target +#define INSTR_PROF_VALUE_PROF_FUNC_STR \ + INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC) +#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC __llvm_profile_instrument_memop +#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR \ + INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_MEMOP_FUNC) + +/* InstrProfile per-function control data alignment. */ +#define INSTR_PROF_DATA_ALIGNMENT 8 + +/* The data structure that represents a value tracked by the + * value profiler. + */ +typedef struct InstrProfValueData { + /* Profiled value. */ + uint64_t Value; + /* Number of times the value appears in the training run. */ + uint64_t Count; +} InstrProfValueData; + +#endif /* INSTR_PROF_DATA_INC */ + +#ifndef INSTR_ORDER_FILE_INC +/* The maximal # of functions: 128*1024 (the buffer size will be 128*4 KB). */ +#define INSTR_ORDER_FILE_BUFFER_SIZE 131072 +#define INSTR_ORDER_FILE_BUFFER_BITS 17 +#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff +#endif /* INSTR_ORDER_FILE_INC */ +#else +#undef INSTR_PROF_DATA_DEFINED +#endif + +#undef COVMAP_V2_OR_V3 + +#ifdef INSTR_PROF_VALUE_PROF_MEMOP_API + +#ifdef __cplusplus +#define INSTR_PROF_INLINE inline +#else +#define INSTR_PROF_INLINE +#endif + +/* The value range buckets (22 buckets) for the memop size value profiling look + * like: + * + * [0, 0] + * [1, 1] + * [2, 2] + * [3, 3] + * [4, 4] + * [5, 5] + * [6, 6] + * [7, 7] + * [8, 8] + * [9, 15] + * [16, 16] + * [17, 31] + * [32, 32] + * [33, 63] + * [64, 64] + * [65, 127] + * [128, 128] + * [129, 255] + * [256, 256] + * [257, 511] + * [512, 512] + * [513, UINT64_MAX] + * + * Each range has a 'representative value' which is the lower end value of the + * range and is used to store in the runtime profile data records and the VP + * metadata. For example, it's 2 for [2, 2] and 65 for [65, 127]. + */ +#define INSTR_PROF_NUM_BUCKETS 22 + +/* + * Clz and Popcount.
This code was copied from + * compiler-rt/lib/fuzzer/{FuzzerBuiltins.h,FuzzerBuiltinsMsvc.h} and + * llvm/include/llvm/Support/MathExtras.h. + */ +#if defined(_MSC_VER) && !defined(__clang__) + +#include <intrin.h> +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +int InstProfClzll(unsigned long long X) { + unsigned long LeadZeroIdx = 0; +#if !defined(_M_ARM64) && !defined(_M_X64) + // Scan the high 32 bits. + if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X >> 32))) + return (int)(63 - (LeadZeroIdx + 32)); // Create a bit offset + // from the MSB. + // Scan the low 32 bits. + if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X))) + return (int)(63 - LeadZeroIdx); +#else + if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx; +#endif + return 64; +} +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +int InstProfPopcountll(unsigned long long X) { + // This code originates from https://reviews.llvm.org/rG30626254510f. + unsigned long long v = X; + v = v - ((v >> 1) & 0x5555555555555555ULL); + v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); + v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return (int)((unsigned long long)(v * 0x0101010101010101ULL) >> 56); +} + +#else + +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +int InstProfClzll(unsigned long long X) { return __builtin_clzll(X); } +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE +int InstProfPopcountll(unsigned long long X) { return __builtin_popcountll(X); } + +#endif /* defined(_MSC_VER) && !defined(__clang__) */ + +/* Map an (observed) memop size value to the representative value of its range. + * For example, 5 -> 5, 22 -> 17, 99 -> 65, 256 -> 256, 1001 -> 513. */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE uint64_t +InstrProfGetRangeRepValue(uint64_t Value) { + if (Value <= 8) + // The first ranges are individually tracked. Use the value as is. + return Value; + else if (Value >= 513) + // The last range is mapped to its lowest value. + return 513; + else if (InstProfPopcountll(Value) == 1) + // If it's a power of two, use it as is. + return Value; + else + // Otherwise, take the previous power of two + 1. + return (UINT64_C(1) << (64 - InstProfClzll(Value) - 1)) + 1; +} + +/* Return true if the range that an (observed) memop size value belongs to has + * only a single value in the range. For example, 0 -> true, 8 -> true, 10 -> + * false, 64 -> true, 100 -> false, 513 -> false. */ +INSTR_PROF_VISIBILITY INSTR_PROF_INLINE unsigned +InstrProfIsSingleValRange(uint64_t Value) { + if (Value <= 8) + // The first ranges are individually tracked. + return 1; + else if (InstProfPopcountll(Value) == 1) + // If it's a power of two, there's only one value. + return 1; + else + // Otherwise, there's more than one value in the range. + return 0; +} + +#endif /* INSTR_PROF_VALUE_PROF_MEMOP_API */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ptwriteintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ptwriteintrin.h new file mode 100644 index 0000000..0a04f7c --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/ptwriteintrin.h @@ -0,0 +1,37 @@ +/*===------------ ptwriteintrin.h - PTWRITE intrinsic --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use <ptwriteintrin.h> directly; include <x86intrin.h> or <immintrin.h> instead." +#endif + +#ifndef __PTWRITEINTRIN_H +#define __PTWRITEINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("ptwrite"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_ptwrite32(unsigned int __value) { + __builtin_ia32_ptwrite32(__value); +} + +#ifdef __x86_64__ + +static __inline__ void __DEFAULT_FN_ATTRS +_ptwrite64(unsigned long long __value) { + __builtin_ia32_ptwrite64(__value); +} + +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* __PTWRITEINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/rdseedintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/rdseedintrin.h new file mode 100644 index 0000000..ccb3d2d --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/rdseedintrin.h @@ -0,0 +1,42 @@ +/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> or <immintrin.h> instead." +#endif + +#ifndef __RDSEEDINTRIN_H +#define __RDSEEDINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed"))) + +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed16_step(unsigned short *__p) +{ + return __builtin_ia32_rdseed16_step(__p); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed32_step(unsigned int *__p) +{ + return __builtin_ia32_rdseed32_step(__p); +} + +#ifdef __x86_64__ +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed64_step(unsigned long long *__p) +{ + return __builtin_ia32_rdseed64_step(__p); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __RDSEEDINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/riscv_vector.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/riscv_vector.h new file mode 100644 index 0000000..7fc360f --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/riscv_vector.h @@ -0,0 +1,123865 @@ +/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __RISCV_VECTOR_H +#define __RISCV_VECTOR_H + +#include <stdint.h> +#include <stddef.h> + +#ifndef __riscv_vector +#error "Vector intrinsics require the vector extension."
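+/* The vector extension is enabled via a -march string that includes the + "v" extension, e.g. -march=rv64gcv, which makes the compiler predefine + __riscv_vector. */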
+#endif + +#ifdef __cplusplus +extern "C" { +#endif + + +#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5) +#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6) +#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7) +#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0) +#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1) +#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2) +#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3) + +#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6) +#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7) +#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0) +#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1) +#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2) +#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3) + +#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7) +#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0) +#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1) +#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2) +#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3) + +#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0) +#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1) +#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2) +#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3) + + +#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5) +#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6) +#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7) +#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0) +#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1) +#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2) +#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3) + +#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6) +#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7) +#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0) +#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1) +#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2) +#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3) + +#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7) +#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0) +#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1) +#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2) +#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3) + +#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0) +#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1) +#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2) +#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3) + +typedef __rvv_bool64_t vbool64_t; +typedef __rvv_bool32_t vbool32_t; +typedef __rvv_bool16_t vbool16_t; +typedef __rvv_bool8_t vbool8_t; +typedef __rvv_bool4_t vbool4_t; +typedef __rvv_bool2_t vbool2_t; +typedef __rvv_bool1_t vbool1_t; +typedef __rvv_int8mf8_t vint8mf8_t; +typedef __rvv_uint8mf8_t vuint8mf8_t; +typedef __rvv_int8mf4_t vint8mf4_t; +typedef __rvv_uint8mf4_t vuint8mf4_t; +typedef __rvv_int8mf2_t vint8mf2_t; +typedef __rvv_uint8mf2_t vuint8mf2_t; +typedef __rvv_int8m1_t vint8m1_t; +typedef __rvv_uint8m1_t vuint8m1_t; +typedef __rvv_int8m2_t vint8m2_t; +typedef __rvv_uint8m2_t vuint8m2_t; +typedef __rvv_int8m4_t vint8m4_t; +typedef 
__rvv_uint8m4_t vuint8m4_t; +typedef __rvv_int8m8_t vint8m8_t; +typedef __rvv_uint8m8_t vuint8m8_t; +typedef __rvv_int16mf4_t vint16mf4_t; +typedef __rvv_uint16mf4_t vuint16mf4_t; +typedef __rvv_int16mf2_t vint16mf2_t; +typedef __rvv_uint16mf2_t vuint16mf2_t; +typedef __rvv_int16m1_t vint16m1_t; +typedef __rvv_uint16m1_t vuint16m1_t; +typedef __rvv_int16m2_t vint16m2_t; +typedef __rvv_uint16m2_t vuint16m2_t; +typedef __rvv_int16m4_t vint16m4_t; +typedef __rvv_uint16m4_t vuint16m4_t; +typedef __rvv_int16m8_t vint16m8_t; +typedef __rvv_uint16m8_t vuint16m8_t; +typedef __rvv_int32mf2_t vint32mf2_t; +typedef __rvv_uint32mf2_t vuint32mf2_t; +typedef __rvv_int32m1_t vint32m1_t; +typedef __rvv_uint32m1_t vuint32m1_t; +typedef __rvv_int32m2_t vint32m2_t; +typedef __rvv_uint32m2_t vuint32m2_t; +typedef __rvv_int32m4_t vint32m4_t; +typedef __rvv_uint32m4_t vuint32m4_t; +typedef __rvv_int32m8_t vint32m8_t; +typedef __rvv_uint32m8_t vuint32m8_t; +typedef __rvv_int64m1_t vint64m1_t; +typedef __rvv_uint64m1_t vuint64m1_t; +typedef __rvv_int64m2_t vint64m2_t; +typedef __rvv_uint64m2_t vuint64m2_t; +typedef __rvv_int64m4_t vint64m4_t; +typedef __rvv_uint64m4_t vuint64m4_t; +typedef __rvv_int64m8_t vint64m8_t; +typedef __rvv_uint64m8_t vuint64m8_t; +#if defined(__riscv_zfh) +typedef __rvv_float16mf4_t vfloat16mf4_t; +typedef __rvv_float16mf2_t vfloat16mf2_t; +typedef __rvv_float16m1_t vfloat16m1_t; +typedef __rvv_float16m2_t vfloat16m2_t; +typedef __rvv_float16m4_t vfloat16m4_t; +typedef __rvv_float16m8_t vfloat16m8_t; +#endif +#if defined(__riscv_f) +typedef __rvv_float32mf2_t vfloat32mf2_t; +typedef __rvv_float32m1_t vfloat32m1_t; +typedef __rvv_float32m2_t vfloat32m2_t; +typedef __rvv_float32m4_t vfloat32m4_t; +typedef __rvv_float32m8_t vfloat32m8_t; +#endif +#if defined(__riscv_d) +typedef __rvv_float64m1_t vfloat64m1_t; +typedef __rvv_float64m2_t vfloat64m2_t; +typedef __rvv_float64m4_t vfloat64m4_t; +typedef __rvv_float64m8_t vfloat64m8_t; +#endif + +#define vadd_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vadd_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vadd_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vadd_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vadd_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vadd_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vadd_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vadd_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vadd_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vadd_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vadd_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define 
vadd_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vadd_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vadd_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vadd_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vadd_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vadd_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vadd_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vadd_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vadd_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vadd_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vadd_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vadd_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vadd_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vadd_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vadd_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vadd_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vadd_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vadd_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vadd_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vadd_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vadd_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vadd_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vadd_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vadd_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) 
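+/* Usage sketch (illustrative, with hypothetical vectors va and vb already + loaded): size_t vl = vsetvl_e32m1(n); vint32m1_t vc = + vadd_vv_i32m1(va, vb, vl); adds the first vl 32-bit lanes per + strip-mined iteration. */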
+#define vadd_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vadd_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vadd_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vadd_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vadd_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vadd_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vadd_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vadd_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vadd_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u16mf4((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u16mf2((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u16m1((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u16m2((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u16m4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u16m8((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u32mf2((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u32m1(op0, op1, 
op2) \ +__builtin_rvv_vwaddu_vv_u32m1((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u32m2((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u32m4((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u32m8((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u64m1((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u64m2((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u64m4((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwaddu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_vv_u64m8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vwaddu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8m1((const uint8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8m2((const uint8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8m4((const uint8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8m8((const uint8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vluxei8_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (const uint8_t *)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8mf2((const uint8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8mf4((const uint8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u8mf8((const uint8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vlse64_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vlse64_v_u64m1((const uint64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vlse64_v_u64m2((const uint64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vlse64_v_u64m4((const uint64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vlse64_v_u64m8((const uint64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vsse16_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsse16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsse16_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_i16m2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsse16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsse16_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_i16m4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsse16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsse16_v_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_i16m8((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vsse16_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_i16m8_m((vbool2_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), 
(vint16m8_t)(op3), (size_t)(op4)) +#define vsse16_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsse16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsse16_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsse16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsse16_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsse16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsse16_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_u16m2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsse16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsse16_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_u16m4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsse16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsse16_v_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_u16m8((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vsse16_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_u16m8_m((vbool2_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsse16_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsse16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsse16_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsse16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsse32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsse32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsse32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_i32m2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsse32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsse32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_i32m4((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsse32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), 
(vint32m4_t)(op3), (size_t)(op4)) +#define vsse32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_i32m8((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsse32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsse32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsse32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsse32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsse32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsse32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_u32m2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsse32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsse32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_u32m4((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsse32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsse32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_u32m8((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vsse32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsse32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsse32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsse64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsse64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsse64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_i64m2((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsse64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsse64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_i64m4((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vsse64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsse64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_i64m8((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vsse64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), 
(vint64m8_t)(op3), (size_t)(op4)) +#define vsse64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsse64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsse64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_u64m2((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsse64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsse64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_u64m4((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsse64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsse64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse64_v_u64m8((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vsse64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei16_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i8m1((const int8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i8m2((const int8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i8m4((const int8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vluxei16_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxei16_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i8mf2((const int8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i8mf4((const int8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i8mf8((const int8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u8m1((const uint8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define 
vluxei16_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u8m2((const uint8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u8m4((const uint8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vluxei16_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxei16_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u8mf2((const uint8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u8mf4((const uint8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u8mf8((const uint8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei32_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i8m1((const int8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i8m2((const int8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i8mf2((const int8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i8mf4((const int8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i8mf8((const int8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u8m1((const uint8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u8m2((const 
uint8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u8mf2((const uint8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u8mf4((const uint8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u8mf8((const uint8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i8m1((const int8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i8mf2((const int8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i8mf4((const int8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i8mf8((const int8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei64_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u8m1((const uint8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u8mf2((const uint8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u8mf4((const uint8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u8mf8((const uint8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define 
vluxei64_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i16m1((const int16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i16m2((const int16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i16m4((const int16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i16m8((const int16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vluxei8_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxei8_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i16mf2((const int16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i16mf4((const int16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei8_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u16m1((const uint16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u16m2((const uint16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u16m4((const uint16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u16m8((const uint16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vluxei8_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u16mf2((const uint16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_u16mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vluxei8_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u16mf4((const uint16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei16_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i16m1((const int16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i16m2((const int16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i16m4((const int16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i16m8((const int16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vluxei16_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxei16_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i16mf2((const int16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i16mf4((const int16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u16m1((const uint16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u16m2((const uint16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u16m4((const uint16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u16m8((const uint16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vluxei16_v_u16m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vluxei16_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxei16_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u16mf2((const uint16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u16mf4((const uint16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei32_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i16m1((const int16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i16m2((const int16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i16m4((const int16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i16mf2((const int16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i16mf4((const int16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u16m1((const uint16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u16m2((const uint16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u16m4((const uint16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u16mf2((const uint16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_u16mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vluxei32_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u16mf4((const uint16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i16m1((const int16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i16m2((const int16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i16mf2((const int16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i16mf4((const int16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei64_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u16m1((const uint16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u16m2((const uint16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u16mf2((const uint16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u16mf4((const uint16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i32m1((const int32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i32m2((const int32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_i32m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vluxei8_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i32m4((const int32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i32m8((const int32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i32mf2((const int32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei8_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u32m1((const uint32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u32m2((const uint32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u32m4((const uint32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u32m8((const uint32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u32mf2((const uint32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei16_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i32m1((const int32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i32m2((const int32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i32m4((const int32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i32m4_m((vbool8_t)(op0), 
(vint32m4_t)(op1), (const int32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i32m8((const int32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i32mf2((const int32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u32m1((const uint32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u32m2((const uint32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u32m4((const uint32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u32m8((const uint32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u32mf2((const uint32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei32_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i32m1((const int32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i32m2((const int32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i32m4((const int32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i32m8((const int32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), 
(const int32_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i32mf2((const int32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u32m1((const uint32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u32m2((const uint32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u32m4((const uint32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u32m8((const uint32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u32mf2((const uint32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i32m1((const int32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i32m2((const int32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i32m4((const int32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i32mf2((const int32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei64_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u32m1((const uint32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const 
uint32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u32m2((const uint32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u32m4((const uint32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u32mf2((const uint32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i64m1((const int64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei8_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i64m2((const int64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i64m4((const int64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i64m8((const int64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u64m1((const uint64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei8_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u64m2((const uint64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u64m4((const uint64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_u64m8((const uint64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) 
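[Editor's aside, not part of the patch: the wrappers above follow the pre-1.0 (v0.10) RVV intrinsic convention. Each macro casts its operands to the expected vector types and forwards to the matching __builtin_rvv_* builtin, with the active vector length passed as the trailing size_t argument; the masked _m forms (e.g. vlse64_v_u64m1_m) prepend a vbool*_t mask and a mask-off vector ahead of the regular operands. A minimal usage sketch follows, assuming a toolchain built with -march=rv64gcv and the vsetvl_e64m1() helper defined elsewhere in this same header; the function and parameter names below are purely illustrative.]

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Copy one column of a row-major uint64_t matrix into a dense array:
 * vlse64_v_u64m1() loads with a byte stride, vsse64_v_u64m1() stores
 * with the unit stride sizeof(uint64_t). The loop strip-mines over
 * n elements, processing vl elements per pass. */
void copy_strided_column(uint64_t *dst, const uint64_t *col0,
                         ptrdiff_t row_stride_bytes, size_t n) {
  for (size_t done = 0; done < n; /* advanced by vl below */) {
    size_t vl = vsetvl_e64m1(n - done);      /* elements this pass */
    vuint64m1_t v = vlse64_v_u64m1(
        (const uint64_t *)((const char *)col0 +
                           (ptrdiff_t)done * row_stride_bytes),
        row_stride_bytes, vl);               /* strided column load */
    vsse64_v_u64m1(dst + done, (ptrdiff_t)sizeof(uint64_t), v, vl);
    done += vl;
  }
}

[Only vlse64_v_u64m1 and vsse64_v_u64m1 from the macros above are used; strides are in bytes, matching the (ptrdiff_t) casts in their definitions.]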
+#define vluxei16_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i64m1((const int64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei16_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i64m2((const int64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i64m4((const int64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_i64m8((const int64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u64m1((const uint64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei16_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u64m2((const uint64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u64m4((const uint64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_u64m8((const uint64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i64m1((const int64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei32_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i64m2((const int64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i64m4((const int64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define 
vluxei32_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_i64m8((const int64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u64m1((const uint64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u64m2((const uint64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u64m4((const uint64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_u64m8((const uint64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i64m1((const int64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei64_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i64m2((const int64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i64m4((const int64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_i64m8((const int64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u64m1((const uint64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei64_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u64m2((const uint64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_u64m4(op0, op1, 
op2) \ +__builtin_rvv_vluxei64_v_u64m4((const uint64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_u64m8((const uint64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8m1((const int8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8m2((const int8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8m4((const int8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8m8((const int8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (const int8_t *)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8mf2((const int8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8mf4((const int8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i8mf8((const int8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8m1((const uint8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8m2((const uint8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8m4((const uint8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8m4_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vloxei8_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8m8((const uint8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (const uint8_t *)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8mf2((const uint8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8mf4((const uint8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u8mf8((const uint8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei16_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i8m1((const int8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i8m2((const int8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i8m4((const int8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vloxei16_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxei16_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i8mf2((const int8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i8mf4((const int8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i8mf8((const int8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u8m1((const uint8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t 
*)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u8m2((const uint8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u8m4((const uint8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vloxei16_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxei16_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u8mf2((const uint8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u8mf4((const uint8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u8mf8((const uint8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei32_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i8m1((const int8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i8m2((const int8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i8mf2((const int8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i8mf4((const int8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i8mf8((const int8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei32_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u8m1((const uint8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_u8m2(op0, 
op1, op2) \ +__builtin_rvv_vloxei32_v_u8m2((const uint8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u8mf2((const uint8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u8mf4((const uint8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u8mf8((const uint8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i8m1((const int8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i8mf2((const int8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i8mf4((const int8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i8mf8((const int8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei64_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u8m1((const uint8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u8mf2((const uint8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u8mf4((const uint8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u8mf8((const uint8_t 
*)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i16m1((const int16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i16m2((const int16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i16m4((const int16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i16m8((const int16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vloxei8_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vloxei8_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i16mf2((const int16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i16mf4((const int16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei8_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u16m1((const uint16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u16m2((const uint16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u16m4((const uint16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u16m8((const uint16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vloxei8_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vloxei8_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u16mf2((const uint16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_u16mf2_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u16mf4((const uint16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei16_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i16m1((const int16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i16m2((const int16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i16m4((const int16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i16m8((const int16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vloxei16_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxei16_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i16mf2((const int16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i16mf4((const int16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u16m1((const uint16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u16m2((const uint16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u16m4((const uint16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u16m8((const uint16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vloxei16_v_u16m8_m(op0, op1, op2, op3, op4) 
\ +__builtin_rvv_vloxei16_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxei16_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u16mf2((const uint16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u16mf4((const uint16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei32_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i16m1((const int16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i16m2((const int16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i16m4((const int16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i16mf2((const int16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i16mf4((const int16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei32_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u16m1((const uint16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u16m2((const uint16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u16m4((const uint16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u16mf2((const uint16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_u16mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vloxei32_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u16mf4((const uint16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i16m1((const int16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i16m2((const int16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i16mf2((const int16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i16mf4((const int16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei64_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u16m1((const uint16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u16m2((const uint16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u16mf2((const uint16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u16mf4((const uint16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i32m1((const int32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i32m2((const int32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i32m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vloxei8_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i32m4((const int32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i32m8((const int32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i32mf2((const int32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei8_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u32m1((const uint32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u32m2((const uint32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u32m4((const uint32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u32m8((const uint32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u32mf2((const uint32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei16_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i32m1((const int32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i32m2((const int32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i32m4((const int32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i32m4_m((vbool8_t)(op0), 
(vint32m4_t)(op1), (const int32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i32m8((const int32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i32mf2((const int32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u32m1((const uint32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u32m2((const uint32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u32m4((const uint32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u32m8((const uint32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u32mf2((const uint32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei32_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i32m1((const int32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i32m2((const int32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i32m4((const int32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i32m8((const int32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), 
(const int32_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_i32mf2((const int32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei32_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u32m1((const uint32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u32m2((const uint32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u32m4((const uint32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u32m8((const uint32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_u32mf2((const uint32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i32m1((const int32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i32m2((const int32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i32m4((const int32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i32mf2((const int32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei64_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u32m1((const uint32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const 
uint32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u32m2((const uint32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u32m4((const uint32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u32mf2((const uint32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i64m1((const int64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei8_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i64m2((const int64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i64m4((const int64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_i64m8((const int64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vse8_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vse8_v_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vse8_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8m2((int8_t *)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vse8_v_i8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vse8_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8m4((int8_t *)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vse8_v_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vse8_v_i8m8(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8m8((int8_t *)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vse8_v_i8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8m8_m((vbool1_t)(op0), (int8_t *)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vse8_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vse8_v_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), 
(size_t)(op3)) +#define vse8_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vse8_v_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vse8_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vse8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vse8_v_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vloxei8_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u64m1((const uint64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei8_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u64m2((const uint64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u64m4((const uint64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_u64m8((const uint64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i64m1((const int64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei16_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i64m2((const int64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i64m4((const int64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_i64m8((const int64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u64m1((const uint64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei16_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_u64m2((const uint64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) 
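/* A usage sketch to make the wrapper pattern concrete: the unmasked load
 * forms take (base pointer, index vector, vl) and the `_m` forms prepend a
 * mask register plus a maskedoff operand, with the index vector holding
 * byte offsets. Everything below is illustrative rather than part of the
 * generated header: the function and buffer names are hypothetical, and it
 * assumes the vsetvl/vle/vse wrappers defined elsewhere in this header and
 * a toolchain with the V extension enabled (e.g. -march=rv64gcv).
 *
 *   #include <riscv_vector.h>
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   // Gather: dst[i] = *(const int64_t *)((const char *)base + off[i]).
 *   void gather64(int64_t *dst, const int64_t *base,
 *                 const uint32_t *off, size_t n) {
 *     for (size_t i = 0; i < n; ) {
 *       size_t vl = vsetvl_e64m8(n - i);                 // elements this pass
 *       vuint32m4_t idx = vle32_v_u32m4(off + i, vl);    // EEW=32 byte offsets
 *       vint64m8_t v = vloxei32_v_i64m8(base, idx, vl);  // ordered indexed load
 *       vse64_v_i64m8(dst + i, v, vl);                   // unit-stride store
 *       i += vl;
 *     }
 *   }
 */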
+#define vloxei16_v_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei16_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vloxei16_v_u64m4(op0, op1, op2) \
+__builtin_rvv_vloxei16_v_u64m4((const uint64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vloxei16_v_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei16_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vloxei16_v_u64m8(op0, op1, op2) \
+__builtin_rvv_vloxei16_v_u64m8((const uint64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vloxei16_v_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei16_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vloxei32_v_i64m1(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_i64m1((const int64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vloxei32_v_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vloxei32_v_i64m2(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_i64m2((const int64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vloxei32_v_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vloxei32_v_i64m4(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_i64m4((const int64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vloxei32_v_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vloxei32_v_i64m8(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_i64m8((const int64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vloxei32_v_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vloxei32_v_u64m1(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_u64m1((const uint64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vloxei32_v_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vloxei32_v_u64m2(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_u64m2((const uint64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vloxei32_v_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vloxei32_v_u64m4(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_u64m4((const uint64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vloxei32_v_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vloxei32_v_u64m8(op0, op1, op2) \
+__builtin_rvv_vloxei32_v_u64m8((const uint64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vloxei32_v_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxei32_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vloxei64_v_i64m1(op0, op1, op2) \
+__builtin_rvv_vloxei64_v_i64m1((const int64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
+#define 
vloxei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei64_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i64m2((const int64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i64m4((const int64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_i64m8((const int64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u64m1((const uint64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei64_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u64m2((const uint64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u64m4((const uint64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_u64m8((const uint64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vse8_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vse8_v_u8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vse8_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vse8_v_u8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vse8_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8m4((uint8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vse8_v_u8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vse8_v_u8m8(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8m8((uint8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vse8_v_u8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8m8_m((vbool1_t)(op0), (uint8_t *)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vse8_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8mf2((uint8_t *)(op0), 
(vuint8mf2_t)(op1), (size_t)(op2)) +#define vse8_v_u8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vse8_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vse8_v_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vse8_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vse8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vse8_v_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vwaddu_wv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u16mf4((vuint16mf4_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u16mf2((vuint16mf2_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u16m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u16m1((vuint16m1_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u16m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u16m2((vuint16m2_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u16m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u16m4((vuint16m4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u16m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u16m8((vuint16m8_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u32mf2((vuint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u32m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u32m1((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u32m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u32m2((vuint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u32m4(op0, op1, op2) \ 
+__builtin_rvv_vwaddu_wv_u32m4((vuint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u32m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u32m8((vuint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u64m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u64m1((vuint64m1_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u64m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u64m2((vuint64m2_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u64m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u64m4((vuint64m4_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwaddu_wv_u64m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_wv_u64m8((vuint64m8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vwaddu_wv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsse8_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsse8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsse8_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8m2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsse8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsse8_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8m4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsse8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsse8_v_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8m8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vsse8_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_i8m8_m((vbool1_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vsse8_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsse8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsse8_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsse8_v_i8mf4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsse8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsse8_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsse8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i8m4((int8_t *)(op0), (vuint16m8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint16m8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u8m4((uint8_t *)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u8mf2_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vadd_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vadd_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vadd_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vadd_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), 
(size_t)(op4)) +#define vadd_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vadd_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vadd_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vadd_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vadd_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vadd_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vadd_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vadd_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vadd_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vadd_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vadd_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vadd_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vadd_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vadd_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vadd_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vadd_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vadd_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vadd_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vadd_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vadd_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vadd_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vadd_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vadd_vx_i32m8(op0, op1, op2) \ 
+__builtin_rvv_vadd_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vadd_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vadd_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vadd_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vadd_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vadd_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vadd_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vadd_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vadd_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vadd_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vadd_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vadd_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vadd_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vadd_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vadd_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vadd_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vadd_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vadd_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vadd_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vadd_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vadd_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vadd_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vadd_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vadd_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vadd_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u8mf8((vuint8mf8_t)(op0), 
(vuint8mf8_t)(op1), (size_t)(op2)) +#define vadd_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vadd_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vadd_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vadd_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vadd_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vadd_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vadd_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vadd_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vadd_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vadd_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vadd_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vadd_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vadd_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vadd_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vadd_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vadd_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vadd_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vadd_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vadd_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vadd_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vadd_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vadd_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vadd_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define 
vadd_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vadd_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vadd_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vadd_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vadd_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vadd_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vadd_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vadd_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vadd_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vadd_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vadd_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vadd_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vadd_vx_u16m2(op0, op1, op2) \ 
+__builtin_rvv_vadd_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vadd_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vadd_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vadd_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vadd_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vadd_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vadd_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vadd_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vadd_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vadd_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vadd_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vadd_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vadd_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vadd_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vadd_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vadd_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vadd_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vadd_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vadd_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vadd_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vadd_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vadd_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vadd_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vadd_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vadd_vx_u64m4(op0, op1, op2) \ 
+__builtin_rvv_vadd_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vadd_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vadd_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vadd_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vadd_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vadd_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vsub_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vsub_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsub_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vsub_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsub_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vsub_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsub_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vsub_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vsub_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vsub_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsub_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vsub_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsub_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vsub_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsub_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vsub_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsub_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vsub_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsub_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vsub_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsub_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i16m8((vint16m8_t)(op0), 
(vint16m8_t)(op1), (size_t)(op2)) +#define vsub_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vsub_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vsub_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsub_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vsub_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsub_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vsub_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsub_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vsub_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsub_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vsub_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsub_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vsub_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsub_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vsub_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsub_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vsub_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsub_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vsub_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsub_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vsub_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsub_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vsub_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u8m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vsuxei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vsub_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), 
(int8_t)(op3), (size_t)(op4)) +#define vsub_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vsub_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vsub_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vsub_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vsub_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vsub_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vsub_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vsub_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vsub_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vsub_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vsub_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vsub_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vsub_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vsub_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vsub_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vsub_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vsub_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vsub_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vsub_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vsub_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vsub_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vsub_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vsub_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vsub_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vsub_vx_i64m2(op0, op1, op2) \ 
+__builtin_rvv_vsub_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vsub_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vsub_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vsub_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vsub_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vsub_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vsub_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vsub_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsub_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vsub_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsub_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vsub_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsub_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vsub_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsub_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vsub_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsub_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vsub_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsub_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vsub_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsub_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vsub_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsub_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vsub_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsub_vv_u16m4(op0, op1, op2) \ 
+__builtin_rvv_vsub_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vsub_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsub_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vsub_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsub_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vsub_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsub_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vsub_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsub_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vsub_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsub_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vsub_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsub_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vsub_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsub_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vsub_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsub_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vsub_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsub_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vsub_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsub_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vsub_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsub_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vsub_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), 
(vuint64m4_t)(op3), (size_t)(op4)) +#define vsub_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vsub_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vsub_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vsub_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vsub_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vsub_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vsub_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vsub_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vsub_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vsub_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vsub_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vsub_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vsub_vx_u16mf2(op0, op1, 
op2) \ +__builtin_rvv_vsub_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vsub_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vsub_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vsub_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vsub_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vsub_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vsub_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vsub_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vsub_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vsub_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vsub_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vsub_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vsub_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vsub_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vsub_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vsub_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vsub_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vsub_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vsub_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vsub_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vsub_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vsub_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vsub_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsub_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8m2(op0, op1, op2) \ 
+__builtin_rvv_vrsub_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrsub_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrsub_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrsub_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrsub_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrsub_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrsub_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrsub_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrsub_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrsub_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrsub_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrsub_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrsub_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrsub_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrsub_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i32m1((vint32m1_t)(op0), 
(int32_t)(op1), (size_t)(op2)) +#define vrsub_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrsub_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrsub_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrsub_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrsub_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrsub_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrsub_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrsub_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrsub_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrsub_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrsub_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrsub_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrsub_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrsub_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrsub_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrsub_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrsub_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vrsub_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vrsub_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vrsub_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define 
vrsub_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vrsub_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vrsub_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vrsub_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vrsub_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vrsub_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vrsub_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vrsub_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vrsub_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vrsub_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vrsub_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vrsub_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vrsub_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vrsub_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vrsub_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vrsub_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vrsub_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vrsub_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vrsub_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vrsub_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vrsub_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), 
(size_t)(op2)) +#define vrsub_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vrsub_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vrsub_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vrsub_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vrsub_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vrsub_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vrsub_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vrsub_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vrsub_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vrsub_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vrsub_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vrsub_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vrsub_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vrsub_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrsub_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u16mf4((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u16mf2((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u16m1((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u16m2((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u16m4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u16m8(op0, op1, op2) \ 
+__builtin_rvv_vwaddu_vx_u16m8((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u32mf2((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u32m1((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u32m2((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u32m4((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u32m8((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u64m1((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u64m2((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u64m4((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwaddu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_vx_u64m8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u16mf4((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u16mf2((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), 
(vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u16m1((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u16m2((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u16m4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u16m8((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u32mf2((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u32m1((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u32m2((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u32m4((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u32m8((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u64m1((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u64m2((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u64m4((vuint32m2_t)(op0), 
(vuint32m2_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwsubu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_vv_u64m8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vwsubu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u16mf4((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u16mf2((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u16m1((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u16m2((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u16m4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u16m8((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u32mf2((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u32m1((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u32m2((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u32m4((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define 
vwsubu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u32m8((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u64m1((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u64m2((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u64m4((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_vx_u64m8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwadd_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i16mf4((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vwadd_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwadd_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i16mf2((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vwadd_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwadd_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i16m1((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vwadd_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwadd_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i16m2((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vwadd_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwadd_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i16m4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vwadd_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwadd_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i16m8((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vwadd_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwadd_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i32mf2((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vwadd_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), 
(vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwadd_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i32m1((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vwadd_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwadd_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i32m2((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vwadd_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwadd_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i32m4((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vwadd_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwadd_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i32m8((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vwadd_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwadd_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i64m1((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vwadd_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwadd_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i64m2((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vwadd_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwadd_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i64m4((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vwadd_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwadd_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vwadd_vv_i64m8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vwadd_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vwadd_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i16mf4((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i16mf2((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i16m1((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i16m2((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_vx_i16m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vwadd_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i16m4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i16m8((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i32mf2((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i32m1((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i32m2((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i32m4((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i32m8((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i64m1((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwadd_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i64m2((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwadd_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i64m4((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwadd_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vwadd_vx_i64m8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i8m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsuxei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwsub_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i16mf4((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vwsub_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwsub_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i16mf2((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vwsub_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwsub_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i16m1((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vwsub_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwsub_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i16m2((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vwsub_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwsub_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i16m4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vwsub_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwsub_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i16m8((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vwsub_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwsub_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i32mf2((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vwsub_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwsub_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i32m1((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vwsub_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwsub_vv_i32m2(op0, op1, op2) \ 
+__builtin_rvv_vwsub_vv_i32m2((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vwsub_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwsub_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i32m4((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vwsub_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwsub_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i32m8((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vwsub_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwsub_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i64m1((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vwsub_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwsub_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i64m2((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vwsub_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwsub_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i64m4((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vwsub_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwsub_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vwsub_vv_i64m8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vwsub_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vwsub_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i16mf4((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i16mf2((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i16m1((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i16m2((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i16m4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_vx_i16m8(op0, op1, op2) \ 
+__builtin_rvv_vwsub_vx_i16m8((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i32mf2((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i32m1((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i32m2((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i32m4((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i32m8((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i64m1((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i64m2((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i64m4((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vwsub_vx_i64m8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u16mf4((vuint16mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u16mf2((vuint16mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u16m1(op0, op1, op2) \ 
+__builtin_rvv_vwaddu_wx_u16m1((vuint16m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u16m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u16m2((vuint16m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u16m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u16m4((vuint16m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u16m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u16m8((vuint16m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u32mf2((vuint32mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u32m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u32m1((vuint32m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u32m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u32m2((vuint32m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u32m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u32m4((vuint32m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u32m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u32m8((vuint32m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u64m1(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u64m1((vuint64m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u64m2(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u64m2((vuint64m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u64m4(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u64m4((vuint64m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), 
(uint32_t)(op3), (size_t)(op4)) +#define vwaddu_wx_u64m8(op0, op1, op2) \ +__builtin_rvv_vwaddu_wx_u64m8((vuint64m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwaddu_wx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwaddu_wx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u16mf4((vuint16mf4_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u16mf2((vuint16mf2_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u16m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u16m1((vuint16m1_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u16m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u16m2((vuint16m2_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u16m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u16m4((vuint16m4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u16m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u16m8((vuint16m8_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u32mf2((vuint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u32m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u32m1((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u32m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u32m2((vuint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u32m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u32m4((vuint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u32m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u32m8((vuint32m8_t)(op0), (vuint16m4_t)(op1), 
(size_t)(op2)) +#define vwsubu_wv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u64m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u64m1((vuint64m1_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u64m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u64m2((vuint64m2_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u64m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u64m4((vuint64m4_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwsubu_wv_u64m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_wv_u64m8((vuint64m8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vwsubu_wv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u16mf4((vuint16mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u16mf2((vuint16mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u16m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u16m1((vuint16m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u16m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u16m2((vuint16m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u16m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u16m4((vuint16m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u16m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u16m8((vuint16m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u32mf2((vuint32mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) 
+#define vwsubu_wx_u32m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u32m1((vuint32m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u32m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u32m2((vuint32m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u32m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u32m4((vuint32m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u32m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u32m8((vuint32m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u64m1(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u64m1((vuint64m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u64m2(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u64m2((vuint64m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u64m4(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u64m4((vuint64m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwsubu_wx_u64m8(op0, op1, op2) \ +__builtin_rvv_vwsubu_wx_u64m8((vuint64m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwsubu_wx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsubu_wx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwadd_wv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i16mf4((vint16mf4_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vwadd_wv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwadd_wv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i16mf2((vint16mf2_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vwadd_wv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwadd_wv_i16m1(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i16m1((vint16m1_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vwadd_wv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwadd_wv_i16m2(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i16m2((vint16m2_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vwadd_wv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i16m2_m((vbool8_t)(op0), 
(vint16m2_t)(op1), (vint16m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwadd_wv_i16m4(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i16m4((vint16m4_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vwadd_wv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwadd_wv_i16m8(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i16m8((vint16m8_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vwadd_wv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwadd_wv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i32mf2((vint32mf2_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vwadd_wv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwadd_wv_i32m1(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i32m1((vint32m1_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vwadd_wv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwadd_wv_i32m2(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i32m2((vint32m2_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vwadd_wv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwadd_wv_i32m4(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i32m4((vint32m4_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vwadd_wv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwadd_wv_i32m8(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i32m8((vint32m8_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vwadd_wv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwadd_wv_i64m1(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i64m1((vint64m1_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vwadd_wv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwadd_wv_i64m2(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i64m2((vint64m2_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vwadd_wv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwadd_wv_i64m4(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i64m4((vint64m4_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vwadd_wv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwadd_wv_i64m8(op0, op1, op2) \ +__builtin_rvv_vwadd_wv_i64m8((vint64m8_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vwadd_wv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vwadd_wx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i16mf4((vint16mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_wx_i16mf4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vwadd_wx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_wx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i16mf2((vint16mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_wx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_wx_i16m1(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i16m1((vint16m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_wx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_wx_i16m2(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i16m2((vint16m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_wx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_wx_i16m4(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i16m4((vint16m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_wx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_wx_i16m8(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i16m8((vint16m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwadd_wx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwadd_wx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i32mf2((vint32mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_wx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_wx_i32m1(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i32m1((vint32m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_wx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_wx_i32m2(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i32m2((vint32m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_wx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_wx_i32m4(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i32m4((vint32m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_wx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_wx_i32m8(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i32m8((vint32m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwadd_wx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwadd_wx_i64m1(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i64m1((vint64m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_wx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwadd_wx_i64m2(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i64m2((vint64m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_wx_i64m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vwadd_wx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwadd_wx_i64m4(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i64m4((vint64m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_wx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwadd_wx_i64m8(op0, op1, op2) \ +__builtin_rvv_vwadd_wx_i64m8((vint64m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwadd_wx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwadd_wx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_wv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i16mf4((vint16mf4_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vwsub_wv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwsub_wv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i16mf2((vint16mf2_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vwsub_wv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwsub_wv_i16m1(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i16m1((vint16m1_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vwsub_wv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwsub_wv_i16m2(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i16m2((vint16m2_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vwsub_wv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwsub_wv_i16m4(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i16m4((vint16m4_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vwsub_wv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwsub_wv_i16m8(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i16m8((vint16m8_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vwsub_wv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwsub_wv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i32mf2((vint32mf2_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vwsub_wv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwsub_wv_i32m1(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i32m1((vint32m1_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vwsub_wv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwsub_wv_i32m2(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i32m2((vint32m2_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vwsub_wv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwsub_wv_i32m4(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i32m4((vint32m4_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define 
vwsub_wv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwsub_wv_i32m8(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i32m8((vint32m8_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vwsub_wv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwsub_wv_i64m1(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i64m1((vint64m1_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vwsub_wv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwsub_wv_i64m2(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i64m2((vint64m2_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vwsub_wv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwsub_wv_i64m4(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i64m4((vint64m4_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vwsub_wv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwsub_wv_i64m8(op0, op1, op2) \ +__builtin_rvv_vwsub_wv_i64m8((vint64m8_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vwsub_wv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vwsub_wx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i16mf4((vint16mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_wx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_wx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i16mf2((vint16mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_wx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_wx_i16m1(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i16m1((vint16m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_wx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_wx_i16m2(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i16m2((vint16m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_wx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_wx_i16m4(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i16m4((vint16m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_wx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_wx_i16m8(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i16m8((vint16m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwsub_wx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwsub_wx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i32mf2((vint32mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define 
vwsub_wx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_wx_i32m1(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i32m1((vint32m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_wx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_wx_i32m2(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i32m2((vint32m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_wx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_wx_i32m4(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i32m4((vint32m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_wx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_wx_i32m8(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i32m8((vint32m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwsub_wx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwsub_wx_i64m1(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i64m1((vint64m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_wx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_wx_i64m2(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i64m2((vint64m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_wx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_wx_i64m4(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i64m4((vint64m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_wx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwsub_wx_i64m8(op0, op1, op2) \ +__builtin_rvv_vwsub_wx_i64m8((vint64m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwsub_wx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwsub_wx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwcvt_x_x_v_i16m2(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i16m2((vint8m1_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i16m4(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i16m4((vint8m2_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i16m8(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i16m8((vint8m4_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i16m1(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i16m1((vint8mf2_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) 
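/* Editor's sketch (not part of the upstream header): how the widening
 * macros above are typically used. Assumes a clang-14-era toolchain with
 * the V extension enabled (e.g. -march=rv64gcv) and these v0.10-style
 * non-overloaded names; the helper name and parameters are hypothetical.
 */
static inline vint16m1_t widen_add_scalar(vint16m1_t acc, int8_t s,
                                          size_t vl) {
  /* vwadd.wx: acc[i] + sign-extend(s), result stays at 16-bit width. */
  return vwadd_wx_i16m1(acc, s, vl);
}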
+#define vwcvt_x_x_v_i16mf2(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i16mf2((vint8mf4_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i16mf4(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i16mf4((vint8mf8_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i32m2(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i32m2((vint16m1_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i32m4(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i32m4((vint16m2_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i32m8(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i32m8((vint16m4_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i32m1(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i32m1((vint16mf2_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i32mf2(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i32mf2((vint16mf4_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i64m2(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i64m2((vint32m1_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i64m4(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i64m4((vint32m2_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i64m8(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i64m8((vint32m4_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vwcvt_x_x_v_i64m1(op0, op1) \ +__builtin_rvv_vwcvt_x_x_v_i64m1((vint32mf2_t)(op0), (size_t)(op1)) +#define vwcvt_x_x_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvt_x_x_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), 
(size_t)(op4)) +#define vsuxei64_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vadc_vvm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vvm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vvm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vvm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vadc_vvm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vvm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vvm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vvm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vvm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vvm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_i64m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vadc_vvm_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8m1((vint8m1_t)(op0), (int8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8m2((vint8m2_t)(op0), (int8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8m4((vint8m4_t)(op0), (int8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8m8((vint8m8_t)(op0), (int8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i16m1((vint16m1_t)(op0), (int16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i16m2((vint16m2_t)(op0), (int16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i16m4((vint16m4_t)(op0), (int16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vxm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i16m8((vint16m8_t)(op0), (int16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vxm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i32m1((vint32m1_t)(op0), (int32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i32m2((vint32m2_t)(op0), (int32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i32m4((vint32m4_t)(op0), (int32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i32m8((vint32m8_t)(op0), (int32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vxm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i64m1((vint64m1_t)(op0), (int64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i64m2((vint64m2_t)(op0), (int64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i64m4((vint64m4_t)(op0), (int64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_i64m8((vint64m8_t)(op0), (int64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define 
vadc_vvm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vvm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vvm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vvm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vadc_vvm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vvm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vvm_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vvm_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vvm_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vvm_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vvm_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vvm_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vvm_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vvm_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define 
vadc_vxm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vxm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vadc_vxm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vxm_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vadc_vxm_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vadc_vxm_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vadc_vxm_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vadc_vxm_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vadc_vxm_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vadc_vxm_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vadc_vxm_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8mf2_b16(op0, op1, op2, op3) \ 
+__builtin_rvv_vmadc_vvm_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_i64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8mf4_b32(op0, op1, op2, 
op3) \ +__builtin_rvv_vmadc_vxm_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_i64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u8mf8_b64(op0, op1, op2, op3) \ 
+__builtin_rvv_vmadc_vvm_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vvm_u64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vvm_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) 
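/* Editor's sketch (not part of the upstream header): the vadc/vmadc pair
 * above is the building block for wide-integer addition. vadc yields the
 * per-lane sum including the carry-in mask; vmadc yields the carry-out
 * mask, which feeds the next limb. Helper name and signature are
 * hypothetical.
 */
static inline vuint64m1_t add_limb_with_carry(vuint64m1_t a, vuint64m1_t b,
                                              vbool64_t carry_in,
                                              vbool64_t *carry_out,
                                              size_t vl) {
  *carry_out = vmadc_vvm_u64m1_b64(a, b, carry_in, vl); /* carries of a+b+cin */
  return vadc_vvm_u64m1(a, b, carry_in, vl);            /* a[i]+b[i]+cin[i] */
}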
+#define vmadc_vxm_u16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmadc_vxm_u64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmadc_vxm_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmadc_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmadc_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmadc_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmadc_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i16m4_b4(op0, op1, op2) \ 
+__builtin_rvv_vmadc_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmadc_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmadc_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmadc_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmadc_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmadc_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmadc_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmadc_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmadc_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmadc_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmadc_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmadc_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i32m2_b16((vint32m2_t)(op0), 
(int32_t)(op1), (size_t)(op2)) +#define vmadc_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmadc_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmadc_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmadc_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmadc_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmadc_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmadc_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vsuxei8_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i16m4((int16_t *)(op0), (vuint8m2_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint8m2_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i16m8((int16_t *)(op0), (vuint8m4_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i16m8_m((vbool2_t)(op0), (int16_t *)(op1), (vuint8m4_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmadc_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmadc_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define 
vmadc_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmadc_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmadc_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmadc_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmadc_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmadc_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmadc_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmadc_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmadc_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmadc_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmadc_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmadc_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmadc_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmadc_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) 
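/* Editor's sketch (not part of the upstream header): the carry-in-less
 * vmadc_vv/vx forms above double as unsigned-overflow detectors, since a
 * lane carries out exactly when a[i] + b[i] wraps. Helper is hypothetical.
 */
static inline vbool8_t u8_add_overflows(vuint8m1_t a, vuint8m1_t b,
                                        size_t vl) {
  return vmadc_vv_u8m1_b8(a, b, vl); /* mask set where a[i] + b[i] > 255 */
}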
+#define vmadc_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmadc_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmadc_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmadc_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmadc_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmadc_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmadc_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmadc_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmadc_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmadc_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmadc_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmadc_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmadc_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vsbc_vvm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i32m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vsbc_vvm_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8m1((vint8m1_t)(op0), (int8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8m2((vint8m2_t)(op0), (int8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8m4((vint8m4_t)(op0), (int8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8m8((vint8m8_t)(op0), (int8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i16m1((vint16m1_t)(op0), (int16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i16m2((vint16m2_t)(op0), (int16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i16m4((vint16m4_t)(op0), (int16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i16m8((vint16m8_t)(op0), (int16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i32m1((vint32m1_t)(op0), (int32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i32m2((vint32m2_t)(op0), (int32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i32m4((vint32m4_t)(op0), (int32_t)(op1), (vbool8_t)(op2), 
(size_t)(op3)) +#define vsbc_vxm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i32m8((vint32m8_t)(op0), (int32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i64m1((vint64m1_t)(op0), (int64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i64m2((vint64m2_t)(op0), (int64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i64m4((vint64m4_t)(op0), (int64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_i64m8((vint64m8_t)(op0), (int64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) 
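/* Editor's sketch (not part of the upstream header): vsbc mirrors vadc for
 * subtraction, consuming a borrow-in mask; the matching vmsbc forms defined
 * just below produce the borrow-out. Helper is hypothetical.
 */
static inline vuint32m1_t sub_limb_with_borrow(vuint32m1_t a, vuint32m1_t b,
                                               vbool32_t borrow_in,
                                               size_t vl) {
  return vsbc_vvm_u32m1(a, b, borrow_in, vl); /* a[i] - b[i] - bin[i] */
}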
+#define vsbc_vvm_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vvm_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vvm_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u64m4(op0, op1, op2, op3) \ 
+__builtin_rvv_vsbc_vxm_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vsbc_vxm_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsbc_vxm_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_i64m8_b8(op0, op1, op2, 
op3) \ +__builtin_rvv_vmsbc_vvm_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_i64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8m1_b8(op0, op1, op2, op3) \ 
+__builtin_rvv_vmsbc_vvm_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vvm_u64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vvm_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u8m1_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (vbool8_t)(op2), 
(size_t)(op3)) +#define vmsbc_vxm_u8m2_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u8m4_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u8m8_b1(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u8mf2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u8mf4_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u8mf8_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u16m1_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u16m2_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u16m4_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u16m8_b2(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u16mf2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u16mf4_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u32m1_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u32m2_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u32m4_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u32m8_b4(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u32mf2_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u64m1_b64(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u64m2_b32(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u64m4_b16(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbc_vxm_u64m8_b8(op0, op1, op2, op3) \ +__builtin_rvv_vmsbc_vxm_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), 
(vuint16m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u16m4((uint16_t *)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint8m2_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u16m8((uint16_t *)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u16m8_m((vbool2_t)(op0), (uint16_t *)(op1), (vuint8m4_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmsbc_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i32m2_b16(op0, 
op1, op2) \ +__builtin_rvv_vmsbc_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i64m2_b32((vint64m2_t)(op0), 
(int64_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsbc_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmsbc_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8m4_b2((vuint8m4_t)(op0), 
(uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsbc_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsbc_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vand_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vand_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vand_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vand_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vand_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vand_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vand_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vand_vv_i8m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vand_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vand_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vand_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vand_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vand_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vand_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vand_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vand_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vand_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vand_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vand_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vand_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vand_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vand_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vand_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vand_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vand_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vand_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vand_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vand_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vand_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vand_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vand_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vand_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vand_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vand_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vand_vv_i32m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vand_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vand_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vand_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vand_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vand_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vand_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vand_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vand_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vand_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vand_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vand_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vand_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vand_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vand_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), 
(vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vand_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vand_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vand_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vand_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vand_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vand_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vand_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vand_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vand_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vand_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vand_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vand_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vand_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vand_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vand_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vand_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vand_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vand_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vand_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vand_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vand_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vand_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vand_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vand_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vand_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define 
vand_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vand_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vand_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vand_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vand_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vand_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vand_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vand_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vand_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vand_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vand_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vand_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vand_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vand_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vand_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vand_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vand_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vand_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vand_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vand_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vand_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vand_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vand_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vand_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vand_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vand_vv_u16m2(op0, op1, op2) \ 
+__builtin_rvv_vand_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vand_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vand_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vand_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vand_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vand_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vand_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vand_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vand_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vand_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vand_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vand_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vand_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vand_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vand_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vand_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vand_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vand_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vand_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vand_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vand_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vand_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vand_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vand_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vand_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vand_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vand_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vand_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vand_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), 
(vuint64m2_t)(op3), (size_t)(op4)) +#define vand_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vand_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vand_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vand_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vand_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vand_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vand_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vand_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vand_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vand_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vand_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vand_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vand_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vand_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vand_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vand_vx_u16m8(op0, 
op1, op2) \ +__builtin_rvv_vand_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vand_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vand_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vand_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vand_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vand_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vand_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vand_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vand_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vand_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vand_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vand_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vand_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vand_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vand_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vand_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vand_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vand_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vand_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vand_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vand_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vand_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vand_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vand_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vand_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vand_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vand_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vand_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vand_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vand_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vxor_vv_i8m1(op0, op1, op2) 
\ +__builtin_rvv_vxor_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vxor_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vxor_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vxor_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vxor_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vxor_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vxor_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vxor_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vxor_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vxor_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vxor_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vxor_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vxor_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vxor_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vxor_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vxor_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vxor_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vxor_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vxor_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vxor_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vxor_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vxor_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vxor_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vxor_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vxor_vv_i16mf4(op0, op1, op2) \ 
+__builtin_rvv_vxor_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vxor_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vxor_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vxor_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vxor_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vxor_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vxor_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vxor_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vxor_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vxor_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vxor_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vxor_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vxor_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vxor_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vxor_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vxor_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vxor_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vxor_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vxor_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vxor_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8m4(op0, op1, op2) \ 
+__builtin_rvv_vxor_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vxor_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vxor_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vxor_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vxor_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vxor_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vxor_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vxor_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vxor_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vxor_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vxor_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vxor_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vxor_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vxor_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vxor_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vxor_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vxor_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define 
vxor_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vxor_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vxor_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vxor_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vxor_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vxor_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vxor_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vxor_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vxor_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vxor_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vxor_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vxor_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vxor_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vxor_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vxor_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i16m4((int16_t *)(op0), (vuint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i16m8((int16_t *)(op0), (vuint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i16m8_m((vbool2_t)(op0), (int16_t *)(op1), (vuint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i16mf2(op0, op1, op2, 
op3) \ +__builtin_rvv_vsuxei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vxor_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vxor_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vxor_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vxor_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vxor_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vxor_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vxor_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vxor_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vxor_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vxor_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vxor_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vxor_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vxor_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vxor_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vxor_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vxor_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vxor_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vxor_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vxor_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vxor_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), 
(vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vxor_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vxor_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vxor_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vxor_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vxor_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vxor_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vxor_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vxor_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vxor_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vxor_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vxor_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vxor_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vxor_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vxor_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vxor_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vxor_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vxor_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vxor_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vxor_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vxor_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vxor_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vxor_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vxor_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vxor_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vxor_vv_u64m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vxor_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vxor_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vxor_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vxor_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vxor_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vxor_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vxor_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vxor_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vxor_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vxor_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vxor_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vxor_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u16mf2_m((vbool32_t)(op0), 
(vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vxor_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vxor_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vxor_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vxor_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vxor_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vxor_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vxor_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vxor_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vxor_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vxor_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vxor_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vxor_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vxor_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vxor_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vxor_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vxor_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vxor_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vxor_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vxor_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vxor_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vxor_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vxor_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vor_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vor_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vor_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vor_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), 
(vint8m2_t)(op3), (size_t)(op4)) +#define vor_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vor_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vor_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vor_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vor_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vor_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vor_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vor_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vor_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vor_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vor_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vor_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vor_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vor_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vor_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vor_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vor_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vor_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vor_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vor_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vor_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vor_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vor_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vor_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vor_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vor_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vor_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vor_vv_i32m2(op0, op1, 
op2) \ +__builtin_rvv_vor_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vor_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vor_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vor_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vor_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vor_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vor_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vor_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vor_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vor_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vor_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vor_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vor_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vor_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vor_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vor_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vor_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vor_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8mf2_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vor_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vor_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vor_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vor_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vor_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vor_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vor_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vor_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vor_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vor_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vor_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vor_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vor_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vor_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vor_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vor_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vor_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vor_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vor_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vor_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vor_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vor_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vor_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vor_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), 
(size_t)(op4)) +#define vor_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vor_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vor_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vor_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vor_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vor_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vor_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vor_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vor_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vor_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vor_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vor_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vor_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vor_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vor_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vor_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vor_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vor_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vor_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vor_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vor_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vor_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vor_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vor_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vor_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vor_vv_u16m1(op0, op1, op2) \ 
+__builtin_rvv_vor_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vor_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vor_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vor_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vor_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vor_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vor_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vor_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vor_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vor_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vor_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vor_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vor_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vor_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vor_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vor_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vor_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vor_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vor_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vor_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vor_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vor_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vor_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vor_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vor_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vor_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define 
vor_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vor_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vor_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vor_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vor_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vor_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vor_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vor_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vor_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vor_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vor_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vor_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vor_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vor_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vor_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vor_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vor_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), 
(size_t)(op2)) +#define vor_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vor_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vor_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vor_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vor_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vor_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vor_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vor_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vor_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vor_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vor_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vor_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vor_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vor_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vor_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vor_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vor_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vor_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vor_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vor_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vor_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vor_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vor_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vor_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vor_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vor_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vor_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vor_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vor_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vor_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vor_vx_u64m8_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vor_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vsll_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8m1((vint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vsll_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsll_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8m2((vint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vsll_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsll_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8m4((vint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vsll_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsll_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8m8((vint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vsll_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsll_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8mf2((vint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vsll_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsll_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8mf4((vint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vsll_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsll_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i8mf8((vint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vsll_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsll_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i16m1((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vsll_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsll_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i16m2((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vsll_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsll_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i16m4((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vsll_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsll_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i16m8((vint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vsll_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsll_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i16mf2((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vsll_vv_i16mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsll_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsll_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i16mf4((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vsll_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsll_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i32m1((vint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vsll_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsll_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i32m2((vint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vsll_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsll_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i32m4((vint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vsll_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsll_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i32m8((vint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vsll_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsll_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i32mf2((vint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vsll_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsll_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i64m1((vint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vsll_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsll_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i64m2((vint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vsll_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsll_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i64m4((vint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vsll_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsll_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_i64m8((vint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vsll_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsll_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i8m1((vint8m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i8m2((vint8m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i8m2_m(op0, op1, op2, op3, op4) 
\
+__builtin_rvv_vsll_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i8m4(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i8m4((vint8m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i8m8(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i8m8((vint8m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i8mf2((vint8mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i8mf4((vint8mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i8mf8((vint8mf8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i16m1(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i16m1((vint16m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i16m2(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i16m2((vint16m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i16m4(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i16m4((vint16m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i16m8(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i16m8((vint16m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i16mf2((vint16mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i16mf4((vint16mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsll_vx_i32m1(op0, op1, op2) \
+__builtin_rvv_vsll_vx_i32m1((vint32m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vsll_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsll_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
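+/* Editorial sketch, not part of the upstream header: the generated macros
+ * above share one calling convention. Unmasked forms take the operands
+ * followed by the active vector length vl; masked _m forms prepend a mask
+ * (op0) and a merge/mask-off source (op1). Assuming an RVV-enabled clang 14
+ * target, the vsetvl/vle32/vse32 intrinsics defined elsewhere in this
+ * header, and locals n, src, dst, and mask in scope, one strip of a
+ * strip-mined left shift might look like:
+ *
+ *   size_t vl = vsetvl_e32m1(n);              // elements in this strip
+ *   vint32m1_t v = vle32_v_i32m1(src, vl);    // unit-stride load
+ *   v = vsll_vx_i32m1(v, 3, vl);              // v << 3, all active lanes
+ *   v = vsll_vx_i32m1_m(mask, v, v, 3, vl);   // shift only masked lanes,
+ *                                             // others keep v (merge src)
+ *   vse32_v_i32m1(dst, v, vl);                // unit-stride store
+ */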
+#define vsll_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i32m2((vint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i32m4((vint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i32m8((vint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i32mf2((vint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i64m1((vint64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i64m2((vint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i64m4((vint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vsll_vx_i64m8((vint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vsll_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsll_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vsll_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsll_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vsll_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsll_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vsll_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsll_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8mf2((vuint8mf2_t)(op0), 
(vuint8mf2_t)(op1), (size_t)(op2)) +#define vsll_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsll_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vsll_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsll_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vsll_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsll_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vsll_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsll_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vsll_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsll_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vsll_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsll_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vsll_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsll_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vsll_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsll_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vsll_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsll_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vsll_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsll_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vsll_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsll_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vsll_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsll_vv_u32m8(op0, 
op1, op2) \ +__builtin_rvv_vsll_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vsll_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsll_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vsll_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsll_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vsll_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsll_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vsll_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsll_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vsll_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsll_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vsll_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vsll_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsll_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u8m1((vuint8m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u8m2((vuint8m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u8m4((vuint8m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u8m8((vuint8m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u8mf2((vuint8mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u8mf4((vuint8mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u8mf8(op0, op1, op2) \ 
+__builtin_rvv_vsll_vx_u8mf8((vuint8mf8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u16m1((vuint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u16m2((vuint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u16m4((vuint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u16m8((vuint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u16mf2((vuint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u16mf4((vuint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u32m1((vuint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u32m2((vuint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u32m4((vuint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u32m8((vuint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u32mf2((vuint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u64m1((vuint64m1_t)(op0), 
(size_t)(op1), (size_t)(op2)) +#define vsll_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u64m2((vuint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u64m4((vuint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsll_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vsll_vx_u64m8((vuint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsll_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsll_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u16m4((uint16_t *)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u16m8((uint16_t *)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u16m8_m((vbool2_t)(op0), (uint16_t *)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsrl_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vsrl_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsrl_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vsrl_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsrl_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vsrl_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vsrl_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsrl_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vsrl_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define 
vsrl_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsrl_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vsrl_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vsrl_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsrl_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vsrl_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsrl_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vsrl_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsrl_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vsrl_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsrl_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vsrl_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8m1((vuint8m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8m2((vuint8m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8m4((vuint8m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8m8((vuint8m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define 
vsrl_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8mf2((vuint8mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8mf4((vuint8mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u8mf8((vuint8mf8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u16m1((vuint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u16m2((vuint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u16m4((vuint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u16m8((vuint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u16mf2((vuint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u16mf4((vuint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u32m1((vuint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u32m2((vuint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u32m4((vuint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u32m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsrl_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u32m8((vuint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u32mf2((vuint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u64m1((vuint64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u64m2((vuint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u64m4((vuint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsrl_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vsrl_vx_u64m8((vuint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsrl_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsrl_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8m1((vint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vsra_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsra_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8m2((vint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vsra_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsra_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8m4((vint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vsra_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsra_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8m8((vint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vsra_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsra_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8mf2((vint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vsra_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsra_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8mf4((vint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vsra_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), 
(vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsra_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i8mf8((vint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vsra_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsra_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i16m1((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vsra_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsra_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i16m2((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vsra_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsra_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i16m4((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vsra_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsra_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i16m8((vint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vsra_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsra_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i16mf2((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vsra_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsra_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i16mf4((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vsra_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsra_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i32m1((vint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vsra_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsra_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i32m2((vint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vsra_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsra_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i32m4((vint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vsra_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsra_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i32m8((vint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vsra_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsra_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i32mf2((vint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vsra_vv_i32mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsra_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsra_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i64m1((vint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vsra_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsra_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i64m2((vint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vsra_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsra_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i64m4((vint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vsra_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsra_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vsra_vv_i64m8((vint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vsra_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsra_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8m1((vint8m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8m2((vint8m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8m4((vint8m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8m8((vint8m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8mf2((vint8mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8mf4((vint8mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i8mf8((vint8mf8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i16m1((vint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), 
(size_t)(op3), (size_t)(op4)) +#define vsra_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i16m2((vint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i16m4((vint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i16m8((vint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i16mf2((vint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i16mf4((vint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i32m1((vint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i32m2((vint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i32m4((vint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i32m8((vint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i32mf2((vint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i64m1((vint64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i64m2((vint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i64m4(op0, op1, op2) \ 
+__builtin_rvv_vsra_vx_i64m4((vint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vsra_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vsra_vx_i64m8((vint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vsra_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsra_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u8m1(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u8m1((vuint16m2_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u8m2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u8m2((vuint16m4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u8m4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u8m4((vuint16m8_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u8mf2((vuint16m1_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u8mf4((vuint16mf2_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u8mf8((vuint16mf4_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u16m1(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u16m1((vuint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u16m2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u16m2((vuint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u16m4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u16m4((vuint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u16mf2((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), 
(size_t)(op4)) +#define vnsrl_wv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u16mf4((vuint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u32m1(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u32m1((vuint64m2_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u32m2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u32m2((vuint64m4_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u32m4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u32m4((vuint64m8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vnsrl_wv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wv_u32mf2((vuint64m1_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vnsrl_wv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u8m1(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u8m1((vuint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u8m2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u8m2((vuint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u8m4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u8m4((vuint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u8mf2((vuint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u8mf4((vuint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u8mf8((vuint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u16m1(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u16m1((vuint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), 
(vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u16m2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u16m2((vuint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u16m4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u16m4((vuint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u16mf2((vuint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u16mf4((vuint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u32m1(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u32m1((vuint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u32m2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u32m2((vuint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u32m4(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u32m4((vuint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsrl_wx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vnsrl_wx_u32mf2((vuint64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsrl_wx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsrl_wx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wv_i8m1(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i8m1((vint16m2_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vnsra_wv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vnsra_wv_i8m2(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i8m2((vint16m4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vnsra_wv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vnsra_wv_i8m4(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i8m4((vint16m8_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vnsra_wv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vnsra_wv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i8mf2((vint16m1_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vnsra_wv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), 
(vint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vnsra_wv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i8mf4((vint16mf2_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vnsra_wv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vnsra_wv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i8mf8((vint16mf4_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vnsra_wv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vnsra_wv_i16m1(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i16m1((vint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vnsra_wv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vnsra_wv_i16m2(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i16m2((vint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vnsra_wv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vnsra_wv_i16m4(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i16m4((vint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vnsra_wv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vnsra_wv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i16mf2((vint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vnsra_wv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vnsra_wv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i16mf4((vint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vnsra_wv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vnsra_wv_i32m1(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i32m1((vint64m2_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vnsra_wv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vnsra_wv_i32m2(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i32m2((vint64m4_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vnsra_wv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vnsra_wv_i32m4(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i32m4((vint64m8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vnsra_wv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vnsra_wv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vnsra_wv_i32mf2((vint64m1_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vnsra_wv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnsra_wx_i8m1(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i8m1((vint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i8m1_m(op0, op1, op2, op3, 
op4) \ +__builtin_rvv_vnsra_wx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i8m2(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i8m2((vint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i8m4(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i8m4((vint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i8mf2((vint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i8mf4((vint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i8mf8((vint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i16m1(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i16m1((vint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i16m2(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i16m2((vint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i16m4(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i16m4((vint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i16mf2((vint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i16mf4((vint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i32m1(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i32m1((vint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i32m2(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i32m2((vint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i32m2_m((vbool16_t)(op0), 
(vint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i32m4(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i32m4((vint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vnsra_wx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vnsra_wx_i32mf2((vint64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vnsra_wx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnsra_wx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vncvt_x_x_w_u8m1(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u8m1((vuint16m2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u8m2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u8m2((vuint16m4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u8m4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u8m4((vuint16m8_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u8mf2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u8mf2((vuint16m1_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u8mf4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u8mf4((vuint16mf2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u8mf8(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u8mf8((vuint16mf4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u16m1(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u16m1((vuint32m2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u16m2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u16m2((vuint32m4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u16m4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u16m4((vuint32m8_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u16mf2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u16mf2((vuint32m1_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u16mf4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u16mf4((vuint32mf2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), 
(vuint32mf2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u32m1(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u32m1((vuint64m2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u32m2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u32m2((vuint64m4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u32m4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u32m4((vuint64m8_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_u32mf2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_u32mf2((vuint64m1_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vmseq_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmseq_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmseq_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmseq_vv_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmseq_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), 
(size_t)(op4)) +#define vmseq_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmseq_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmseq_vv_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmseq_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmseq_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_i64m2_b32(op0, op1, op2) \ 
+__builtin_rvv_vmseq_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsse8_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsse8_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsse8_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8m2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsse8_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsse8_v_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8m4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vsse8_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsse8_v_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8m8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vsse8_v_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8m8_m((vbool1_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsse8_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsse8_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsse8_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsse8_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsse8_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsse8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsse8_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), 
(size_t)(op3)) +#define vsuxei32_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i16m4((int16_t *)(op0), (vuint32m8_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint32m8_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmseq_vx_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmseq_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmseq_vx_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), 
(vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmseq_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmseq_vx_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmseq_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmseq_vx_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmseq_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmseq_vx_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmseq_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmseq_vx_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmseq_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmseq_vx_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmseq_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmseq_vx_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmseq_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmseq_vx_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmseq_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmseq_vx_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmseq_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmseq_vx_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmseq_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmseq_vx_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmseq_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmseq_vx_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmseq_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) 
+#define vmseq_vx_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmseq_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmseq_vx_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmseq_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmseq_vx_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmseq_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmseq_vv_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmseq_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), 
(vuint16m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmseq_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmseq_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmseq_vv_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmseq_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmseq_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmseq_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmseq_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmseq_vv_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmseq_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmseq_vv_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define 
vmseq_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmseq_vv_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmseq_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmseq_vv_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vv_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmseq_vx_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmseq_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmseq_vx_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmseq_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmseq_vx_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmseq_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmseq_vx_u16m4_b4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmseq_vx_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmseq_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmseq_vx_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmseq_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmseq_vx_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmseq_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmseq_vx_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmseq_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmseq_vx_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmseq_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmseq_vx_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmseq_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmseq_vx_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmseq_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmseq_vx_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmseq_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmseq_vx_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmseq_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmseq_vx_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmseq_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmseq_vx_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmseq_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmseq_vx_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) 
+#define vmseq_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmseq_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmseq_vx_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmseq_vx_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmsne_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmsne_vv_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmsne_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_i16m8_b2_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vmsne_vv_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmsne_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmsne_vv_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmsne_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmsne_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), 
(vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsne_vx_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsne_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsne_vx_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsne_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsne_vx_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsne_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsne_vx_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsne_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsne_vx_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsne_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsne_vx_i16mf2_b32_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsne_vx_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsne_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsne_vx_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsne_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsne_vx_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsne_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsne_vx_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsne_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsne_vx_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsne_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsne_vx_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsne_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsne_vx_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsne_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsne_vx_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsne_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsne_vx_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsne_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsne_vx_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsne_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsne_vx_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8m2_b4(op0, op1, op2) \ 
+__builtin_rvv_vmsne_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmsne_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmsne_vv_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmsne_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmsne_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define 
vmsne_vv_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmsne_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmsne_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmsne_vv_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmsne_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmsne_vv_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmsne_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmsne_vv_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmsne_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmsne_vv_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vv_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8m2_b4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsne_vx_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsne_vx_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsne_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsne_vx_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsne_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsne_vx_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsne_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsne_vx_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsne_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsne_vx_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsne_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsne_vx_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsne_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsne_vx_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsne_vx_u32m1_b32(op0, op1, op2) \ 
+__builtin_rvv_vmsne_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsne_vx_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsne_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsne_vx_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsne_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsne_vx_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsne_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsne_vx_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsne_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsne_vx_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsne_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsne_vx_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsne_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsne_vx_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsne_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsne_vx_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsne_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsne_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsne_vx_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsne_vx_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8m4_b2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsltu_vv_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u32m1_b32_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsltu_vv_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmsltu_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmsltu_vv_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vv_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), 
(vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u32m2_b16(op0, op1, op2) \ 
+__builtin_rvv_vmsltu_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsltu_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsltu_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsltu_vx_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsltu_vx_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8m8_b1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmslt_vv_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmslt_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmslt_vv_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmslt_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmslt_vv_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmslt_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmslt_vv_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmslt_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmslt_vv_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmslt_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmslt_vv_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmslt_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmslt_vv_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmslt_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), 
(vint32m2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmslt_vv_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmslt_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmslt_vv_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmslt_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmslt_vv_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmslt_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmslt_vv_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmslt_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmslt_vv_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmslt_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmslt_vv_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vv_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u16m4((uint16_t *)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), 
(size_t)(op4)) +#define vsuxei32_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmslt_vx_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmslt_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmslt_vx_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmslt_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmslt_vx_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmslt_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmslt_vx_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmslt_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmslt_vx_i16m8_b2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmslt_vx_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmslt_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmslt_vx_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmslt_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmslt_vx_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmslt_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmslt_vx_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmslt_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmslt_vx_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmslt_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmslt_vx_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmslt_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmslt_vx_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmslt_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmslt_vx_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmslt_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmslt_vx_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmslt_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmslt_vx_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmslt_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmslt_vx_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmslt_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmslt_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmslt_vx_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmslt_vx_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8m1_b8(op0, op1, op2) \ 
+__builtin_rvv_vmsleu_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), 
(size_t)(op2)) +#define vmsleu_vv_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmsleu_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmsleu_vv_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vv_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) 
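+// ---------------------------------------------------------------------------
+// Editorial usage sketch (not part of the upstream generated header). The
+// unmasked comparison macros above take the source operand(s) plus the active
+// vector length `vl`; the `_m` variants defined below prepend a merge
+// ("maskedoff") mask and a control mask. Names such as `p`, `n`, `src`, and
+// `le42` are hypothetical; `vsetvl_e8m1` and `vle8_v_u8m1` are assumed to be
+// provided elsewhere in this header.
+//
+//   size_t vl = vsetvl_e8m1(n);                     // request vl for e8, m1
+//   vuint8m1_t src = vle8_v_u8m1(p, vl);            // unit-stride load
+//   vbool8_t le42 = vmsleu_vx_u8m1_b8(src, 42, vl); // le42[i] = (src[i] <= 42)
+// ---------------------------------------------------------------------------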
+#define vmsleu_vx_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), 
(vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsleu_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsleu_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsleu_vx_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsleu_vx_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8m2_b4(op0, op1, op2) \ 
+__builtin_rvv_vmsle_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmsle_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmsle_vv_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmsle_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmsle_vv_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmsle_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmsle_vv_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmsle_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmsle_vv_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmsle_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmsle_vv_i16mf4_b64_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsle_vv_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmsle_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmsle_vv_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmsle_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmsle_vv_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmsle_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmsle_vv_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmsle_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmsle_vv_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmsle_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmsle_vv_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmsle_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmsle_vv_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmsle_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmsle_vv_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vv_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define 
vmsle_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsle_vx_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsle_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsle_vx_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsle_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsle_vx_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsle_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsle_vx_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsle_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsle_vx_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsle_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsle_vx_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsle_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsle_vx_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsle_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsle_vx_i32m1_b32_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsle_vx_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsle_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsle_vx_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsle_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsle_vx_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsle_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsle_vx_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsle_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsle_vx_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsle_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsle_vx_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsle_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsle_vx_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsle_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsle_vx_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsle_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsle_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsle_vx_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsle_vx_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8m8_b1(op0, op1, op2) \ 
+__builtin_rvv_vmsgtu_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u32m2_b16(op0, op1, op2) \ 
+__builtin_rvv_vmsgtu_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmsgtu_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmsgtu_vv_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vv_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) 
+#define vmsgtu_vx_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u32m1_b32((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u32m2_b16_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsgtu_vx_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgtu_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsgtu_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgtu_vx_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgtu_vx_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define 
vmsgt_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i32m4_b8((vint32m4_t)(op0), 
(vint32m4_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmsgt_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmsgt_vv_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vv_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), 
(int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define 
vmsgt_vx_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsgt_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsgt_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsgt_vx_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgt_vx_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8m1_b8((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8m2_b4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8m4_b2((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8m8_b1((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8mf2_b16((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8mf4_b32((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), 
(vuint8mf4_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u8mf8_b64((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u16m1_b16((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u16m2_b8((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u16m4_b4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u16m8_b2((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u16mf2_b32((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u16mf4_b64((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u32m1_b32((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u32m2_b16((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u32m4_b8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u32m8_b4((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), 
(size_t)(op4)) +#define vmsgeu_vv_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u32mf2_b64((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u64m1_b64((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u64m2_b32((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u64m4_b16((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmsgeu_vv_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vv_u64m8_b8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmsgeu_vv_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vv_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8m1_b8((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8m2_b4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), 
(size_t)(op4)) +#define vmsgeu_vx_u8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8m4_b2((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8m8_b1((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8mf2_b16((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8mf4_b32((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u8mf8_b64((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u16m1_b16((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u16m2_b8((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u16m4_b4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u16m8_b2((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u16mf2_b32((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u16mf4_b64((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u32m1_b32((vuint32m1_t)(op0), 
(uint32_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u32m2_b16((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u32m4_b8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u32m8_b4((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u32mf2_b64((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u64m1_b64((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u64m2_b32((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u64m4_b16((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsgeu_vx_u64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsgeu_vx_u64m8_b8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmsgeu_vx_u64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsgeu_vx_u64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8m1_b8((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8m2_b4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8m4_b2((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8m4_b2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmsge_vv_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8m8_b1((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8m8_b1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8mf2_b16((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8mf4_b32((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmsge_vv_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i8mf8_b64((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmsge_vv_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmsge_vv_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i16m1_b16((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmsge_vv_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmsge_vv_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i16m2_b8((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i16m4_b4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmsge_vv_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmsge_vv_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i16m8_b2((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmsge_vv_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmsge_vv_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i16mf2_b32((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i16mf4_b64((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmsge_vv_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmsge_vv_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i32m1_b32((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmsge_vv_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), 
(size_t)(op4)) +#define vmsge_vv_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i32m2_b16((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i32m4_b8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmsge_vv_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmsge_vv_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i32m8_b4((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmsge_vv_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmsge_vv_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i32mf2_b64((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i64m1_b64((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmsge_vv_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmsge_vv_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i64m2_b32((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmsge_vv_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmsge_vv_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i64m4_b16((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmsge_vv_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmsge_vv_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vv_i64m8_b8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmsge_vv_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vv_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8m1_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8m1_b8((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8m1_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8m1_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8m2_b4(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8m2_b4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8m2_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8m2_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8m4_b2(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8m4_b2((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8m4_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8m4_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8m8_b1(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8m8_b1((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8m8_b1_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8m8_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8mf2_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8mf2_b16((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8mf2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8mf2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8mf4_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8mf4_b32((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8mf4_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8mf4_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i8mf8_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i8mf8_b64((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmsge_vx_i8mf8_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i8mf8_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmsge_vx_i16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i16m1_b16((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsge_vx_i16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsge_vx_i16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i16m2_b8((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsge_vx_i16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsge_vx_i16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i16m4_b4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsge_vx_i16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsge_vx_i16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i16m8_b2((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsge_vx_i16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsge_vx_i16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i16mf2_b32((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsge_vx_i16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsge_vx_i16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i16mf4_b64((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmsge_vx_i16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmsge_vx_i32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i32m1_b32((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsge_vx_i32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsge_vx_i32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i32m2_b16((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsge_vx_i32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsge_vx_i32m4_b8(op0, op1, 
op2) \ +__builtin_rvv_vmsge_vx_i32m4_b8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsge_vx_i32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsge_vx_i32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i32m8_b4((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsge_vx_i32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsge_vx_i32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i32mf2_b64((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmsge_vx_i32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmsge_vx_i64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i64m1_b64((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsge_vx_i64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsge_vx_i64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i64m2_b32((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsge_vx_i64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsge_vx_i64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i64m4_b16((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsge_vx_i64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmsge_vx_i64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmsge_vx_i64m8_b8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmsge_vx_i64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmsge_vx_i64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vminu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vminu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vminu_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vminu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vminu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vminu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vminu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vminu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vminu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vminu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), 
(vuint8mf2_t)(op3), (size_t)(op4)) +#define vminu_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vminu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vminu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vminu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vminu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vminu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vminu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vminu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vminu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vminu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vminu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vminu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vminu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vminu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vminu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vminu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vminu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vminu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vminu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vminu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vminu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vminu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vminu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vminu_vv_u32m8_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vminu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vminu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vminu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vminu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vminu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vminu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vminu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vminu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vminu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vminu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vminu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vminu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vminu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vminu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vminu_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vminu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vminu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vminu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define 
vminu_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vminu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vminu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vminu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vminu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vminu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vminu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vminu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vminu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vminu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vminu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vminu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vminu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vminu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vminu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vminu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vminu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vminu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vminu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vminu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vminu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vminu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vminu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vminu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u64m1((vuint64m1_t)(op0), 
(uint64_t)(op1), (size_t)(op2)) +#define vminu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vminu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vminu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vminu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vminu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vminu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vminu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vminu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vminu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmin_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmin_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmin_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmin_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmin_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmin_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmin_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmin_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmin_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmin_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmin_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmin_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmin_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmin_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmin_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmin_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmin_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define 
vmin_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmin_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmin_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmin_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmin_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmin_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmin_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmin_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmin_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmin_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmin_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmin_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmin_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmin_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmin_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmin_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmin_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmin_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmin_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmin_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmin_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmin_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmin_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmin_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) 
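+/* Illustrative note (editorial, not part of the upstream generated header):
+ * each macro above simply casts its arguments and forwards to the matching
+ * __builtin_rvv_* compiler builtin. A minimal usage sketch, assuming a
+ * vector length obtained from the vsetvl_e32m1 macro defined earlier in
+ * this header:
+ *
+ *   size_t vl = vsetvl_e32m1(n);
+ *   vint32m1_t lo   = vmin_vv_i32m1(a, b, vl);        // elementwise min
+ *   vint32m1_t lo_m = vmin_vv_i32m1_m(mask, dest,
+ *                                     a, b, vl);      // masked form: op0 is
+ *                                                     // the mask, op1 the
+ *                                                     // maskedoff source,
+ *                                                     // inactive lanes keep
+ *                                                     // dest's values
+ */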
+#define vmin_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmin_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vmin_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmin_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmin_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmin_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmin_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmin_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmin_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmin_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmin_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmin_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmin_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), 
(vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmin_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmin_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmin_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmin_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmin_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmin_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmin_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmin_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmin_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmin_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmin_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmin_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmin_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmin_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmin_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmin_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmin_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmin_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmin_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmin_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmin_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vmin_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmin_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmin_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define 
vmaxu_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u16mf4_m((vbool64_t)(op0), 
(vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmaxu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmaxu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8m4_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vmaxu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define 
vmaxu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmaxu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vmaxu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmaxu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmaxu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmax_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmax_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmax_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmax_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmax_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmax_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmax_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmax_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmax_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmax_vv_i8mf2_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmax_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmax_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmax_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmax_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmax_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmax_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmax_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmax_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmax_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmax_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmax_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmax_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmax_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmax_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmax_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmax_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmax_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmax_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmax_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmax_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmax_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmax_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmax_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmax_vv_i32m8_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vmax_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmax_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmax_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmax_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmax_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmax_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmax_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmax_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmax_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmax_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vmax_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmax_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmax_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmax_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define 
vmax_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmax_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmax_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmax_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmax_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmax_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmax_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmax_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmax_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmax_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmax_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmax_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmax_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmax_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmax_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmax_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmax_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmax_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmax_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmax_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmax_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), 
(size_t)(op2)) +#define vmax_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmax_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmax_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmax_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmax_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmax_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmax_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmax_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmax_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmax_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmax_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmax_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmax_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmax_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vmax_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmax_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmax_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmul_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmul_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmul_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmul_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmul_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmul_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmul_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmul_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmul_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmul_vv_i8mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmul_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmul_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmul_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmul_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmul_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmul_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmul_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmul_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmul_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmul_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmul_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmul_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmul_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmul_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmul_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmul_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmul_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmul_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmul_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmul_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmul_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmul_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmul_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmul_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmul_vv_i32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmul_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmul_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmul_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmul_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmul_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmul_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmul_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmul_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmul_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmul_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmul_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmul_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), 
(int8_t)(op3), (size_t)(op4)) +#define vmul_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmul_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmul_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmul_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmul_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmul_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmul_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmul_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmul_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmul_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmul_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmul_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmul_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmul_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmul_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmul_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmul_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmul_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmul_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmul_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmul_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmul_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmul_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmul_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmul_vx_i64m2(op0, op1, op2) \ 
+__builtin_rvv_vmul_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmul_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmul_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmul_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmul_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmul_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmul_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmul_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmul_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmul_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmul_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmul_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmul_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmul_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmul_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmul_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmul_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmul_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmul_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmul_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmul_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmul_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmul_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmul_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmul_vv_u16m4(op0, op1, op2) \ 
+__builtin_rvv_vmul_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmul_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmul_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmul_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmul_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmul_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmul_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmul_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmul_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmul_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmul_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmul_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmul_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmul_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmul_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmul_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmul_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmul_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmul_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmul_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmul_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmul_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmul_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmul_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), 
(vuint64m4_t)(op3), (size_t)(op4)) +#define vmul_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vmul_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmul_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmul_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmul_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmul_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmul_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmul_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmul_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmul_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmul_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmul_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmul_vx_u16mf2(op0, op1, 
op2) \ +__builtin_rvv_vmul_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmul_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmul_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmul_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmul_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmul_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmul_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmul_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmul_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmul_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmul_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmul_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmul_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmul_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmul_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmul_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmul_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmul_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmul_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmul_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmul_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vmul_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmul_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmul_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8m2(op0, op1, op2) \ 
+__builtin_rvv_vmulh_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmulh_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vmulh_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmulh_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vmulh_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmulh_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vmulh_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmulh_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vmulh_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmulh_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vmulh_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define 
vmulh_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vmulh_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmulh_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vmulh_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmulh_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vmulh_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmulh_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vmulh_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmulh_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vmulh_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmulh_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vmulh_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmulh_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vmulh_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define 
vmulh_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vmulh_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vmulh_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmulh_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmulh_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmulh_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmulh_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmulh_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmulh_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmulh_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmulh_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmulh_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmulh_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vmulh_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vmulh_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmulh_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmulh_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmulh_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmulh_vx_i32m4(op0, op1, op2) \ 
+__builtin_rvv_vmulh_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmulh_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmulh_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmulh_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmulh_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vmulh_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vmulh_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmulh_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmulh_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmulh_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmulh_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmulh_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmulh_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vmulh_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vmulh_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulh_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8mf4(op0, op1, op2) 
\ +__builtin_rvv_vmulhu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmulhu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmulhu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmulhu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), 
(size_t)(op2)) +#define vmulhu_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define 
vmulhu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vmulhu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8m1((vint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8m2((vint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8m4((vint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8m8((vint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8mf2((vint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8mf4((vint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i8mf8((vint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i16m1((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i16m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmulhsu_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i16m2((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i16m4((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i16m8((vint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i16mf2((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i16mf4((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i32m1((vint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i32m2((vint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i32m4((vint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i32m8((vint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i32mf2((vint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i64m1((vint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define 
vmulhsu_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i64m2((vint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i64m4((vint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmulhsu_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vv_i64m8((vint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmulhsu_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8m1((vint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8m2((vint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8m4((vint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8m8((vint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) 
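+// The multiply macros above all follow one shape: the unmasked form takes
+// (op1, op2, vl) and the masked _m form takes (mask, maskedoff, op1, op2, vl).
+// Each macro casts its arguments to the RVV types named in its suffix and
+// forwards to the matching __builtin_rvv_* builtin, so mismatched operands
+// surface as type errors at the builtin call. The vsuxei8_v_i32* indexed-store
+// macros interleaved above take (base, bindex, value, vl) instead.
+// A minimal usage sketch (hypothetical values; assumes an RVV-enabled
+// clang 14 toolchain with this header on the include path):
+//   size_t vl = vsetvl_e32m1(n);                       // elements this strip
+//   vint32m1_t lo = vmul_vx_i32m1(v, 3, vl);           // low half of v[i]*3
+//   vint32m1_t hi = vmulh_vx_i32m1(v, 3, vl);          // signed high half
+//   vint32m1_t t  = vmul_vx_i32m1_m(m, lo, v, 3, vl);  // only where m[i] set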
+#define vmulhsu_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8mf2((vint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8mf4((vint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i8mf8((vint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i16m1((vint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i16m2((vint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i16m4((vint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i16m8((vint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i16mf2((vint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i16mf4((vint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i32m1((vint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i32m2((vint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i32m4(op0, op1, op2) \ 
+__builtin_rvv_vmulhsu_vx_i32m4((vint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i32m8((vint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i32mf2((vint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i64m1((vint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i64m2((vint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i64m4((vint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vmulhsu_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vmulhsu_vx_i64m8((vint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vmulhsu_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmulhsu_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vdivu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vdivu_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vdivu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vdivu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vdivu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), 
(size_t)(op4)) +#define vdivu_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vdivu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vdivu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vdivu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vdivu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vdivu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vdivu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vdivu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vdivu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vdivu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vdivu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vdivu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vdivu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vdivu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vdivu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vdivu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vdivu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vdivu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vdivu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vdivu_vv_u32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vdivu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vdivu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vdivu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vdivu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vdivu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vdivu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vdivu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vdivu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vdivu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vdivu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vdivu_vx_u8mf8_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vdivu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vdivu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vdivu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vdivu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vdivu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vdivu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vdivu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vdivu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vdivu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vdivu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vdivu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vdivu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vdivu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vdivu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vdivu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vdivu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vdivu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vdivu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vdivu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vdivu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vdivu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vdivu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vdivu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) 
+#define vdivu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vdivu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vdivu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vdivu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vdivu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vdivu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vdivu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vdivu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdivu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vdiv_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vdiv_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vdiv_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vdiv_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vdiv_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i16m2_m(op0, op1, op2, op3, 
op4) \ +__builtin_rvv_vdiv_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vdiv_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vdiv_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vdiv_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vdiv_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vdiv_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vdiv_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vdiv_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vdiv_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vdiv_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vdiv_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vdiv_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vdiv_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vdiv_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vdiv_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vdiv_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vdiv_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vdiv_vv_i64m4_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vdiv_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vdiv_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vdiv_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vdiv_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vdiv_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vdiv_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vdiv_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vdiv_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vdiv_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vdiv_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vdiv_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), 
(size_t)(op4)) +#define vdiv_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vdiv_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vdiv_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vdiv_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vdiv_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vdiv_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vdiv_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vdiv_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vdiv_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vdiv_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vdiv_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vdiv_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vdiv_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vdiv_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vdiv_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vdiv_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vdiv_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vdiv_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vdiv_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vdiv_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vdiv_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vdiv_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vdiv_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vdiv_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vremu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vremu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vremu_vv_u8m2(op0, op1, op2) \ 
+__builtin_rvv_vremu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vremu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vremu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vremu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vremu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vremu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vremu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vremu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vremu_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vremu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vremu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vremu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vremu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vremu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vremu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vremu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vremu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vremu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vremu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vremu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vremu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vremu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vremu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vremu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), 
(vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vremu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vremu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vremu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vremu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vremu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vremu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vremu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vremu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vremu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vremu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vremu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vremu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vremu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vremu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vremu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vremu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vremu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vremu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vremu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vremu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vremu_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vremu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vremu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vremu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vremu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vremu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vremu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vremu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vremu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vremu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vremu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vremu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vremu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vremu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vremu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vremu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vremu_vx_u32m2_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vremu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vremu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vremu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vremu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vremu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vremu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vremu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vremu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vremu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vremu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vremu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vremu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vremu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vremu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vremu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vremu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vremu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vrem_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vrem_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vrem_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vrem_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vrem_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vrem_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vrem_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vrem_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vrem_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vrem_vv_i8mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vrem_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vrem_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vrem_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vrem_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vrem_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vrem_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vrem_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vrem_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vrem_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vrem_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vrem_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vrem_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vrem_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vrem_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vrem_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vrem_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vrem_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vrem_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vrem_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vrem_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vrem_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vrem_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vrem_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vrem_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vrem_vv_i32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vrem_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vrem_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vrem_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vrem_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vrem_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vrem_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vrem_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vrem_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vrem_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vrem_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vrem_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vrem_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vrem_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), 
(int8_t)(op3), (size_t)(op4)) +#define vrem_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrem_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrem_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrem_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrem_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrem_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrem_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrem_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrem_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrem_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrem_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vrem_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vrem_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrem_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrem_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrem_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrem_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrem_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrem_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrem_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrem_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vrem_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vrem_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrem_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrem_vx_i64m2(op0, op1, op2) \ 
+__builtin_rvv_vrem_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrem_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrem_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrem_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrem_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vrem_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vrem_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrem_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vwmul_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i16mf4((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vwmul_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwmul_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i16mf2((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vwmul_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwmul_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i16m1((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vwmul_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwmul_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i16m2((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vwmul_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwmul_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i16m4((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vwmul_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwmul_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i16m8((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vwmul_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwmul_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i32mf2((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vwmul_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwmul_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i32m1((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vwmul_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwmul_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i32m2((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vwmul_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwmul_vv_i32m4(op0, 
op1, op2) \ +__builtin_rvv_vwmul_vv_i32m4((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vwmul_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwmul_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i32m8((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vwmul_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwmul_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i64m1((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vwmul_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwmul_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i64m2((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vwmul_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwmul_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i64m4((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vwmul_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwmul_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vwmul_vv_i64m8((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vwmul_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmul_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i16mf4((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define 
vwmul_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwmul_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i16mf2((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwmul_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwmul_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i16m1((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwmul_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwmul_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i16m2((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwmul_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwmul_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i16m4((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwmul_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwmul_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i16m8((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vwmul_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vwmul_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i32mf2((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwmul_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwmul_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i32m1((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwmul_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwmul_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i32m2((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwmul_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwmul_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i32m4((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwmul_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwmul_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i32m8((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vwmul_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vwmul_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i64m1((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwmul_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwmul_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i64m2((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwmul_vx_i64m2_m(op0, op1, op2, op3, op4) 
\ +__builtin_rvv_vwmul_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwmul_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i64m4((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwmul_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwmul_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vwmul_vx_i64m8((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vwmul_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmul_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u16mf4((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u16mf2((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u16m1((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u16m2((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u16m4((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u16m8((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u32mf2((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u32m1((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u32m2((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u32m4(op0, op1, op2) \ 
+__builtin_rvv_vwmulu_vv_u32m4((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u32m8((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u64m1((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u64m2((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u64m4((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwmulu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vwmulu_vv_u64m8((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vwmulu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u16mf4((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u16mf2((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u16m1((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u16m2((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u16m4((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u16m8((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u16m8_m((vbool2_t)(op0), 
(vuint16m8_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u32mf2((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u32m1((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u32m2((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u32m4((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u32m8((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u64m1((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u64m2((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u64m4((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vwmulu_vx_u64m8((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i16mf4((vint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i16mf2((vint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i16m1((vint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) 
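+// Illustrative note (not part of the upstream generated header): each
+// widening-multiply macro above just casts its operands and forwards them to
+// the matching __builtin_rvv_* builtin, producing elements of 2*SEW. A
+// minimal usage sketch with hypothetical variables (a: vint8mf2_t,
+// b: vuint8mf2_t, vl: size_t):
+//   vint16m1_t w = vwmulsu_vv_i16m1(a, b, vl);
+// The masked _m variants take the mask first and the maskedoff vector next.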
+#define vwmulsu_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i16m2((vint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i16m4((vint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i16m8((vint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i32mf2((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i32m1((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i32m2((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i32m4((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i32m8((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i64m1((vint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i64m2((vint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i64m4((vint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), 
(vuint32m2_t)(op3), (size_t)(op4)) +#define vwmulsu_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vv_i64m8((vint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vwmulsu_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i16mf4((vint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i16mf2((vint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i16m1((vint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i16m2((vint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i16m4((vint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i16m8((vint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i32mf2((vint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i32m1((vint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i32m2((vint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i32m4((vint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i32m8((vint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vwmulsu_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i64m1((vint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i64m2((vint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i64m4((vint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vwmulsu_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vwmulsu_vx_i64m8((vint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vwmulsu_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmulsu_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmacc_vv_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vmacc_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) 
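+// Hedged sketch (illustrative only, not upstream content): the vmacc macros
+// compute vd = vs1 * vs2 + vd elementwise. In the masked form the mask comes
+// first and the accumulator doubles as the maskedoff operand, e.g. with
+// hypothetical variables (m: vbool8_t, acc/a/b: vint8m1_t, vl: size_t):
+//   acc = vmacc_vv_i8m1_m(m, acc, a, b, vl);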
+#define vmacc_vv_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vmacc_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmacc_vv_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), 
(vint32mf2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (int8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (int8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (int8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (int8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (int8_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (int8_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmacc_vx_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vmacc_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (int8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmacc_vx_i16m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vmacc_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (int16_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (int16_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (int16_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (int16_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (int16_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vmacc_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (int16_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmacc_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (int32_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (int32_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (int32_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (int32_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (int32_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i64m1((vint64m1_t)(op0), 
(int64_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (int64_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (int64_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (int64_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (int64_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmacc_vv_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vmacc_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmacc_vv_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), 
(vuint16m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmacc_vv_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vmacc_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmacc_vv_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmacc_vv_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define 
vmacc_vv_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vmacc_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmacc_vv_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vmacc_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmacc_vv_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vmacc_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmacc_vv_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vmacc_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (uint8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (uint8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (uint8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (uint8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (uint8_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (uint8_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmacc_vx_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vmacc_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (uint8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmacc_vx_u16m1(op0, op1, 
op2, op3) \ +__builtin_rvv_vmacc_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (uint16_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (uint16_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (uint16_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (uint16_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (uint16_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmacc_vx_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vmacc_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (uint16_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmacc_vx_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (uint32_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (uint32_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (uint32_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (uint32_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmacc_vx_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (uint32_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define 
vmacc_vx_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vmacc_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (uint64_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmacc_vx_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vmacc_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (uint64_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmacc_vx_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vmacc_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (uint64_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmacc_vx_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmacc_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vmacc_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmacc_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (uint64_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define 
vnmsac_vv_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i32mf2_m((vbool64_t)(op0), 
(vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (int8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vnmsac_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (int8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (int8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (int8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (int8_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (int8_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (int8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (int16_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (int16_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (int16_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (int16_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (int16_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i16mf4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vnmsac_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (int16_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (int32_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (int32_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (int32_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (int32_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (int32_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (int64_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (int64_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (int64_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (int64_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vnmsac_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u16mf4((vuint16mf4_t)(op0), 
(vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vnmsac_vv_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vnmsac_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (uint8_t)(op2), (vuint8m1_t)(op3), 
(size_t)(op4)) +#define vnmsac_vx_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (uint8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (uint8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (uint8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (uint8_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (uint8_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (uint8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (uint16_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (uint16_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (uint16_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (uint16_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (uint16_t)(op2), 
(vuint16mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (uint16_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (uint32_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (uint32_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (uint32_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (uint32_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (uint32_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (uint64_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (uint64_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (uint64_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vnmsac_vx_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsac_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vnmsac_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsac_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (uint64_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmadd_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmadd_vv_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vmadd_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmadd_vv_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i16mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmadd_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vmadd_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmadd_vv_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8m1_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vmadd_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (int8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (int8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (int8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (int8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (int8_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (int8_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vmadd_vx_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vmadd_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (int8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vmadd_vx_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (int16_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (int16_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (int16_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (int16_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (int16_t)(op2), 
(vint16mf2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vmadd_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (int16_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vmadd_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (int32_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (int32_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (int32_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (int32_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (int32_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (int64_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (int64_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (int64_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (int64_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define 
vmadd_vv_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmadd_vv_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vmadd_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmadd_vv_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), 
(vuint16mf2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vmadd_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmadd_vv_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmadd_vv_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vmadd_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmadd_vv_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vmadd_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmadd_vv_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vmadd_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmadd_vv_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vmadd_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmadd_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (uint8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (uint8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (uint8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (uint8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (uint8_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (uint8_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vmadd_vx_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vmadd_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (uint8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vmadd_vx_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (uint16_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (uint16_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (uint16_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (uint16_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u16mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmadd_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (uint16_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vmadd_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (uint16_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vmadd_vx_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (uint32_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (uint32_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (uint32_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (uint32_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vmadd_vx_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (uint32_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vmadd_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (uint64_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmadd_vx_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vmadd_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (uint64_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vmadd_vx_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vmadd_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (uint64_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vmadd_vx_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmadd_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vmadd_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmadd_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (uint64_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8m1_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vnmsub_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i16mf2_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), 
(vint8m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (int8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (int8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (int8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (int8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (int8_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (int8_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (int8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (int16_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (int16_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (int16_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (int16_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i16mf2_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (int16_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (int16_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (int32_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (int32_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (int32_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (int32_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (int32_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (int64_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (int64_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (int64_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (int64_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8m1_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), 
(vuint16mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vnmsub_vv_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vnmsub_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), 
(size_t)(op4)) +#define vsuxei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (uint8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (uint8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (uint8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (uint8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (uint8_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8mf4_m((vbool32_t)(op0), 
(vuint8mf4_t)(op1), (uint8_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (uint8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (uint16_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (uint16_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (uint16_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (uint16_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (uint16_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (uint16_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (uint32_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (uint32_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (uint32_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vnmsub_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (uint32_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (uint32_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (uint64_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (uint64_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (uint64_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vnmsub_vx_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vnmsub_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vnmsub_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vnmsub_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (uint64_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u16mf4((vuint16mf4_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u16mf2((vuint16mf2_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u16m1((vuint16m1_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u16m2((vuint16m2_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u16m4((vuint16m4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u16m8((vuint16m8_t)(op0), 
(vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u32mf2((vuint32mf2_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u32m1((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u32m2((vuint32m2_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u32m4((vuint32m4_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u32m8((vuint32m8_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u64m1((vuint64m1_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u64m2((vuint64m2_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u64m4((vuint64m4_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwmaccu_vv_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vv_u64m8((vuint64m8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vwmaccu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u16mf4((vuint16mf4_t)(op0), (uint8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u16mf4_m((vbool64_t)(op0), 
(vuint16mf4_t)(op1), (uint8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u16mf2((vuint16mf2_t)(op0), (uint8_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (uint8_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u16m1((vuint16m1_t)(op0), (uint8_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (uint8_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u16m2((vuint16m2_t)(op0), (uint8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (uint8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u16m4((vuint16m4_t)(op0), (uint8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (uint8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u16m8((vuint16m8_t)(op0), (uint8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (uint8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u32mf2((vuint32mf2_t)(op0), (uint16_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (uint16_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u32m1((vuint32m1_t)(op0), (uint16_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (uint16_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u32m2((vuint32m2_t)(op0), (uint16_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (uint16_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u32m4((vuint32m4_t)(op0), (uint16_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (uint16_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u32m8((vuint32m8_t)(op0), (uint16_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (uint16_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u64m1((vuint64m1_t)(op0), (uint32_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define 
vwmaccu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (uint32_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u64m2((vuint64m2_t)(op0), (uint32_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (uint32_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u64m4((vuint64m4_t)(op0), (uint32_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (uint32_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwmaccu_vx_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccu_vx_u64m8((vuint64m8_t)(op0), (uint32_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vwmaccu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (uint32_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i16mf4((vint16mf4_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i16mf2((vint16mf2_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i16m1((vint16m1_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i16m2((vint16m2_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i16m4((vint16m4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i16m8((vint16m8_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i32mf2((vint32mf2_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i32m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vwmacc_vv_i32m1((vint32m1_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i32m2((vint32m2_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i32m4((vint32m4_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i32m8((vint32m8_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i64m1((vint64m1_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i64m2((vint64m2_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i64m4((vint64m4_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwmacc_vv_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vv_i64m8((vint64m8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vwmacc_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i16mf4((vint16mf4_t)(op0), (int8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (int8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i16mf2((vint16mf2_t)(op0), (int8_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (int8_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i16m1((vint16m1_t)(op0), (int8_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (int8_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define 
vwmacc_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i16m2((vint16m2_t)(op0), (int8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (int8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i16m4((vint16m4_t)(op0), (int8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (int8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i16m8((vint16m8_t)(op0), (int8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (int8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i32mf2((vint32mf2_t)(op0), (int16_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (int16_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i32m1((vint32m1_t)(op0), (int16_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (int16_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i32m2((vint32m2_t)(op0), (int16_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (int16_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i32m4((vint32m4_t)(op0), (int16_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (int16_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i32m8((vint32m8_t)(op0), (int16_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (int16_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i64m1((vint64m1_t)(op0), (int32_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (int32_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i64m2((vint64m2_t)(op0), (int32_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (int32_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmacc_vx_i64m4((vint64m4_t)(op0), (int32_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (int32_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwmacc_vx_i64m8(op0, op1, 
op2, op3) \ +__builtin_rvv_vwmacc_vx_i64m8((vint64m8_t)(op0), (int32_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vwmacc_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmacc_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (int32_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i16mf4((vint16mf4_t)(op0), (vint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i16mf2((vint16mf2_t)(op0), (vint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i16m1((vint16m1_t)(op0), (vint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i16m2((vint16m2_t)(op0), (vint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i16m4((vint16m4_t)(op0), (vint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i16m8((vint16m8_t)(op0), (vint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i32mf2((vint32mf2_t)(op0), (vint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i32m1((vint32m1_t)(op0), (vint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i32m2((vint32m2_t)(op0), (vint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i32m4((vint32m4_t)(op0), (vint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i32m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vwmaccsu_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i32m8((vint32m8_t)(op0), (vint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i64m1((vint64m1_t)(op0), (vint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i64m2((vint64m2_t)(op0), (vint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i64m4((vint64m4_t)(op0), (vint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vv_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vv_i64m8((vint64m8_t)(op0), (vint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i16mf4((vint16mf4_t)(op0), (int8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (int8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i16mf2((vint16mf2_t)(op0), (int8_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (int8_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i16m1((vint16m1_t)(op0), (int8_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (int8_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i16m2((vint16m2_t)(op0), (int8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (int8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i16m4((vint16m4_t)(op0), (int8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (int8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i16m8(op0, op1, op2, op3) \ 
+__builtin_rvv_vwmaccsu_vx_i16m8((vint16m8_t)(op0), (int8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (int8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i32mf2((vint32mf2_t)(op0), (int16_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (int16_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i32m1((vint32m1_t)(op0), (int16_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (int16_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i32m2((vint32m2_t)(op0), (int16_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (int16_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i32m4((vint32m4_t)(op0), (int16_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (int16_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i32m8((vint32m8_t)(op0), (int16_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (int16_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i64m1((vint64m1_t)(op0), (int32_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (int32_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i64m2((vint64m2_t)(op0), (int32_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (int32_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i64m4((vint64m4_t)(op0), (int32_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (int32_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vwmaccsu_vx_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccsu_vx_i64m8((vint64m8_t)(op0), (int32_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vwmaccsu_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccsu_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (int32_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i16mf4((vint16mf4_t)(op0), (uint8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), 
(uint8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i16mf2((vint16mf2_t)(op0), (uint8_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (uint8_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i16m1((vint16m1_t)(op0), (uint8_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (uint8_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i16m2((vint16m2_t)(op0), (uint8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (uint8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i16m4((vint16m4_t)(op0), (uint8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (uint8_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i16m8((vint16m8_t)(op0), (uint8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (uint8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i32mf2((vint32mf2_t)(op0), (uint16_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (uint16_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i32m1((vint32m1_t)(op0), (uint16_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (uint16_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i32m2((vint32m2_t)(op0), (uint16_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (uint16_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i32m4((vint32m4_t)(op0), (uint16_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (uint16_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i32m8((vint32m8_t)(op0), (uint16_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (uint16_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i64m1((vint64m1_t)(op0), (uint32_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i64m1_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vwmaccus_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (uint32_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i64m2((vint64m2_t)(op0), (uint32_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (uint32_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i64m4((vint64m4_t)(op0), (uint32_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (uint32_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vwmaccus_vx_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vwmaccus_vx_i64m8((vint64m8_t)(op0), (uint32_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vwmaccus_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwmaccus_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (uint32_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vmerge_vvm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8m1((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8m2((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8m4((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8m8((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8mf2((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8mf4((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i8mf8((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i16m1((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i16m2((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i16m4((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i16m8((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i16mf2((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i16mf4((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i32m1((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i32m2((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i32m4((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define 
vmerge_vvm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i32m8((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i32mf2((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i64m1((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i64m2((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i64m4((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_i64m8((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8m1((vbool8_t)(op0), (vint8m1_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8m2((vbool4_t)(op0), (vint8m2_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8m4((vbool2_t)(op0), (vint8m4_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8m8((vbool1_t)(op0), (vint8m8_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8mf2((vbool16_t)(op0), (vint8mf2_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8mf4((vbool32_t)(op0), (vint8mf4_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i8mf8((vbool64_t)(op0), (vint8mf8_t)(op1), (int8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i16m1((vbool16_t)(op0), (vint16m1_t)(op1), (int16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i16m2((vbool8_t)(op0), (vint16m2_t)(op1), (int16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i16m4((vbool4_t)(op0), (vint16m4_t)(op1), (int16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i16m8((vbool2_t)(op0), (vint16m8_t)(op1), (int16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i16mf2((vbool32_t)(op0), (vint16mf2_t)(op1), (int16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i16mf4((vbool64_t)(op0), (vint16mf4_t)(op1), (int16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i32m1((vbool32_t)(op0), (vint32m1_t)(op1), (int32_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i32m2((vbool16_t)(op0), (vint32m2_t)(op1), (int32_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i32m4((vbool8_t)(op0), (vint32m4_t)(op1), (int32_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i32m8((vbool4_t)(op0), (vint32m8_t)(op1), (int32_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i32mf2((vbool64_t)(op0), (vint32mf2_t)(op1), (int32_t)(op2), (size_t)(op3)) +#define 
vmerge_vxm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i64m1((vbool64_t)(op0), (vint64m1_t)(op1), (int64_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i64m2((vbool32_t)(op0), (vint64m2_t)(op1), (int64_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i64m4((vbool16_t)(op0), (vint64m4_t)(op1), (int64_t)(op2), (size_t)(op3)) +#define vmerge_vxm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_i64m8((vbool8_t)(op0), (vint64m8_t)(op1), (int64_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vmerge_vvm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8m1((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8m2((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8m4((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8m8((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8mf2((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8mf4((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u8mf8((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u16m1((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u16m2((vbool8_t)(op0), 
(vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u16m4((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u16m8((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u16mf2((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u16mf4((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u32m1((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u32m2((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u32m4((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u32m8((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u32mf2((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u64m1((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u64m2((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u64m4((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_u64m8((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8m1((vbool8_t)(op0), (vuint8m1_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8m2((vbool4_t)(op0), (vuint8m2_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8m4((vbool2_t)(op0), (vuint8m4_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8m8((vbool1_t)(op0), (vuint8m8_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8mf2((vbool16_t)(op0), (vuint8mf2_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8mf4((vbool32_t)(op0), (vuint8mf4_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u8mf8((vbool64_t)(op0), (vuint8mf8_t)(op1), (uint8_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u16m1((vbool16_t)(op0), (vuint16m1_t)(op1), (uint16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u16m2((vbool8_t)(op0), (vuint16m2_t)(op1), (uint16_t)(op2), (size_t)(op3)) +#define vmerge_vxm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vxm_u16m4((vbool4_t)(op0), (vuint16m4_t)(op1), (uint16_t)(op2), (size_t)(op3)) +#define 
+#define vmerge_vxm_u16m8(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u16m8((vbool2_t)(op0), (vuint16m8_t)(op1), (uint16_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u16mf2((vbool32_t)(op0), (vuint16mf2_t)(op1), (uint16_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u16mf4((vbool64_t)(op0), (vuint16mf4_t)(op1), (uint16_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u32m1((vbool32_t)(op0), (vuint32m1_t)(op1), (uint32_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u32m2(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u32m2((vbool16_t)(op0), (vuint32m2_t)(op1), (uint32_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u32m4(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u32m4((vbool8_t)(op0), (vuint32m4_t)(op1), (uint32_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u32m8(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u32m8((vbool4_t)(op0), (vuint32m8_t)(op1), (uint32_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u32mf2((vbool64_t)(op0), (vuint32mf2_t)(op1), (uint32_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u64m1((vbool64_t)(op0), (vuint64m1_t)(op1), (uint64_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u64m2((vbool32_t)(op0), (vuint64m2_t)(op1), (uint64_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u64m4((vbool16_t)(op0), (vuint64m4_t)(op1), (uint64_t)(op2), (size_t)(op3))
+#define vmerge_vxm_u64m8(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vxm_u64m8((vbool8_t)(op0), (vuint64m8_t)(op1), (uint64_t)(op2), (size_t)(op3))
+#define vmv_v_v_u8m1(op0, op1) \
+__builtin_rvv_vmv_v_v_u8m1((vuint8m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_u8m2(op0, op1) \
+__builtin_rvv_vmv_v_v_u8m2((vuint8m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u8m4(op0, op1) \
+__builtin_rvv_vmv_v_v_u8m4((vuint8m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_u8m8(op0, op1) \
+__builtin_rvv_vmv_v_v_u8m8((vuint8m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_u8mf2(op0, op1) \
+__builtin_rvv_vmv_v_v_u8mf2((vuint8mf2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u8mf4(op0, op1) \
+__builtin_rvv_vmv_v_v_u8mf4((vuint8mf4_t)(op0), (size_t)(op1))
+#define vmv_v_v_u8mf8(op0, op1) \
+__builtin_rvv_vmv_v_v_u8mf8((vuint8mf8_t)(op0), (size_t)(op1))
+#define vmv_v_v_u16m1(op0, op1) \
+__builtin_rvv_vmv_v_v_u16m1((vuint16m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_u16m2(op0, op1) \
+__builtin_rvv_vmv_v_v_u16m2((vuint16m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u16m4(op0, op1) \
+__builtin_rvv_vmv_v_v_u16m4((vuint16m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_u16m8(op0, op1) \
+__builtin_rvv_vmv_v_v_u16m8((vuint16m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_u16mf2(op0, op1) \
+__builtin_rvv_vmv_v_v_u16mf2((vuint16mf2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u16mf4(op0, op1) \
+__builtin_rvv_vmv_v_v_u16mf4((vuint16mf4_t)(op0), (size_t)(op1))
+#define vmv_v_v_u32m1(op0, op1) \
+__builtin_rvv_vmv_v_v_u32m1((vuint32m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_u32m2(op0, op1) \
+__builtin_rvv_vmv_v_v_u32m2((vuint32m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u32m4(op0, op1) \
+__builtin_rvv_vmv_v_v_u32m4((vuint32m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_u32m8(op0, op1) \
+__builtin_rvv_vmv_v_v_u32m8((vuint32m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_u32mf2(op0, op1) \
+__builtin_rvv_vmv_v_v_u32mf2((vuint32mf2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u64m1(op0, op1) \
+__builtin_rvv_vmv_v_v_u64m1((vuint64m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_u64m2(op0, op1) \
+__builtin_rvv_vmv_v_v_u64m2((vuint64m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_u64m4(op0, op1) \
+__builtin_rvv_vmv_v_v_u64m4((vuint64m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_u64m8(op0, op1) \
+__builtin_rvv_vmv_v_v_u64m8((vuint64m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8m1(op0, op1) \
+__builtin_rvv_vmv_v_v_i8m1((vint8m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8m2(op0, op1) \
+__builtin_rvv_vmv_v_v_i8m2((vint8m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8m4(op0, op1) \
+__builtin_rvv_vmv_v_v_i8m4((vint8m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8m8(op0, op1) \
+__builtin_rvv_vmv_v_v_i8m8((vint8m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8mf2(op0, op1) \
+__builtin_rvv_vmv_v_v_i8mf2((vint8mf2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8mf4(op0, op1) \
+__builtin_rvv_vmv_v_v_i8mf4((vint8mf4_t)(op0), (size_t)(op1))
+#define vmv_v_v_i8mf8(op0, op1) \
+__builtin_rvv_vmv_v_v_i8mf8((vint8mf8_t)(op0), (size_t)(op1))
+#define vmv_v_v_i16m1(op0, op1) \
+__builtin_rvv_vmv_v_v_i16m1((vint16m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_i16m2(op0, op1) \
+__builtin_rvv_vmv_v_v_i16m2((vint16m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i16m4(op0, op1) \
+__builtin_rvv_vmv_v_v_i16m4((vint16m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_i16m8(op0, op1) \
+__builtin_rvv_vmv_v_v_i16m8((vint16m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_i16mf2(op0, op1) \
+__builtin_rvv_vmv_v_v_i16mf2((vint16mf2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i16mf4(op0, op1) \
+__builtin_rvv_vmv_v_v_i16mf4((vint16mf4_t)(op0), (size_t)(op1))
+#define vmv_v_v_i32m1(op0, op1) \
+__builtin_rvv_vmv_v_v_i32m1((vint32m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_i32m2(op0, op1) \
+__builtin_rvv_vmv_v_v_i32m2((vint32m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i32m4(op0, op1) \
+__builtin_rvv_vmv_v_v_i32m4((vint32m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_i32m8(op0, op1) \
+__builtin_rvv_vmv_v_v_i32m8((vint32m8_t)(op0), (size_t)(op1))
+#define vmv_v_v_i32mf2(op0, op1) \
+__builtin_rvv_vmv_v_v_i32mf2((vint32mf2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i64m1(op0, op1) \
+__builtin_rvv_vmv_v_v_i64m1((vint64m1_t)(op0), (size_t)(op1))
+#define vmv_v_v_i64m2(op0, op1) \
+__builtin_rvv_vmv_v_v_i64m2((vint64m2_t)(op0), (size_t)(op1))
+#define vmv_v_v_i64m4(op0, op1) \
+__builtin_rvv_vmv_v_v_i64m4((vint64m4_t)(op0), (size_t)(op1))
+#define vmv_v_v_i64m8(op0, op1) \
+__builtin_rvv_vmv_v_v_i64m8((vint64m8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8m1(op0, op1) \
+__builtin_rvv_vmv_v_x_i8m1((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8m2(op0, op1) \
+__builtin_rvv_vmv_v_x_i8m2((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8m4(op0, op1) \
+__builtin_rvv_vmv_v_x_i8m4((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8m8(op0, op1) \
+__builtin_rvv_vmv_v_x_i8m8((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8mf2(op0, op1) \
+__builtin_rvv_vmv_v_x_i8mf2((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8mf4(op0, op1) \
+__builtin_rvv_vmv_v_x_i8mf4((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i8mf8(op0, op1) \
+__builtin_rvv_vmv_v_x_i8mf8((int8_t)(op0), (size_t)(op1))
+#define vmv_v_x_i16m1(op0, op1) \
+__builtin_rvv_vmv_v_x_i16m1((int16_t)(op0), (size_t)(op1))
+#define vmv_v_x_i16m2(op0, op1) \
+__builtin_rvv_vmv_v_x_i16m2((int16_t)(op0), (size_t)(op1))
+#define vmv_v_x_i16m4(op0, op1) \
+__builtin_rvv_vmv_v_x_i16m4((int16_t)(op0), (size_t)(op1))
+#define vmv_v_x_i16m8(op0, op1) \
+__builtin_rvv_vmv_v_x_i16m8((int16_t)(op0), (size_t)(op1))
+#define vmv_v_x_i16mf2(op0, op1) \
+__builtin_rvv_vmv_v_x_i16mf2((int16_t)(op0), (size_t)(op1))
+#define vmv_v_x_i16mf4(op0, op1) \
+__builtin_rvv_vmv_v_x_i16mf4((int16_t)(op0), (size_t)(op1))
+#define vmv_v_x_i32m1(op0, op1) \
+__builtin_rvv_vmv_v_x_i32m1((int32_t)(op0), (size_t)(op1))
+#define vmv_v_x_i32m2(op0, op1) \
+__builtin_rvv_vmv_v_x_i32m2((int32_t)(op0), (size_t)(op1))
+#define vmv_v_x_i32m4(op0, op1) \
+__builtin_rvv_vmv_v_x_i32m4((int32_t)(op0), (size_t)(op1))
+#define vmv_v_x_i32m8(op0, op1) \
+__builtin_rvv_vmv_v_x_i32m8((int32_t)(op0), (size_t)(op1))
+#define vmv_v_x_i32mf2(op0, op1) \
+__builtin_rvv_vmv_v_x_i32mf2((int32_t)(op0), (size_t)(op1))
+#define vmv_v_x_i64m1(op0, op1) \
+__builtin_rvv_vmv_v_x_i64m1((int64_t)(op0), (size_t)(op1))
+#define vmv_v_x_i64m2(op0, op1) \
+__builtin_rvv_vmv_v_x_i64m2((int64_t)(op0), (size_t)(op1))
+#define vmv_v_x_i64m4(op0, op1) \
+__builtin_rvv_vmv_v_x_i64m4((int64_t)(op0), (size_t)(op1))
+#define vmv_v_x_i64m8(op0, op1) \
+__builtin_rvv_vmv_v_x_i64m8((int64_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8m1(op0, op1) \
+__builtin_rvv_vmv_v_x_u8m1((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8m2(op0, op1) \
+__builtin_rvv_vmv_v_x_u8m2((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8m4(op0, op1) \
+__builtin_rvv_vmv_v_x_u8m4((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8m8(op0, op1) \
+__builtin_rvv_vmv_v_x_u8m8((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8mf2(op0, op1) \
+__builtin_rvv_vmv_v_x_u8mf2((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8mf4(op0, op1) \
+__builtin_rvv_vmv_v_x_u8mf4((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u8mf8(op0, op1) \
+__builtin_rvv_vmv_v_x_u8mf8((uint8_t)(op0), (size_t)(op1))
+#define vmv_v_x_u16m1(op0, op1) \
+__builtin_rvv_vmv_v_x_u16m1((uint16_t)(op0), (size_t)(op1))
+#define vmv_v_x_u16m2(op0, op1) \
+__builtin_rvv_vmv_v_x_u16m2((uint16_t)(op0), (size_t)(op1))
+#define vmv_v_x_u16m4(op0, op1) \
+__builtin_rvv_vmv_v_x_u16m4((uint16_t)(op0), (size_t)(op1))
+#define vmv_v_x_u16m8(op0, op1) \
+__builtin_rvv_vmv_v_x_u16m8((uint16_t)(op0), (size_t)(op1))
+#define vmv_v_x_u16mf2(op0, op1) \
+__builtin_rvv_vmv_v_x_u16mf2((uint16_t)(op0), (size_t)(op1))
+#define vmv_v_x_u16mf4(op0, op1) \
+__builtin_rvv_vmv_v_x_u16mf4((uint16_t)(op0), (size_t)(op1))
+#define vmv_v_x_u32m1(op0, op1) \
+__builtin_rvv_vmv_v_x_u32m1((uint32_t)(op0), (size_t)(op1))
+#define vmv_v_x_u32m2(op0, op1) \
+__builtin_rvv_vmv_v_x_u32m2((uint32_t)(op0), (size_t)(op1))
+#define vmv_v_x_u32m4(op0, op1) \
+__builtin_rvv_vmv_v_x_u32m4((uint32_t)(op0), (size_t)(op1))
+#define vmv_v_x_u32m8(op0, op1) \
+__builtin_rvv_vmv_v_x_u32m8((uint32_t)(op0), (size_t)(op1))
+#define vmv_v_x_u32mf2(op0, op1) \
+__builtin_rvv_vmv_v_x_u32mf2((uint32_t)(op0), (size_t)(op1))
+#define vmv_v_x_u64m1(op0, op1) \
+__builtin_rvv_vmv_v_x_u64m1((uint64_t)(op0), (size_t)(op1))
+#define vmv_v_x_u64m2(op0, op1) \
+__builtin_rvv_vmv_v_x_u64m2((uint64_t)(op0), (size_t)(op1))
+#define vmv_v_x_u64m4(op0, op1) \
+__builtin_rvv_vmv_v_x_u64m4((uint64_t)(op0), (size_t)(op1))
+#define vmv_v_x_u64m8(op0, op1) \
+__builtin_rvv_vmv_v_x_u64m8((uint64_t)(op0), (size_t)(op1))
+#define vsaddu_vv_u8m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u8m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u8m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u8m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u8mf2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u8mf4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u8mf8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u16m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u16m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u16m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u16m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u16mf2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u16mf4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u32m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u32m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u32m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u32m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u32mf2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u64m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u64m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u64m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vsaddu_vv_u64m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2))
+#define vsaddu_vv_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8mf2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8mf4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u8mf8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u16m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u16m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u16m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u16m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u16mf2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u16mf4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u32m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u32m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u32m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u32m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u32mf2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u64m1(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u64m2(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u64m4(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vsaddu_vx_u64m8(op0, op1, op2) \
+__builtin_rvv_vsaddu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vsaddu_vx_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsaddu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8mf2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8mf4(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4))
+#define vsadd_vv_i8mf8(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2))
+#define vsadd_vv_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4))
+#define vsadd_vv_i16m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2))
+#define vsadd_vv_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4))
+#define vsadd_vv_i16m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i16m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2))
+#define vsadd_vv_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4))
+#define vsadd_vv_i16m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2))
+#define vsadd_vv_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4))
+#define vsadd_vv_i16mf2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i16mf4(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2))
+#define vsadd_vv_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4))
+#define vsadd_vv_i32m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2))
+#define vsadd_vv_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4))
+#define vsadd_vv_i32m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i32m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2))
+#define vsadd_vv_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4))
+#define vsadd_vv_i32m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2))
+#define vsadd_vv_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4))
+#define vsadd_vv_i32mf2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i64m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2))
+#define vsadd_vv_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsadd_vv_i64m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2))
+#define vsadd_vv_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsadd_vv_i64m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2))
+#define vsadd_vv_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsadd_vv_i64m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2))
+#define vsadd_vv_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsadd_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsadd_vx_i16m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsadd_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsadd_vx_i16m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsadd_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsadd_vx_i16m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsadd_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsadd_vx_i16m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsadd_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsadd_vx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsadd_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsadd_vx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsadd_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsadd_vx_i32m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsadd_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsadd_vx_i32m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsadd_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsadd_vx_i32m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsadd_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsadd_vx_i32m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsadd_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsadd_vx_i32mf2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsadd_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsadd_vx_i64m1(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsadd_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsadd_vx_i64m2(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsadd_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsadd_vx_i64m4(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsadd_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsadd_vx_i64m8(op0, op1, op2) \
+__builtin_rvv_vsadd_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsadd_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsadd_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u32m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u32m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u32m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8mf2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8mf4(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vssubu_vv_u8mf8(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vssubu_vv_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vssubu_vv_u16m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vssubu_vv_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vssubu_vv_u16m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u16m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
+#define vssubu_vv_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vssubu_vv_u16m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2))
+#define vssubu_vv_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4))
+#define vssubu_vv_u16mf2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u16mf4(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
+#define vssubu_vv_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vssubu_vv_u32m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vssubu_vv_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vssubu_vv_u32m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u32m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vssubu_vv_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vssubu_vv_u32m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2))
+#define vssubu_vv_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vssubu_vv_u32mf2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u64m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2))
+#define vssubu_vv_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vssubu_vv_u64m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2))
+#define vssubu_vv_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vssubu_vv_u64m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2))
+#define vssubu_vv_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vssubu_vv_u64m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2))
+#define vssubu_vv_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8mf2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8mf4(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u8mf8(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vssubu_vx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4))
+#define vssubu_vx_u16m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vssubu_vx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vssubu_vx_u16m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vssubu_vx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vssubu_vx_u16m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vssubu_vx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vssubu_vx_u16m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vssubu_vx_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vssubu_vx_u16mf2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vssubu_vx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vssubu_vx_u16mf4(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vssubu_vx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4))
+#define vssubu_vx_u32m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vssubu_vx_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vssubu_vx_u32m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vssubu_vx_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vssubu_vx_u32m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vssubu_vx_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vssubu_vx_u32m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vssubu_vx_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vssubu_vx_u32mf2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vssubu_vx_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4))
+#define vssubu_vx_u64m1(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vssubu_vx_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vssubu_vx_u64m2(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vssubu_vx_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vssubu_vx_u64m4(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vssubu_vx_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vssubu_vx_u64m8(op0, op1, op2) \
+__builtin_rvv_vssubu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vssubu_vx_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssubu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4))
+#define vssub_vv_i8m1(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2))
+#define vssub_vv_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4))
+#define vssub_vv_i8m2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2))
+#define vssub_vv_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4))
+#define vssub_vv_i8m4(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2))
+#define vssub_vv_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4))
+#define vssub_vv_i8m8(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2))
+#define vssub_vv_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4))
+#define vssub_vv_i8mf2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2))
+#define vssub_vv_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4))
+#define vssub_vv_i8mf4(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2))
+#define vssub_vv_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4))
+#define vssub_vv_i8mf8(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2))
+#define vssub_vv_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4))
+#define vssub_vv_i16m1(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2))
+#define vssub_vv_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4))
+#define vssub_vv_i16m2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2))
+#define vssub_vv_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4))
+#define vssub_vv_i16m4(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2))
+#define vssub_vv_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4))
+#define vssub_vv_i16m8(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2))
+#define vssub_vv_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4))
+#define vssub_vv_i16mf2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2))
+#define vssub_vv_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4))
+#define vssub_vv_i16mf4(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2))
+#define vssub_vv_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4))
+#define vssub_vv_i32m1(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2))
+#define vssub_vv_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4))
+#define vssub_vv_i32m2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2))
+#define vssub_vv_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4))
+#define vssub_vv_i32m4(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2))
+#define vssub_vv_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4))
+#define vssub_vv_i32m8(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2))
+#define vssub_vv_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4))
+#define vssub_vv_i32mf2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2))
+#define vssub_vv_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4))
+#define vssub_vv_i64m1(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2))
+#define vssub_vv_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vssub_vv_i64m2(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2))
+#define vssub_vv_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vssub_vv_i64m4(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2))
+#define vssub_vv_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vssub_vv_i64m8(op0, op1, op2) \
+__builtin_rvv_vssub_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2))
+#define vssub_vv_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8m1(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8m2(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8m4(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8m8(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vssub_vx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vssub_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vssub_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssub_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vssub_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vssub_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vssub_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vssub_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vssub_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vssub_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vssub_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vssub_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vssub_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vssub_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vssub_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vssub_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vssub_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vssub_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vssub_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vssub_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vssub_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vssub_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vssub_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vssub_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vssub_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vssub_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vssub_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vssub_vx_i64m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vssub_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vssub_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vssub_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vssub_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vssub_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vssub_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vssub_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vssub_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssub_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) 
+#define vaaddu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), 
(vuint64m2_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vaaddu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vaaddu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u16m4_m((vbool4_t)(op0), 
(vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vaaddu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vaaddu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vaaddu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define 
vaaddu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaaddu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vaadd_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vaadd_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vaadd_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vaadd_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vaadd_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vaadd_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vaadd_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vaadd_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vaadd_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vaadd_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define 
vaadd_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vaadd_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vaadd_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vaadd_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vaadd_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vaadd_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vaadd_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vaadd_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vaadd_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vaadd_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vaadd_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vaadd_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vaadd_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vaadd_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vaadd_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vaadd_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vaadd_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vaadd_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8m2((vint8m2_t)(op0), 
(int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vaadd_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vaadd_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vaadd_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vaadd_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vaadd_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vaadd_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vaadd_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vaadd_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vaadd_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vaadd_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vaadd_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vaadd_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vaadd_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vaadd_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define 
vaadd_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vaadd_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vaadd_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vaadd_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vaadd_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vaadd_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vaadd_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vaadd_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vaadd_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vaadd_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vaadd_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vaadd_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vaadd_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vaadd_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vaadd_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vaadd_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vaadd_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vaadd_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vaadd_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vasubu_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vasubu_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define 
vasubu_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vasubu_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vasubu_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vasubu_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vasubu_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vasubu_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vasubu_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vasubu_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vasubu_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vasubu_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vasubu_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vasubu_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vasubu_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vasubu_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vasubu_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vasubu_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vasubu_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), 
(size_t)(op4)) +#define vasubu_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vasubu_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vasubu_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vasubu_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vasubu_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vasubu_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vasubu_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vasubu_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vasubu_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vasubu_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vasubu_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vasubu_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vasubu_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vasubu_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2)) +#define vasubu_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (uint8_t)(op3), (size_t)(op4)) +#define vasubu_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vasubu_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vasubu_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vasubu_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vasubu_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vasubu_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vasubu_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vasubu_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vasubu_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vasubu_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vasubu_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2)) +#define vasubu_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (uint16_t)(op3), (size_t)(op4)) +#define vasubu_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vasubu_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vasubu_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vasubu_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vasubu_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vasubu_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vasubu_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u32m8((vuint32m8_t)(op0), 
(uint32_t)(op1), (size_t)(op2)) +#define vasubu_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vasubu_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2)) +#define vasubu_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (uint32_t)(op3), (size_t)(op4)) +#define vasubu_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vasubu_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vasubu_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vasubu_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vasubu_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vasubu_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vasubu_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vasubu_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2)) +#define vasubu_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasubu_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (uint64_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8m4((int8_t *)(op0), (vuint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8m8((int8_t *)(op0), (vuint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i8m8_m((vbool1_t)(op0), (int8_t *)(op1), (vuint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8mf4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsuxei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vasub_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vasub_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vasub_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vasub_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vasub_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vasub_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vasub_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vasub_vv_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vasub_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vasub_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vasub_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vasub_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define 
vasub_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vasub_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vasub_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vasub_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vasub_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vasub_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vasub_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vasub_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vasub_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vasub_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vasub_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vasub_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vasub_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vasub_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vasub_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vasub_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vasub_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vasub_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vasub_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vasub_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vasub_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vasub_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vasub_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vasub_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), 
(vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vasub_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vasub_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vasub_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vasub_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vasub_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vasub_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vasub_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vasub_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vasub_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vasub_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vasub_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vasub_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vasub_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vasub_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) 
+#define vasub_vx_i16m2(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vasub_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vasub_vx_i16m4(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vasub_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vasub_vx_i16m8(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vasub_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vasub_vx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vasub_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vasub_vx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vasub_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vasub_vx_i32m1(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vasub_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vasub_vx_i32m2(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vasub_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vasub_vx_i32m4(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vasub_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vasub_vx_i32m8(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vasub_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vasub_vx_i32mf2(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vasub_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vasub_vx_i64m1(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vasub_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vasub_vx_i64m2(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vasub_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vasub_vx_i64m4(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vasub_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vasub_vx_i64m8(op0, op1, op2) \
+__builtin_rvv_vasub_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vasub_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vasub_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8mf2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8mf4(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4))
+#define vsmul_vv_i8mf8(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2))
+#define vsmul_vv_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4))
+#define vsmul_vv_i16m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2))
+#define vsmul_vv_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4))
+#define vsmul_vv_i16m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i16m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2))
+#define vsmul_vv_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4))
+#define vsmul_vv_i16m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2))
+#define vsmul_vv_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4))
+#define vsmul_vv_i16mf2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i16mf4(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2))
+#define vsmul_vv_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4))
+#define vsmul_vv_i32m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2))
+#define vsmul_vv_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4))
+#define vsmul_vv_i32m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i32m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2))
+#define vsmul_vv_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4))
+#define vsmul_vv_i32m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2))
+#define vsmul_vv_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4))
+#define vsmul_vv_i32mf2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i64m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2))
+#define vsmul_vv_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsmul_vv_i64m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2))
+#define vsmul_vv_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsmul_vv_i64m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2))
+#define vsmul_vv_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsmul_vv_i64m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2))
+#define vsmul_vv_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vsmul_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vsmul_vx_i16m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsmul_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsmul_vx_i16m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsmul_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsmul_vx_i16m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsmul_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsmul_vx_i16m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsmul_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsmul_vx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsmul_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsmul_vx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vsmul_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vsmul_vx_i32m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsmul_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsmul_vx_i32m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsmul_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsmul_vx_i32m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsmul_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsmul_vx_i32m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsmul_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsmul_vx_i32mf2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vsmul_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vsmul_vx_i64m1(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsmul_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsmul_vx_i64m2(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsmul_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsmul_vx_i64m4(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsmul_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vsmul_vx_i64m8(op0, op1, op2) \
+__builtin_rvv_vsmul_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vsmul_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsmul_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8mf2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8mf4(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vssrl_vv_u8mf8(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vssrl_vv_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vssrl_vv_u16m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vssrl_vv_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vssrl_vv_u16m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u16m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
+#define vssrl_vv_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vssrl_vv_u16m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2))
+#define vssrl_vv_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4))
+#define vssrl_vv_u16mf2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u16mf4(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
+#define vssrl_vv_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vssrl_vv_u32m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vssrl_vv_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vssrl_vv_u32m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u32m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vssrl_vv_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vssrl_vv_u32m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2))
+#define vssrl_vv_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vssrl_vv_u32mf2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u64m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2))
+#define vssrl_vv_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vssrl_vv_u64m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2))
+#define vssrl_vv_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vssrl_vv_u64m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2))
+#define vssrl_vv_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vssrl_vv_u64m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2))
+#define vssrl_vv_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8m1((vuint8m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8m2((vuint8m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8m4((vuint8m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8m8((vuint8m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8mf2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8mf2((vuint8mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8mf4(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8mf4((vuint8mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u8mf8(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u8mf8((vuint8mf8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u16m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u16m1((vuint16m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u16m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u16m2((vuint16m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u16m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u16m4((vuint16m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u16m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u16m8((vuint16m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u16mf2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u16mf2((vuint16mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u16mf4(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u16mf4((vuint16mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u32m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u32m1((vuint32m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u32m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u32m2((vuint32m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u32m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u32m4((vuint32m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u32m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u32m8((vuint32m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u32mf2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u32mf2((vuint32mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u64m1(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u64m1((vuint64m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u64m2(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u64m2((vuint64m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u64m4(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u64m4((vuint64m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssrl_vx_u64m8(op0, op1, op2) \
+__builtin_rvv_vssrl_vx_u64m8((vuint64m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssrl_vx_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssrl_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vv_i8m1(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8m1((vint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
+#define vssra_vv_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vssra_vv_i8m2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8m2((vint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
+#define vssra_vv_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vssra_vv_i8m4(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8m4((vint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
+#define vssra_vv_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vssra_vv_i8m8(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8m8((vint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2))
+#define vssra_vv_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4))
+#define vssra_vv_i8mf2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8mf2((vint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vssra_vv_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vssra_vv_i8mf4(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8mf4((vint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vssra_vv_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
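[Editor's note] vssrl/vssra are the scaling (rounding) right shifts used in fixed-point pipelines; the _vx forms take the shift amount as size_t, and rounding again follows vxrm. A short sketch using the vssrl_vx_u16m1 wrapper defined above (illustration only; rescale_q4_u16 is a hypothetical name, and the vsetvl/vle16/vse16 wrappers are assumed from this header):

    // Fixed-point rescale: y[i] = round(x[i] >> 4) on uint16 data.
    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    void rescale_q4_u16(const uint16_t *x, uint16_t *y, size_t n) {
      for (size_t i = 0; i < n;) {
        size_t vl = vsetvl_e16m1(n - i);
        vuint16m1_t vx = vle16_v_u16m1(x + i, vl);
        vuint16m1_t vy = vssrl_vx_u16m1(vx, 4, vl); // round per vxrm, then shift
        vse16_v_u16m1(y + i, vy, vl);
        i += vl;
      }
    }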
+#define vssra_vv_i8mf8(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i8mf8((vint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vssra_vv_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vssra_vv_i16m1(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i16m1((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vssra_vv_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vssra_vv_i16m2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i16m2((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vssra_vv_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vssra_vv_i16m4(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i16m4((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
+#define vssra_vv_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vssra_vv_i16m8(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i16m8((vint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2))
+#define vssra_vv_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4))
+#define vssra_vv_i16mf2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i16mf2((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
+#define vssra_vv_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vssra_vv_i16mf4(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i16mf4((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
+#define vssra_vv_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vssra_vv_i32m1(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i32m1((vint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vssra_vv_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vssra_vv_i32m2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i32m2((vint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vssra_vv_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vssra_vv_i32m4(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i32m4((vint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vssra_vv_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vssra_vv_i32m8(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i32m8((vint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2))
+#define vssra_vv_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vssra_vv_i32mf2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i32mf2((vint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vssra_vv_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vssra_vv_i64m1(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i64m1((vint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2))
+#define vssra_vv_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vssra_vv_i64m2(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i64m2((vint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2))
+#define vssra_vv_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vssra_vv_i64m4(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i64m4((vint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2))
+#define vssra_vv_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vssra_vv_i64m8(op0, op1, op2) \
+__builtin_rvv_vssra_vv_i64m8((vint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2))
+#define vssra_vv_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vssra_vx_i8m1(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8m1((vint8m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i8m2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8m2((vint8m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i8m4(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8m4((vint8m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i8m8(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8m8((vint8m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8mf2((vint8mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8mf4((vint8mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i8mf8((vint8mf8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i16m1(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i16m1((vint16m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i16m2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i16m2((vint16m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i16m4(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i16m4((vint16m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i16m8(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i16m8((vint16m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i16mf2((vint16mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i16mf4((vint16mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i32m1(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i32m1((vint32m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i32m2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i32m2((vint32m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i32m4(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i32m4((vint32m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i32m8(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i32m8((vint32m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i32mf2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i32mf2((vint32mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i64m1(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i64m1((vint64m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i64m2(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i64m2((vint64m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i64m4(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i64m4((vint64m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vssra_vx_i64m8(op0, op1, op2) \
+__builtin_rvv_vssra_vx_i64m8((vint64m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vssra_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssra_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u8m1(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u8m1((vuint16m2_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u8m2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u8m2((vuint16m4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u8m4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u8m4((vuint16m8_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u8mf2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u8mf2((vuint16m1_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u8mf4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u8mf4((vuint16mf2_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u8mf8(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u8mf8((vuint16mf4_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u16m1(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u16m1((vuint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u16m2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u16m2((vuint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u16m4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u16m4((vuint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u16mf2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u16mf2((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u16mf4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u16mf4((vuint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u32m1(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u32m1((vuint64m2_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u32m2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u32m2((vuint64m4_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u32m4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u32m4((vuint64m8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vnclipu_wv_u32mf2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wv_u32mf2((vuint64m1_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vnclipu_wv_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u8m1(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u8m1((vuint16m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u8m2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u8m2((vuint16m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u8m4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u8m4((vuint16m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u8mf2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u8mf2((vuint16m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u8mf4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u8mf4((vuint16mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u8mf8(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u8mf8((vuint16mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u16m1(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u16m1((vuint32m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u16m2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u16m2((vuint32m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u16m4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u16m4((vuint32m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u16mf2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u16mf2((vuint32m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u16mf4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u16mf4((vuint32mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u32m1(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u32m1((vuint64m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u32m2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u32m2((vuint64m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u32m4(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u32m4((vuint64m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclipu_wx_u32mf2(op0, op1, op2) \
+__builtin_rvv_vnclipu_wx_u32mf2((vuint64m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclipu_wx_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclipu_wx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsuxei64_v_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vsuxei64_v_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vsuxei64_v_u32m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3))
+#define vsuxei64_v_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
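[Editor's note] The vnclipu/vnclip wrappers above narrow a double-width ('w') source to single width with saturation after a rounding right shift, and the vsuxei* wrappers are the unordered indexed (scatter) stores. A sketch of narrowing uint32 data to saturated uint16 with vnclipu_wx_u16m1 (illustration only; narrow_u32_to_u16 is a hypothetical name, and the vsetvl/vle32/vse16 wrappers are assumed from this header):

    // Saturating narrow: y[i] = clamp_u16(round(x[i] >> 2)), x is uint32.
    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    void narrow_u32_to_u16(const uint32_t *x, uint16_t *y, size_t n) {
      for (size_t i = 0; i < n;) {
        size_t vl = vsetvl_e16m1(n - i);              // count in 16-bit elements
        vuint32m2_t vw = vle32_v_u32m2(x + i, vl);    // wide source, EMUL=2
        vuint16m1_t vn = vnclipu_wx_u16m1(vw, 2, vl); // shift, round, saturate
        vse16_v_u16m1(y + i, vn, vl);
        i += vl;
      }
    }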
+#define vsuxei64_v_u32m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3))
+#define vsuxei64_v_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vsuxei64_v_u32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3))
+#define vsuxei64_v_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vnclip_wv_i8m1(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i8m1((vint16m2_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
+#define vnclip_wv_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vnclip_wv_i8m2(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i8m2((vint16m4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
+#define vnclip_wv_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vnclip_wv_i8m4(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i8m4((vint16m8_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
+#define vnclip_wv_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vnclip_wv_i8mf2(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i8mf2((vint16m1_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vnclip_wv_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vnclip_wv_i8mf4(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i8mf4((vint16mf2_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vnclip_wv_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vnclip_wv_i8mf8(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i8mf8((vint16mf4_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vnclip_wv_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vnclip_wv_i16m1(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i16m1((vint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
+#define vnclip_wv_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vnclip_wv_i16m2(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i16m2((vint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
+#define vnclip_wv_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vnclip_wv_i16m4(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i16m4((vint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
+#define vnclip_wv_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vnclip_wv_i16mf2(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i16mf2((vint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
+#define vnclip_wv_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vnclip_wv_i16mf4(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i16mf4((vint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
+#define vnclip_wv_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vnclip_wv_i32m1(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i32m1((vint64m2_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
+#define vnclip_wv_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vnclip_wv_i32m2(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i32m2((vint64m4_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
+#define vnclip_wv_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vnclip_wv_i32m4(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i32m4((vint64m8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
+#define vnclip_wv_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vnclip_wv_i32mf2(op0, op1, op2) \
+__builtin_rvv_vnclip_wv_i32mf2((vint64m1_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
+#define vnclip_wv_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vnclip_wx_i8m1(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i8m1((vint16m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i8m2(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i8m2((vint16m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i8m4(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i8m4((vint16m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i8mf2((vint16m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i8mf4((vint16mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i8mf8((vint16mf4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i16m1(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i16m1((vint32m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i16m2(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i16m2((vint32m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i16m4(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i16m4((vint32m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i16mf2((vint32m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i16mf4((vint32mf2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i32m1(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i32m1((vint64m2_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i32m2(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i32m2((vint64m4_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i32m4(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i32m4((vint64m8_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vnclip_wx_i32mf2(op0, op1, op2) \
+__builtin_rvv_vnclip_wx_i32mf2((vint64m1_t)(op0), (size_t)(op1), (size_t)(op2))
+#define vnclip_wx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vnclip_wx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vsuxei8_v_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3))
+#define vsuxei8_v_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsuxei8_v_i64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3))
+#define vsuxei8_v_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsuxei8_v_i64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3))
+#define vsuxei8_v_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsuxei8_v_i64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3))
+#define vsuxei8_v_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vsuxei16_v_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3))
+#define vsuxei16_v_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsuxei16_v_i64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3))
+#define vsuxei16_v_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsuxei16_v_i64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3))
+#define vsuxei16_v_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsuxei16_v_i64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3))
+#define vsuxei16_v_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4))
+#define vsuxei16_v_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vsuxei16_v_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vsuxei16_v_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3))
+#define vsuxei16_v_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vsuxei16_v_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3))
+#define vsuxei16_v_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vsuxei16_v_u64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3))
+#define vsuxei16_v_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vsuxei32_v_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3))
+#define vsuxei32_v_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsuxei32_v_i64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3))
+#define vsuxei32_v_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsuxei32_v_i64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3))
+#define vsuxei32_v_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsuxei32_v_i64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3))
+#define vsuxei32_v_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vsuxei32_v_u64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3))
+#define vsuxei32_v_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vsuxei64_v_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei64_v_i64m1((int64_t
*)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vsuxei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8m1_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8m1_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8m1_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i8m1_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8m2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8m2_i8m1((vint8m1_t)(op0), (vint8m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8m2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i8m2_i8m1_m((vbool4_t)(op0), (vint8m1_t)(op1), (vint8m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8m4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8m4_i8m1((vint8m1_t)(op0), (vint8m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8m4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i8m4_i8m1_m((vbool2_t)(op0), (vint8m1_t)(op1), (vint8m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8m8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8m8_i8m1((vint8m1_t)(op0), (vint8m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8m8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i8m8_i8m1_m((vbool1_t)(op0), (vint8m1_t)(op1), (vint8m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8mf2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8mf2_i8m1((vint8m1_t)(op0), (vint8mf2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8mf2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i8mf2_i8m1_m((vbool16_t)(op0), (vint8m1_t)(op1), (vint8mf2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8mf4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8mf4_i8m1((vint8m1_t)(op0), (vint8mf4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8mf4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i8mf4_i8m1_m((vbool32_t)(op0), (vint8m1_t)(op1), (vint8mf4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i8mf8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i8mf8_i8m1((vint8m1_t)(op0), (vint8mf8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i8mf8_i8m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredsum_vs_i8mf8_i8m1_m((vbool64_t)(op0), (vint8m1_t)(op1), (vint8mf8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i16m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i16m1_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i16m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i16m1_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i16m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i16m2_i16m1((vint16m1_t)(op0), (vint16m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i16m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i16m2_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint16m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i16m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i16m4_i16m1((vint16m1_t)(op0), (vint16m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i16m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i16m4_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint16m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i16m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i16m8_i16m1((vint16m1_t)(op0), (vint16m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i16m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i16m8_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint16m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i16mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i16mf2_i16m1((vint16m1_t)(op0), (vint16mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i16mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i16mf2_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint16mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i16mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i16mf4_i16m1((vint16m1_t)(op0), (vint16mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i16mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i16mf4_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint16mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i32m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i32m1_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i32m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i32m1_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i32m2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i32m2_i32m1((vint32m1_t)(op0), (vint32m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i32m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i32m2_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint32m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i32m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i32m4_i32m1((vint32m1_t)(op0), (vint32m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i32m4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i32m4_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint32m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i32m8_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i32m8_i32m1((vint32m1_t)(op0), (vint32m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i32m8_i32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredsum_vs_i32m8_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint32m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i32mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i32mf2_i32m1((vint32m1_t)(op0), (vint32mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i32mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i32mf2_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint32mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i64m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i64m1_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i64m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i64m1_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i64m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i64m2_i64m1((vint64m1_t)(op0), (vint64m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i64m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i64m2_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint64m2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i64m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i64m4_i64m1((vint64m1_t)(op0), (vint64m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i64m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i64m4_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint64m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_i64m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_i64m8_i64m1((vint64m1_t)(op0), (vint64m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_i64m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_i64m8_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint64m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8m1_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8m1_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8m1_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u8m1_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8m2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8m2_u8m1((vuint8m1_t)(op0), (vuint8m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8m2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u8m2_u8m1_m((vbool4_t)(op0), (vuint8m1_t)(op1), (vuint8m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8m4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8m4_u8m1((vuint8m1_t)(op0), (vuint8m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8m4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u8m4_u8m1_m((vbool2_t)(op0), (vuint8m1_t)(op1), (vuint8m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8m8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8m8_u8m1((vuint8m1_t)(op0), (vuint8m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8m8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u8m8_u8m1_m((vbool1_t)(op0), (vuint8m1_t)(op1), (vuint8m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8mf2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8mf2_u8m1((vuint8m1_t)(op0), (vuint8mf2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8mf2_u8m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredsum_vs_u8mf2_u8m1_m((vbool16_t)(op0), (vuint8m1_t)(op1), (vuint8mf2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8mf4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8mf4_u8m1((vuint8m1_t)(op0), (vuint8mf4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8mf4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u8mf4_u8m1_m((vbool32_t)(op0), (vuint8m1_t)(op1), (vuint8mf4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u8mf8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u8mf8_u8m1((vuint8m1_t)(op0), (vuint8mf8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u8mf8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u8mf8_u8m1_m((vbool64_t)(op0), (vuint8m1_t)(op1), (vuint8mf8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u16m1_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u16m1_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u16m1_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u16m1_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u16m2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u16m2_u16m1((vuint16m1_t)(op0), (vuint16m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u16m2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u16m2_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint16m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u16m4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u16m4_u16m1((vuint16m1_t)(op0), (vuint16m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u16m4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u16m4_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint16m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u16m8_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u16m8_u16m1((vuint16m1_t)(op0), (vuint16m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u16m8_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u16m8_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint16m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u16mf2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u16mf2_u16m1((vuint16m1_t)(op0), (vuint16mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u16mf2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u16mf2_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), (vuint16mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u16mf4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u16mf4_u16m1((vuint16m1_t)(op0), (vuint16mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u16mf4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u16mf4_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint16mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u32m1_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u32m1_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u32m1_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u32m1_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u32m2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u32m2_u32m1((vuint32m1_t)(op0), (vuint32m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define 
vredsum_vs_u32m2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u32m2_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint32m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u32m4_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u32m4_u32m1((vuint32m1_t)(op0), (vuint32m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u32m4_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u32m4_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint32m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u32m8_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u32m8_u32m1((vuint32m1_t)(op0), (vuint32m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u32m8_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u32m8_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint32m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u32mf2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u32mf2_u32m1((vuint32m1_t)(op0), (vuint32mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u32mf2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u32mf2_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint32mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u64m1_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u64m1_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u64m1_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u64m1_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u64m2_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u64m2_u64m1((vuint64m1_t)(op0), (vuint64m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u64m2_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u64m2_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint64m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u64m4_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u64m4_u64m1((vuint64m1_t)(op0), (vuint64m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u64m4_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u64m4_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint64m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredsum_vs_u64m8_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredsum_vs_u64m8_u64m1((vuint64m1_t)(op0), (vuint64m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredsum_vs_u64m8_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredsum_vs_u64m8_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), (vuint64m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8m1_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8m1_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8m1_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8m1_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8m2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8m2_u8m1((vuint8m1_t)(op0), (vuint8m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8m2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8m2_u8m1_m((vbool4_t)(op0), (vuint8m1_t)(op1), (vuint8m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8m4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8m4_u8m1((vuint8m1_t)(op0), (vuint8m4_t)(op1), 
(vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8m4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8m4_u8m1_m((vbool2_t)(op0), (vuint8m1_t)(op1), (vuint8m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8m8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8m8_u8m1((vuint8m1_t)(op0), (vuint8m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8m8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8m8_u8m1_m((vbool1_t)(op0), (vuint8m1_t)(op1), (vuint8m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8mf2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8mf2_u8m1((vuint8m1_t)(op0), (vuint8mf2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8mf2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8mf2_u8m1_m((vbool16_t)(op0), (vuint8m1_t)(op1), (vuint8mf2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8mf4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8mf4_u8m1((vuint8m1_t)(op0), (vuint8mf4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8mf4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8mf4_u8m1_m((vbool32_t)(op0), (vuint8m1_t)(op1), (vuint8mf4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u8mf8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u8mf8_u8m1((vuint8m1_t)(op0), (vuint8mf8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u8mf8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u8mf8_u8m1_m((vbool64_t)(op0), (vuint8m1_t)(op1), (vuint8mf8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u16m1_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u16m1_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u16m1_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u16m1_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u16m2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u16m2_u16m1((vuint16m1_t)(op0), (vuint16m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u16m2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u16m2_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint16m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u16m4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u16m4_u16m1((vuint16m1_t)(op0), (vuint16m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u16m4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u16m4_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint16m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u16m8_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u16m8_u16m1((vuint16m1_t)(op0), (vuint16m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u16m8_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u16m8_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint16m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u16mf2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u16mf2_u16m1((vuint16m1_t)(op0), (vuint16mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u16mf2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u16mf2_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), (vuint16mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u16mf4_u16m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vredmaxu_vs_u16mf4_u16m1((vuint16m1_t)(op0), (vuint16mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u16mf4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u16mf4_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint16mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u32m1_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u32m1_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u32m1_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u32m1_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u32m2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u32m2_u32m1((vuint32m1_t)(op0), (vuint32m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u32m2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u32m2_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint32m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u32m4_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u32m4_u32m1((vuint32m1_t)(op0), (vuint32m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u32m4_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u32m4_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint32m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u32m8_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u32m8_u32m1((vuint32m1_t)(op0), (vuint32m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u32m8_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u32m8_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint32m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u32mf2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u32mf2_u32m1((vuint32m1_t)(op0), (vuint32mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u32mf2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u32mf2_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint32mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u64m1_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u64m1_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u64m1_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u64m1_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u64m2_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u64m2_u64m1((vuint64m1_t)(op0), (vuint64m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u64m2_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u64m2_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint64m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u64m4_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u64m4_u64m1((vuint64m1_t)(op0), (vuint64m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u64m4_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u64m4_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint64m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredmaxu_vs_u64m8_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmaxu_vs_u64m8_u64m1((vuint64m1_t)(op0), (vuint64m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredmaxu_vs_u64m8_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmaxu_vs_u64m8_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), 
(vuint64m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8m1_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8m1_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8m1_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8m1_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8m2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8m2_i8m1((vint8m1_t)(op0), (vint8m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8m2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8m2_i8m1_m((vbool4_t)(op0), (vint8m1_t)(op1), (vint8m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8m4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8m4_i8m1((vint8m1_t)(op0), (vint8m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8m4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8m4_i8m1_m((vbool2_t)(op0), (vint8m1_t)(op1), (vint8m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8m8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8m8_i8m1((vint8m1_t)(op0), (vint8m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8m8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8m8_i8m1_m((vbool1_t)(op0), (vint8m1_t)(op1), (vint8m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8mf2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8mf2_i8m1((vint8m1_t)(op0), (vint8mf2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8mf2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8mf2_i8m1_m((vbool16_t)(op0), (vint8m1_t)(op1), (vint8mf2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8mf4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8mf4_i8m1((vint8m1_t)(op0), (vint8mf4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8mf4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8mf4_i8m1_m((vbool32_t)(op0), (vint8m1_t)(op1), (vint8mf4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i8mf8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i8mf8_i8m1((vint8m1_t)(op0), (vint8mf8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i8mf8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i8mf8_i8m1_m((vbool64_t)(op0), (vint8m1_t)(op1), (vint8mf8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i16m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i16m1_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i16m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i16m1_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i16m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i16m2_i16m1((vint16m1_t)(op0), (vint16m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i16m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i16m2_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint16m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i16m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i16m4_i16m1((vint16m1_t)(op0), (vint16m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i16m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i16m4_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint16m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define 
vredmax_vs_i16m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i16m8_i16m1((vint16m1_t)(op0), (vint16m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i16m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i16m8_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint16m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i16mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i16mf2_i16m1((vint16m1_t)(op0), (vint16mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i16mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i16mf2_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint16mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i16mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i16mf4_i16m1((vint16m1_t)(op0), (vint16mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i16mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i16mf4_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint16mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i32m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i32m1_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i32m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i32m1_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i32m2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i32m2_i32m1((vint32m1_t)(op0), (vint32m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i32m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i32m2_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint32m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i32m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i32m4_i32m1((vint32m1_t)(op0), (vint32m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i32m4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i32m4_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint32m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i32m8_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i32m8_i32m1((vint32m1_t)(op0), (vint32m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i32m8_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i32m8_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint32m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i32mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i32mf2_i32m1((vint32m1_t)(op0), (vint32mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i32mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i32mf2_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint32mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i64m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i64m1_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i64m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i64m1_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i64m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i64m2_i64m1((vint64m1_t)(op0), (vint64m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i64m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i64m2_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint64m2_t)(op2), (vint64m1_t)(op3), 
(size_t)(op4)) +#define vredmax_vs_i64m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i64m4_i64m1((vint64m1_t)(op0), (vint64m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i64m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i64m4_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint64m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredmax_vs_i64m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmax_vs_i64m8_i64m1((vint64m1_t)(op0), (vint64m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmax_vs_i64m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmax_vs_i64m8_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint64m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8m1_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8m1_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8m1_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8m1_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8m2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8m2_u8m1((vuint8m1_t)(op0), (vuint8m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8m2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8m2_u8m1_m((vbool4_t)(op0), (vuint8m1_t)(op1), (vuint8m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8m4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8m4_u8m1((vuint8m1_t)(op0), (vuint8m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8m4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8m4_u8m1_m((vbool2_t)(op0), (vuint8m1_t)(op1), (vuint8m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8m8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8m8_u8m1((vuint8m1_t)(op0), (vuint8m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8m8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8m8_u8m1_m((vbool1_t)(op0), (vuint8m1_t)(op1), (vuint8m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8mf2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8mf2_u8m1((vuint8m1_t)(op0), (vuint8mf2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8mf2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8mf2_u8m1_m((vbool16_t)(op0), (vuint8m1_t)(op1), (vuint8mf2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8mf4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8mf4_u8m1((vuint8m1_t)(op0), (vuint8mf4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8mf4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8mf4_u8m1_m((vbool32_t)(op0), (vuint8m1_t)(op1), (vuint8mf4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u8mf8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u8mf8_u8m1((vuint8m1_t)(op0), (vuint8mf8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u8mf8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u8mf8_u8m1_m((vbool64_t)(op0), (vuint8m1_t)(op1), (vuint8mf8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u16m1_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u16m1_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u16m1_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u16m1_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), 
(size_t)(op4)) +#define vredminu_vs_u16m2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u16m2_u16m1((vuint16m1_t)(op0), (vuint16m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u16m2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u16m2_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint16m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u16m4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u16m4_u16m1((vuint16m1_t)(op0), (vuint16m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u16m4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u16m4_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint16m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u16m8_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u16m8_u16m1((vuint16m1_t)(op0), (vuint16m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u16m8_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u16m8_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint16m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u16mf2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u16mf2_u16m1((vuint16m1_t)(op0), (vuint16mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u16mf2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u16mf2_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), (vuint16mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u16mf4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u16mf4_u16m1((vuint16m1_t)(op0), (vuint16mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u16mf4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u16mf4_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint16mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u32m1_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u32m1_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u32m1_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u32m1_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u32m2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u32m2_u32m1((vuint32m1_t)(op0), (vuint32m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u32m2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u32m2_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint32m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u32m4_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u32m4_u32m1((vuint32m1_t)(op0), (vuint32m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u32m4_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u32m4_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint32m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u32m8_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u32m8_u32m1((vuint32m1_t)(op0), (vuint32m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u32m8_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u32m8_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint32m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u32mf2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u32mf2_u32m1((vuint32m1_t)(op0), (vuint32mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u32mf2_u32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredminu_vs_u32mf2_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint32mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u64m1_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u64m1_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u64m1_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u64m1_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u64m2_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u64m2_u64m1((vuint64m1_t)(op0), (vuint64m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u64m2_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u64m2_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint64m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u64m4_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u64m4_u64m1((vuint64m1_t)(op0), (vuint64m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u64m4_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u64m4_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint64m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredminu_vs_u64m8_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredminu_vs_u64m8_u64m1((vuint64m1_t)(op0), (vuint64m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredminu_vs_u64m8_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredminu_vs_u64m8_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), (vuint64m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8m1_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8m1_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8m1_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i8m1_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8m2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8m2_i8m1((vint8m1_t)(op0), (vint8m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8m2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i8m2_i8m1_m((vbool4_t)(op0), (vint8m1_t)(op1), (vint8m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8m4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8m4_i8m1((vint8m1_t)(op0), (vint8m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8m4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i8m4_i8m1_m((vbool2_t)(op0), (vint8m1_t)(op1), (vint8m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8m8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8m8_i8m1((vint8m1_t)(op0), (vint8m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8m8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i8m8_i8m1_m((vbool1_t)(op0), (vint8m1_t)(op1), (vint8m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8mf2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8mf2_i8m1((vint8m1_t)(op0), (vint8mf2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8mf2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i8mf2_i8m1_m((vbool16_t)(op0), (vint8m1_t)(op1), (vint8mf2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8mf4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8mf4_i8m1((vint8m1_t)(op0), (vint8mf4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8mf4_i8m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredmin_vs_i8mf4_i8m1_m((vbool32_t)(op0), (vint8m1_t)(op1), (vint8mf4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i8mf8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i8mf8_i8m1((vint8m1_t)(op0), (vint8mf8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i8mf8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i8mf8_i8m1_m((vbool64_t)(op0), (vint8m1_t)(op1), (vint8mf8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i16m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i16m1_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i16m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i16m1_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i16m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i16m2_i16m1((vint16m1_t)(op0), (vint16m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i16m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i16m2_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint16m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i16m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i16m4_i16m1((vint16m1_t)(op0), (vint16m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i16m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i16m4_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint16m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i16m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i16m8_i16m1((vint16m1_t)(op0), (vint16m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i16m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i16m8_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint16m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i16mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i16mf2_i16m1((vint16m1_t)(op0), (vint16mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i16mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i16mf2_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint16mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i16mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i16mf4_i16m1((vint16m1_t)(op0), (vint16mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i16mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i16mf4_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint16mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i32m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i32m1_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i32m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i32m1_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i32m2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i32m2_i32m1((vint32m1_t)(op0), (vint32m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i32m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i32m2_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint32m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i32m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i32m4_i32m1((vint32m1_t)(op0), (vint32m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i32m4_i32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredmin_vs_i32m4_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint32m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i32m8_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i32m8_i32m1((vint32m1_t)(op0), (vint32m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i32m8_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i32m8_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint32m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i32mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i32mf2_i32m1((vint32m1_t)(op0), (vint32mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i32mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i32mf2_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint32mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i64m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i64m1_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i64m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i64m1_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i64m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i64m2_i64m1((vint64m1_t)(op0), (vint64m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i64m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i64m2_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint64m2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i64m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i64m4_i64m1((vint64m1_t)(op0), (vint64m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i64m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i64m4_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint64m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredmin_vs_i64m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredmin_vs_i64m8_i64m1((vint64m1_t)(op0), (vint64m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredmin_vs_i64m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredmin_vs_i64m8_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint64m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8m1_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8m1_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8m1_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8m1_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8m2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8m2_i8m1((vint8m1_t)(op0), (vint8m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8m2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8m2_i8m1_m((vbool4_t)(op0), (vint8m1_t)(op1), (vint8m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8m4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8m4_i8m1((vint8m1_t)(op0), (vint8m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8m4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8m4_i8m1_m((vbool2_t)(op0), (vint8m1_t)(op1), (vint8m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8m8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8m8_i8m1((vint8m1_t)(op0), (vint8m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8m8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8m8_i8m1_m((vbool1_t)(op0), 
(vint8m1_t)(op1), (vint8m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8mf2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8mf2_i8m1((vint8m1_t)(op0), (vint8mf2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8mf2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8mf2_i8m1_m((vbool16_t)(op0), (vint8m1_t)(op1), (vint8mf2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8mf4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8mf4_i8m1((vint8m1_t)(op0), (vint8mf4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8mf4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8mf4_i8m1_m((vbool32_t)(op0), (vint8m1_t)(op1), (vint8mf4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i8mf8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i8mf8_i8m1((vint8m1_t)(op0), (vint8mf8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i8mf8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i8mf8_i8m1_m((vbool64_t)(op0), (vint8m1_t)(op1), (vint8mf8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i16m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i16m1_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i16m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i16m1_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i16m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i16m2_i16m1((vint16m1_t)(op0), (vint16m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i16m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i16m2_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint16m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i16m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i16m4_i16m1((vint16m1_t)(op0), (vint16m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i16m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i16m4_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint16m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i16m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i16m8_i16m1((vint16m1_t)(op0), (vint16m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i16m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i16m8_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint16m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i16mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i16mf2_i16m1((vint16m1_t)(op0), (vint16mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i16mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i16mf2_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint16mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i16mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i16mf4_i16m1((vint16m1_t)(op0), (vint16mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i16mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i16mf4_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint16mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i32m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i32m1_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i32m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i32m1_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), 
(vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i32m2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i32m2_i32m1((vint32m1_t)(op0), (vint32m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i32m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i32m2_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint32m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i32m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i32m4_i32m1((vint32m1_t)(op0), (vint32m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i32m4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i32m4_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint32m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i32m8_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i32m8_i32m1((vint32m1_t)(op0), (vint32m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i32m8_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i32m8_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint32m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i32mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i32mf2_i32m1((vint32m1_t)(op0), (vint32mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i32mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i32mf2_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint32mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i64m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i64m1_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i64m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i64m1_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i64m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i64m2_i64m1((vint64m1_t)(op0), (vint64m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i64m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i64m2_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint64m2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i64m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i64m4_i64m1((vint64m1_t)(op0), (vint64m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i64m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i64m4_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint64m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_i64m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_i64m8_i64m1((vint64m1_t)(op0), (vint64m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_i64m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_i64m8_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint64m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8m1_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8m1_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8m1_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8m1_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8m2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8m2_u8m1((vuint8m1_t)(op0), (vuint8m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8m2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8m2_u8m1_m((vbool4_t)(op0), (vuint8m1_t)(op1), (vuint8m2_t)(op2), 
(vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8m4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8m4_u8m1((vuint8m1_t)(op0), (vuint8m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8m4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8m4_u8m1_m((vbool2_t)(op0), (vuint8m1_t)(op1), (vuint8m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8m8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8m8_u8m1((vuint8m1_t)(op0), (vuint8m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8m8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8m8_u8m1_m((vbool1_t)(op0), (vuint8m1_t)(op1), (vuint8m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8mf2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8mf2_u8m1((vuint8m1_t)(op0), (vuint8mf2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8mf2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8mf2_u8m1_m((vbool16_t)(op0), (vuint8m1_t)(op1), (vuint8mf2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8mf4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8mf4_u8m1((vuint8m1_t)(op0), (vuint8mf4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8mf4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8mf4_u8m1_m((vbool32_t)(op0), (vuint8m1_t)(op1), (vuint8mf4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u8mf8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u8mf8_u8m1((vuint8m1_t)(op0), (vuint8mf8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u8mf8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u8mf8_u8m1_m((vbool64_t)(op0), (vuint8m1_t)(op1), (vuint8mf8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u16m1_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u16m1_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u16m1_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u16m1_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u16m2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u16m2_u16m1((vuint16m1_t)(op0), (vuint16m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u16m2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u16m2_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint16m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u16m4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u16m4_u16m1((vuint16m1_t)(op0), (vuint16m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u16m4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u16m4_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint16m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u16m8_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u16m8_u16m1((vuint16m1_t)(op0), (vuint16m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u16m8_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u16m8_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint16m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u16mf2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u16mf2_u16m1((vuint16m1_t)(op0), (vuint16mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u16mf2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u16mf2_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), 
(vuint16mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u16mf4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u16mf4_u16m1((vuint16m1_t)(op0), (vuint16mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u16mf4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u16mf4_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint16mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u32m1_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u32m1_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u32m1_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u32m1_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u32m2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u32m2_u32m1((vuint32m1_t)(op0), (vuint32m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u32m2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u32m2_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint32m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u32m4_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u32m4_u32m1((vuint32m1_t)(op0), (vuint32m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u32m4_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u32m4_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint32m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u32m8_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u32m8_u32m1((vuint32m1_t)(op0), (vuint32m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u32m8_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u32m8_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint32m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u32mf2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u32mf2_u32m1((vuint32m1_t)(op0), (vuint32mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u32mf2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u32mf2_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint32mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u64m1_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u64m1_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u64m1_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u64m1_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u64m2_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u64m2_u64m1((vuint64m1_t)(op0), (vuint64m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u64m2_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u64m2_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint64m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u64m4_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u64m4_u64m1((vuint64m1_t)(op0), (vuint64m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u64m4_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredand_vs_u64m4_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint64m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredand_vs_u64m8_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredand_vs_u64m8_u64m1((vuint64m1_t)(op0), (vuint64m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredand_vs_u64m8_u64m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vredand_vs_u64m8_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), (vuint64m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vsuxei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vredor_vs_i8m1_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i8m1_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8m1_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8m1_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i8m2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i8m2_i8m1((vint8m1_t)(op0), (vint8m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8m2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8m2_i8m1_m((vbool4_t)(op0), (vint8m1_t)(op1), (vint8m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i8m4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i8m4_i8m1((vint8m1_t)(op0), (vint8m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8m4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8m4_i8m1_m((vbool2_t)(op0), (vint8m1_t)(op1), (vint8m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i8m8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i8m8_i8m1((vint8m1_t)(op0), (vint8m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8m8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8m8_i8m1_m((vbool1_t)(op0), (vint8m1_t)(op1), (vint8m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i8mf2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i8mf2_i8m1((vint8m1_t)(op0), (vint8mf2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8mf2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8mf2_i8m1_m((vbool16_t)(op0), (vint8m1_t)(op1), (vint8mf2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i8mf4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i8mf4_i8m1((vint8m1_t)(op0), (vint8mf4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8mf4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8mf4_i8m1_m((vbool32_t)(op0), (vint8m1_t)(op1), (vint8mf4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i8mf8_i8m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vredor_vs_i8mf8_i8m1((vint8m1_t)(op0), (vint8mf8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i8mf8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i8mf8_i8m1_m((vbool64_t)(op0), (vint8m1_t)(op1), (vint8mf8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i16m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i16m1_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i16m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i16m1_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i16m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i16m2_i16m1((vint16m1_t)(op0), (vint16m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i16m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i16m2_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint16m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i16m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i16m4_i16m1((vint16m1_t)(op0), (vint16m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i16m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i16m4_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint16m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i16m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i16m8_i16m1((vint16m1_t)(op0), (vint16m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i16m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i16m8_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint16m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i16mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i16mf2_i16m1((vint16m1_t)(op0), (vint16mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i16mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i16mf2_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint16mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i16mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i16mf4_i16m1((vint16m1_t)(op0), (vint16mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i16mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i16mf4_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint16mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i32m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i32m1_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i32m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i32m1_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i32m2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i32m2_i32m1((vint32m1_t)(op0), (vint32m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i32m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i32m2_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint32m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i32m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i32m4_i32m1((vint32m1_t)(op0), (vint32m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i32m4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i32m4_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint32m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i32m8_i32m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vredor_vs_i32m8_i32m1((vint32m1_t)(op0), (vint32m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i32m8_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i32m8_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint32m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i32mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i32mf2_i32m1((vint32m1_t)(op0), (vint32mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i32mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i32mf2_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint32mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i64m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i64m1_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i64m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i64m1_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i64m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i64m2_i64m1((vint64m1_t)(op0), (vint64m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i64m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i64m2_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint64m2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i64m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i64m4_i64m1((vint64m1_t)(op0), (vint64m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i64m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i64m4_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint64m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_i64m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_i64m8_i64m1((vint64m1_t)(op0), (vint64m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_i64m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_i64m8_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint64m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8m1_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8m1_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8m1_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8m1_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8m2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8m2_u8m1((vuint8m1_t)(op0), (vuint8m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8m2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8m2_u8m1_m((vbool4_t)(op0), (vuint8m1_t)(op1), (vuint8m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8m4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8m4_u8m1((vuint8m1_t)(op0), (vuint8m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8m4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8m4_u8m1_m((vbool2_t)(op0), (vuint8m1_t)(op1), (vuint8m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8m8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8m8_u8m1((vuint8m1_t)(op0), (vuint8m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8m8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8m8_u8m1_m((vbool1_t)(op0), (vuint8m1_t)(op1), (vuint8m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8mf2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8mf2_u8m1((vuint8m1_t)(op0), (vuint8mf2_t)(op1), 
(vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8mf2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8mf2_u8m1_m((vbool16_t)(op0), (vuint8m1_t)(op1), (vuint8mf2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8mf4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8mf4_u8m1((vuint8m1_t)(op0), (vuint8mf4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8mf4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8mf4_u8m1_m((vbool32_t)(op0), (vuint8m1_t)(op1), (vuint8mf4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u8mf8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u8mf8_u8m1((vuint8m1_t)(op0), (vuint8mf8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u8mf8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u8mf8_u8m1_m((vbool64_t)(op0), (vuint8m1_t)(op1), (vuint8mf8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u16m1_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u16m1_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u16m1_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u16m1_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u16m2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u16m2_u16m1((vuint16m1_t)(op0), (vuint16m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u16m2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u16m2_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint16m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u16m4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u16m4_u16m1((vuint16m1_t)(op0), (vuint16m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u16m4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u16m4_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint16m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u16m8_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u16m8_u16m1((vuint16m1_t)(op0), (vuint16m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u16m8_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u16m8_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint16m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u16mf2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u16mf2_u16m1((vuint16m1_t)(op0), (vuint16mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u16mf2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u16mf2_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), (vuint16mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u16mf4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u16mf4_u16m1((vuint16m1_t)(op0), (vuint16mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u16mf4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u16mf4_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint16mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u32m1_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u32m1_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u32m1_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u32m1_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u32m2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u32m2_u32m1((vuint32m1_t)(op0), 
(vuint32m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u32m2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u32m2_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint32m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u32m4_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u32m4_u32m1((vuint32m1_t)(op0), (vuint32m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u32m4_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u32m4_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint32m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u32m8_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u32m8_u32m1((vuint32m1_t)(op0), (vuint32m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u32m8_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u32m8_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint32m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u32mf2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u32mf2_u32m1((vuint32m1_t)(op0), (vuint32mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u32mf2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u32mf2_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint32mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u64m1_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u64m1_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u64m1_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u64m1_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u64m2_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u64m2_u64m1((vuint64m1_t)(op0), (vuint64m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u64m2_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u64m2_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint64m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u64m4_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u64m4_u64m1((vuint64m1_t)(op0), (vuint64m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u64m4_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u64m4_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint64m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredor_vs_u64m8_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredor_vs_u64m8_u64m1((vuint64m1_t)(op0), (vuint64m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredor_vs_u64m8_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredor_vs_u64m8_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), (vuint64m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8m1_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8m1_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8m1_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8m1_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8m2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8m2_i8m1((vint8m1_t)(op0), (vint8m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8m2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8m2_i8m1_m((vbool4_t)(op0), (vint8m1_t)(op1), (vint8m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8m4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8m4_i8m1((vint8m1_t)(op0), (vint8m4_t)(op1), 
(vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8m4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8m4_i8m1_m((vbool2_t)(op0), (vint8m1_t)(op1), (vint8m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8m8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8m8_i8m1((vint8m1_t)(op0), (vint8m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8m8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8m8_i8m1_m((vbool1_t)(op0), (vint8m1_t)(op1), (vint8m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8mf2_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8mf2_i8m1((vint8m1_t)(op0), (vint8mf2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8mf2_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8mf2_i8m1_m((vbool16_t)(op0), (vint8m1_t)(op1), (vint8mf2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8mf4_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8mf4_i8m1((vint8m1_t)(op0), (vint8mf4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8mf4_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8mf4_i8m1_m((vbool32_t)(op0), (vint8m1_t)(op1), (vint8mf4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i8mf8_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i8mf8_i8m1((vint8m1_t)(op0), (vint8mf8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i8mf8_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i8mf8_i8m1_m((vbool64_t)(op0), (vint8m1_t)(op1), (vint8mf8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i16m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i16m1_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i16m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i16m1_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i16m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i16m2_i16m1((vint16m1_t)(op0), (vint16m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i16m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i16m2_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint16m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i16m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i16m4_i16m1((vint16m1_t)(op0), (vint16m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i16m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i16m4_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint16m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i16m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i16m8_i16m1((vint16m1_t)(op0), (vint16m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i16m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i16m8_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint16m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i16mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i16mf2_i16m1((vint16m1_t)(op0), (vint16mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i16mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i16mf2_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint16mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i16mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i16mf4_i16m1((vint16m1_t)(op0), (vint16mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define 
vredxor_vs_i16mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i16mf4_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint16mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i32m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i32m1_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i32m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i32m1_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i32m2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i32m2_i32m1((vint32m1_t)(op0), (vint32m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i32m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i32m2_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint32m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i32m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i32m4_i32m1((vint32m1_t)(op0), (vint32m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i32m4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i32m4_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint32m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i32m8_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i32m8_i32m1((vint32m1_t)(op0), (vint32m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i32m8_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i32m8_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint32m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i32mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i32mf2_i32m1((vint32m1_t)(op0), (vint32mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i32mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i32mf2_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint32mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i64m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i64m1_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i64m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i64m1_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i64m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i64m2_i64m1((vint64m1_t)(op0), (vint64m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i64m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i64m2_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint64m2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i64m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i64m4_i64m1((vint64m1_t)(op0), (vint64m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i64m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i64m4_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint64m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_i64m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_i64m8_i64m1((vint64m1_t)(op0), (vint64m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_i64m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_i64m8_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint64m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8m1_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8m1_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define 
vredxor_vs_u8m1_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8m1_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8m2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8m2_u8m1((vuint8m1_t)(op0), (vuint8m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u8m2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8m2_u8m1_m((vbool4_t)(op0), (vuint8m1_t)(op1), (vuint8m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8m4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8m4_u8m1((vuint8m1_t)(op0), (vuint8m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u8m4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8m4_u8m1_m((vbool2_t)(op0), (vuint8m1_t)(op1), (vuint8m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8m8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8m8_u8m1((vuint8m1_t)(op0), (vuint8m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u8m8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8m8_u8m1_m((vbool1_t)(op0), (vuint8m1_t)(op1), (vuint8m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8mf2_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8mf2_u8m1((vuint8m1_t)(op0), (vuint8mf2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u8mf2_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8mf2_u8m1_m((vbool16_t)(op0), (vuint8m1_t)(op1), (vuint8mf2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8mf4_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8mf4_u8m1((vuint8m1_t)(op0), (vuint8mf4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u8mf4_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8mf4_u8m1_m((vbool32_t)(op0), (vuint8m1_t)(op1), (vuint8mf4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u8mf8_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u8mf8_u8m1((vuint8m1_t)(op0), (vuint8mf8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u8mf8_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u8mf8_u8m1_m((vbool64_t)(op0), (vuint8m1_t)(op1), (vuint8mf8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u16m1_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u16m1_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u16m1_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u16m1_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u16m2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u16m2_u16m1((vuint16m1_t)(op0), (vuint16m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u16m2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u16m2_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint16m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u16m4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u16m4_u16m1((vuint16m1_t)(op0), (vuint16m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u16m4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u16m4_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint16m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u16m8_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u16m8_u16m1((vuint16m1_t)(op0), (vuint16m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define 
vredxor_vs_u16m8_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u16m8_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint16m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u16mf2_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u16mf2_u16m1((vuint16m1_t)(op0), (vuint16mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u16mf2_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u16mf2_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), (vuint16mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u16mf4_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u16mf4_u16m1((vuint16m1_t)(op0), (vuint16mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u16mf4_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u16mf4_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint16mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u32m1_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u32m1_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u32m1_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u32m1_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u32m2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u32m2_u32m1((vuint32m1_t)(op0), (vuint32m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u32m2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u32m2_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint32m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u32m4_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u32m4_u32m1((vuint32m1_t)(op0), (vuint32m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u32m4_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u32m4_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint32m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u32m8_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u32m8_u32m1((vuint32m1_t)(op0), (vuint32m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u32m8_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u32m8_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint32m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u32mf2_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u32mf2_u32m1((vuint32m1_t)(op0), (vuint32mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u32mf2_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u32mf2_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint32mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u64m1_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u64m1_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u64m1_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u64m1_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u64m2_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u64m2_u64m1((vuint64m1_t)(op0), (vuint64m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u64m2_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u64m2_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint64m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u64m4_u64m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vredxor_vs_u64m4_u64m1((vuint64m1_t)(op0), (vuint64m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u64m4_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u64m4_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint64m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vredxor_vs_u64m8_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vredxor_vs_u64m8_u64m1((vuint64m1_t)(op0), (vuint64m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vredxor_vs_u64m8_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vredxor_vs_u64m8_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), (vuint64m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8m1_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8m1_i16m1((vint16m1_t)(op0), (vint8m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8m1_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8m1_i16m1_m((vbool8_t)(op0), (vint16m1_t)(op1), (vint8m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8m2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8m2_i16m1((vint16m1_t)(op0), (vint8m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8m2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8m2_i16m1_m((vbool4_t)(op0), (vint16m1_t)(op1), (vint8m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8m4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8m4_i16m1((vint16m1_t)(op0), (vint8m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8m4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8m4_i16m1_m((vbool2_t)(op0), (vint16m1_t)(op1), (vint8m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8m8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8m8_i16m1((vint16m1_t)(op0), (vint8m8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8m8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8m8_i16m1_m((vbool1_t)(op0), (vint16m1_t)(op1), (vint8m8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8mf2_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8mf2_i16m1((vint16m1_t)(op0), (vint8mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8mf2_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8mf2_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8mf4_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8mf4_i16m1((vint16m1_t)(op0), (vint8mf4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8mf4_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8mf4_i16m1_m((vbool32_t)(op0), (vint16m1_t)(op1), (vint8mf4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i8mf8_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i8mf8_i16m1((vint16m1_t)(op0), (vint8mf8_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i8mf8_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i8mf8_i16m1_m((vbool64_t)(op0), (vint16m1_t)(op1), (vint8mf8_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i16m1_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i16m1_i32m1((vint32m1_t)(op0), (vint16m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i16m1_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i16m1_i32m1_m((vbool16_t)(op0), (vint32m1_t)(op1), (vint16m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i16m2_i32m1(op0, op1, 
op2, op3) \ +__builtin_rvv_vwredsum_vs_i16m2_i32m1((vint32m1_t)(op0), (vint16m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i16m2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i16m2_i32m1_m((vbool8_t)(op0), (vint32m1_t)(op1), (vint16m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i16m4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i16m4_i32m1((vint32m1_t)(op0), (vint16m4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i16m4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i16m4_i32m1_m((vbool4_t)(op0), (vint32m1_t)(op1), (vint16m4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i16m8_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i16m8_i32m1((vint32m1_t)(op0), (vint16m8_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i16m8_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i16m8_i32m1_m((vbool2_t)(op0), (vint32m1_t)(op1), (vint16m8_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i16mf2_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i16mf2_i32m1((vint32m1_t)(op0), (vint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i16mf2_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i16mf2_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i16mf4_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i16mf4_i32m1((vint32m1_t)(op0), (vint16mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i16mf4_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i16mf4_i32m1_m((vbool64_t)(op0), (vint32m1_t)(op1), (vint16mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i32m1_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i32m1_i64m1((vint64m1_t)(op0), (vint32m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i32m1_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i32m1_i64m1_m((vbool32_t)(op0), (vint64m1_t)(op1), (vint32m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i32m2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i32m2_i64m1((vint64m1_t)(op0), (vint32m2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i32m2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i32m2_i64m1_m((vbool16_t)(op0), (vint64m1_t)(op1), (vint32m2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i32m4_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i32m4_i64m1((vint64m1_t)(op0), (vint32m4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i32m4_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i32m4_i64m1_m((vbool8_t)(op0), (vint64m1_t)(op1), (vint32m4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i32m8_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i32m8_i64m1((vint64m1_t)(op0), (vint32m8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i32m8_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i32m8_i64m1_m((vbool4_t)(op0), (vint64m1_t)(op1), (vint32m8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vwredsum_vs_i32mf2_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vwredsum_vs_i32mf2_i64m1((vint64m1_t)(op0), (vint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vwredsum_vs_i32mf2_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vwredsum_vs_i32mf2_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (vint64m1_t)(op3), 
(size_t)(op4))
+#define vwredsumu_vs_u8m1_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8m1_u16m1((vuint16m1_t)(op0), (vuint8m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8m1_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8m1_u16m1_m((vbool8_t)(op0), (vuint16m1_t)(op1), (vuint8m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u8m2_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8m2_u16m1((vuint16m1_t)(op0), (vuint8m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8m2_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8m2_u16m1_m((vbool4_t)(op0), (vuint16m1_t)(op1), (vuint8m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u8m4_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8m4_u16m1((vuint16m1_t)(op0), (vuint8m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8m4_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8m4_u16m1_m((vbool2_t)(op0), (vuint16m1_t)(op1), (vuint8m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u8m8_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8m8_u16m1((vuint16m1_t)(op0), (vuint8m8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8m8_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8m8_u16m1_m((vbool1_t)(op0), (vuint16m1_t)(op1), (vuint8m8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u8mf2_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8mf2_u16m1((vuint16m1_t)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8mf2_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8mf2_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u8mf4_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8mf4_u16m1((vuint16m1_t)(op0), (vuint8mf4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8mf4_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8mf4_u16m1_m((vbool32_t)(op0), (vuint16m1_t)(op1), (vuint8mf4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u8mf8_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u8mf8_u16m1((vuint16m1_t)(op0), (vuint8mf8_t)(op1), (vuint16m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u8mf8_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u8mf8_u16m1_m((vbool64_t)(op0), (vuint16m1_t)(op1), (vuint8mf8_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u16m1_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u16m1_u32m1((vuint32m1_t)(op0), (vuint16m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u16m1_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u16m1_u32m1_m((vbool16_t)(op0), (vuint32m1_t)(op1), (vuint16m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u16m2_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u16m2_u32m1((vuint32m1_t)(op0), (vuint16m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u16m2_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u16m2_u32m1_m((vbool8_t)(op0), (vuint32m1_t)(op1), (vuint16m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u16m4_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u16m4_u32m1((vuint32m1_t)(op0), (vuint16m4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u16m4_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u16m4_u32m1_m((vbool4_t)(op0), (vuint32m1_t)(op1), (vuint16m4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u16m8_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u16m8_u32m1((vuint32m1_t)(op0), (vuint16m8_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u16m8_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u16m8_u32m1_m((vbool2_t)(op0), (vuint32m1_t)(op1), (vuint16m8_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u16mf2_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u16mf2_u32m1((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u16mf2_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u16mf2_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u16mf4_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u16mf4_u32m1((vuint32m1_t)(op0), (vuint16mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u16mf4_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u16mf4_u32m1_m((vbool64_t)(op0), (vuint32m1_t)(op1), (vuint16mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u32m1_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u32m1_u64m1((vuint64m1_t)(op0), (vuint32m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u32m1_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u32m1_u64m1_m((vbool32_t)(op0), (vuint64m1_t)(op1), (vuint32m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u32m2_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u32m2_u64m1((vuint64m1_t)(op0), (vuint32m2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u32m2_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u32m2_u64m1_m((vbool16_t)(op0), (vuint64m1_t)(op1), (vuint32m2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u32m4_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u32m4_u64m1((vuint64m1_t)(op0), (vuint32m4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u32m4_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u32m4_u64m1_m((vbool8_t)(op0), (vuint64m1_t)(op1), (vuint32m4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u32m8_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u32m8_u64m1((vuint64m1_t)(op0), (vuint32m8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u32m8_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u32m8_u64m1_m((vbool4_t)(op0), (vuint64m1_t)(op1), (vuint32m8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vwredsumu_vs_u32mf2_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vwredsumu_vs_u32mf2_u64m1((vuint64m1_t)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3))
+#define vwredsumu_vs_u32mf2_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vwredsumu_vs_u32mf2_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
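+// --- Editorial usage sketch (not part of the upstream generated header) ---
+// The vred*_vs_* macros above reduce a whole vector into element 0 of an
+// LMUL=1 result, with the shape (dest, vector, scalar, vl); the masked _m
+// forms take (mask, maskedoff, vector, scalar, vl). The vwredsum/vwredsumu
+// forms additionally widen, e.g. u8 elements into a u16 accumulator, so a
+// long byte buffer can be summed without wrapping at 8 bits. A minimal
+// strip-mined sum, assuming only intrinsics defined by this header
+// (illustrative only, not verified against any particular build):
+//
+//   uint16_t sum_u8(const uint8_t *p, size_t n) {
+//     vuint16m1_t acc = vmv_v_x_u16m1(0, vsetvl_e16m1(1)); // scalar seed
+//     for (size_t vl; n > 0; n -= vl, p += vl) {
+//       vl = vsetvl_e8m8(n);                      // elements this pass
+//       vuint8m8_t v = vle8_v_u8m8(p, vl);        // unit-stride load
+//       acc = vwredsumu_vs_u8m8_u16m1(acc, v, acc, vl); // widen + reduce
+//     }
+//     return vmv_x_s_u16m1_u16(acc);              // read back element 0
+//   }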
+#define vsuxei8_v_u8m1(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u8m2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u8m4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8m4((uint8_t *)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u8m8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8m8((uint8_t *)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8m8_m((vbool1_t)(op0), (uint8_t *)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u8mf2(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u8mf4(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vsuxei8_v_u8mf8(op0, op1, op2, op3) \
+__builtin_rvv_vsuxei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3))
+#define vsuxei8_v_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
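+// --- Editorial usage sketch (not part of the upstream generated header) ---
+// The vsuxei{8,64}_v_* macros (vsuxei64 earlier in this file, vsuxei8 just
+// above) perform unordered indexed ("scatter") stores: element i of the
+// value operand is written at base plus the byte offset held in element i
+// of the index vector. A single-strip sketch, illustrative only; the loop
+// over n > VLMAX elements is omitted for brevity:
+//
+//   void scatter_u8(uint8_t *base, const uint8_t *idx,
+//                   const uint8_t *val, size_t n) {
+//     size_t vl = vsetvl_e8m1(n);                 // first strip only
+//     vuint8m1_t offsets = vle8_v_u8m1(idx, vl);  // byte offsets
+//     vuint8m1_t values  = vle8_v_u8m1(val, vl);
+//     vsuxei8_v_u8m1(base, offsets, values, vl);  // unordered scatter
+//   }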
+#define vmnot_m_b8(op0, op1) \
+__builtin_rvv_vmnot_m_b8((vbool8_t)(op0), (size_t)(op1))
+#define vmnot_m_b4(op0, op1) \
+__builtin_rvv_vmnot_m_b4((vbool4_t)(op0), (size_t)(op1))
+#define vmnot_m_b2(op0, op1) \
+__builtin_rvv_vmnot_m_b2((vbool2_t)(op0), (size_t)(op1))
+#define vmnot_m_b1(op0, op1) \
+__builtin_rvv_vmnot_m_b1((vbool1_t)(op0), (size_t)(op1))
+#define vmnot_m_b16(op0, op1) \
+__builtin_rvv_vmnot_m_b16((vbool16_t)(op0), (size_t)(op1))
+#define vmnot_m_b32(op0, op1) \
+__builtin_rvv_vmnot_m_b32((vbool32_t)(op0), (size_t)(op1))
+#define vmnot_m_b64(op0, op1) \
+__builtin_rvv_vmnot_m_b64((vbool64_t)(op0), (size_t)(op1))
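+// --- Editorial usage sketch (not part of the upstream generated header) ---
+// vmnot_m_b* complements a mask register, and the viota_m_* macros defined
+// below return, for each element, the running count of set mask bits before
+// it -- the classic building block for computing compaction offsets. A
+// single-strip sketch combining both with the scatter macros above
+// (illustrative only; vmsne_vx_u8m1_b8 is the standard v0.10 compare):
+//
+//   // Compact the non-zero bytes of src[0..vl) to the front of dst.
+//   void compact_nonzero(uint8_t *dst, const uint8_t *src, size_t n) {
+//     size_t vl = vsetvl_e8m1(n);
+//     vuint8m1_t v   = vle8_v_u8m1(src, vl);
+//     vbool8_t   nz  = vmsne_vx_u8m1_b8(v, 0, vl);  // predicate: v != 0
+//     vbool8_t   z   = vmnot_m_b8(nz, vl);          // complement (the zeros)
+//     vuint8m1_t pos = viota_m_u8m1(nz, vl);        // 0,1,2,... over set bits
+//     (void)z;                                      // shown for illustration
+//     vsuxei8_v_u8m1_m(nz, dst, pos, v, vl);        // masked scatter
+//   }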
+__builtin_rvv_viota_m_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define viota_m_u8mf4(op0, op1) \ +__builtin_rvv_viota_m_u8mf4((vbool32_t)(op0), (size_t)(op1)) +#define viota_m_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define viota_m_u8mf8(op0, op1) \ +__builtin_rvv_viota_m_u8mf8((vbool64_t)(op0), (size_t)(op1)) +#define viota_m_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define viota_m_u16m1(op0, op1) \ +__builtin_rvv_viota_m_u16m1((vbool16_t)(op0), (size_t)(op1)) +#define viota_m_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define viota_m_u16m2(op0, op1) \ +__builtin_rvv_viota_m_u16m2((vbool8_t)(op0), (size_t)(op1)) +#define viota_m_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define viota_m_u16m4(op0, op1) \ +__builtin_rvv_viota_m_u16m4((vbool4_t)(op0), (size_t)(op1)) +#define viota_m_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define viota_m_u16m8(op0, op1) \ +__builtin_rvv_viota_m_u16m8((vbool2_t)(op0), (size_t)(op1)) +#define viota_m_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define viota_m_u16mf2(op0, op1) \ +__builtin_rvv_viota_m_u16mf2((vbool32_t)(op0), (size_t)(op1)) +#define viota_m_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define viota_m_u16mf4(op0, op1) \ +__builtin_rvv_viota_m_u16mf4((vbool64_t)(op0), (size_t)(op1)) +#define viota_m_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define viota_m_u32m1(op0, op1) \ +__builtin_rvv_viota_m_u32m1((vbool32_t)(op0), (size_t)(op1)) +#define viota_m_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define viota_m_u32m2(op0, op1) \ +__builtin_rvv_viota_m_u32m2((vbool16_t)(op0), (size_t)(op1)) +#define viota_m_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define viota_m_u32m4(op0, op1) \ +__builtin_rvv_viota_m_u32m4((vbool8_t)(op0), (size_t)(op1)) +#define viota_m_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define viota_m_u32m8(op0, op1) \ +__builtin_rvv_viota_m_u32m8((vbool4_t)(op0), (size_t)(op1)) +#define viota_m_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define viota_m_u32mf2(op0, op1) \ +__builtin_rvv_viota_m_u32mf2((vbool64_t)(op0), (size_t)(op1)) +#define viota_m_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define viota_m_u64m1(op0, op1) \ +__builtin_rvv_viota_m_u64m1((vbool64_t)(op0), (size_t)(op1)) +#define viota_m_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define 
viota_m_u64m2(op0, op1) \ +__builtin_rvv_viota_m_u64m2((vbool32_t)(op0), (size_t)(op1)) +#define viota_m_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define viota_m_u64m4(op0, op1) \ +__builtin_rvv_viota_m_u64m4((vbool16_t)(op0), (size_t)(op1)) +#define viota_m_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define viota_m_u64m8(op0, op1) \ +__builtin_rvv_viota_m_u64m8((vbool8_t)(op0), (size_t)(op1)) +#define viota_m_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_viota_m_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vid_v_i8m1(op0) \ +__builtin_rvv_vid_v_i8m1((size_t)(op0)) +#define vid_v_i8m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (size_t)(op2)) +#define vid_v_i8m2(op0) \ +__builtin_rvv_vid_v_i8m2((size_t)(op0)) +#define vid_v_i8m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (size_t)(op2)) +#define vid_v_i8m4(op0) \ +__builtin_rvv_vid_v_i8m4((size_t)(op0)) +#define vid_v_i8m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (size_t)(op2)) +#define vid_v_i8m8(op0) \ +__builtin_rvv_vid_v_i8m8((size_t)(op0)) +#define vid_v_i8m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (size_t)(op2)) +#define vid_v_i8mf2(op0) \ +__builtin_rvv_vid_v_i8mf2((size_t)(op0)) +#define vid_v_i8mf2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (size_t)(op2)) +#define vid_v_i8mf4(op0) \ +__builtin_rvv_vid_v_i8mf4((size_t)(op0)) +#define vid_v_i8mf4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (size_t)(op2)) +#define vid_v_i8mf8(op0) \ +__builtin_rvv_vid_v_i8mf8((size_t)(op0)) +#define vid_v_i8mf8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (size_t)(op2)) +#define vid_v_i16m1(op0) \ +__builtin_rvv_vid_v_i16m1((size_t)(op0)) +#define vid_v_i16m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vid_v_i16m2(op0) \ +__builtin_rvv_vid_v_i16m2((size_t)(op0)) +#define vid_v_i16m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vid_v_i16m4(op0) \ +__builtin_rvv_vid_v_i16m4((size_t)(op0)) +#define vid_v_i16m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vid_v_i16m8(op0) \ +__builtin_rvv_vid_v_i16m8((size_t)(op0)) +#define vid_v_i16m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vid_v_i16mf2(op0) \ +__builtin_rvv_vid_v_i16mf2((size_t)(op0)) +#define vid_v_i16mf2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vid_v_i16mf4(op0) \ +__builtin_rvv_vid_v_i16mf4((size_t)(op0)) +#define vid_v_i16mf4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vid_v_i32m1(op0) \ +__builtin_rvv_vid_v_i32m1((size_t)(op0)) +#define vid_v_i32m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vid_v_i32m2(op0) \ +__builtin_rvv_vid_v_i32m2((size_t)(op0)) +#define vid_v_i32m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i32m2_m((vbool16_t)(op0), 
(vint32m2_t)(op1), (size_t)(op2)) +#define vid_v_i32m4(op0) \ +__builtin_rvv_vid_v_i32m4((size_t)(op0)) +#define vid_v_i32m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vid_v_i32m8(op0) \ +__builtin_rvv_vid_v_i32m8((size_t)(op0)) +#define vid_v_i32m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vid_v_i32mf2(op0) \ +__builtin_rvv_vid_v_i32mf2((size_t)(op0)) +#define vid_v_i32mf2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vid_v_i64m1(op0) \ +__builtin_rvv_vid_v_i64m1((size_t)(op0)) +#define vid_v_i64m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vid_v_i64m2(op0) \ +__builtin_rvv_vid_v_i64m2((size_t)(op0)) +#define vid_v_i64m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vid_v_i64m4(op0) \ +__builtin_rvv_vid_v_i64m4((size_t)(op0)) +#define vid_v_i64m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vid_v_i64m8(op0) \ +__builtin_rvv_vid_v_i64m8((size_t)(op0)) +#define vid_v_i64m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vid_v_u8m1(op0) \ +__builtin_rvv_vid_v_u8m1((size_t)(op0)) +#define vid_v_u8m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vid_v_u8m2(op0) \ +__builtin_rvv_vid_v_u8m2((size_t)(op0)) +#define vid_v_u8m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vid_v_u8m4(op0) \ +__builtin_rvv_vid_v_u8m4((size_t)(op0)) +#define vid_v_u8m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vid_v_u8m8(op0) \ +__builtin_rvv_vid_v_u8m8((size_t)(op0)) +#define vid_v_u8m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vid_v_u8mf2(op0) \ +__builtin_rvv_vid_v_u8mf2((size_t)(op0)) +#define vid_v_u8mf2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vid_v_u8mf4(op0) \ +__builtin_rvv_vid_v_u8mf4((size_t)(op0)) +#define vid_v_u8mf4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vid_v_u8mf8(op0) \ +__builtin_rvv_vid_v_u8mf8((size_t)(op0)) +#define vid_v_u8mf8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vid_v_u16m1(op0) \ +__builtin_rvv_vid_v_u16m1((size_t)(op0)) +#define vid_v_u16m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vid_v_u16m2(op0) \ +__builtin_rvv_vid_v_u16m2((size_t)(op0)) +#define vid_v_u16m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vid_v_u16m4(op0) \ +__builtin_rvv_vid_v_u16m4((size_t)(op0)) +#define vid_v_u16m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vid_v_u16m8(op0) \ +__builtin_rvv_vid_v_u16m8((size_t)(op0)) +#define vid_v_u16m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vid_v_u16mf2(op0) \ +__builtin_rvv_vid_v_u16mf2((size_t)(op0)) +#define 
vid_v_u16mf2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vid_v_u16mf4(op0) \ +__builtin_rvv_vid_v_u16mf4((size_t)(op0)) +#define vid_v_u16mf4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vid_v_u32m1(op0) \ +__builtin_rvv_vid_v_u32m1((size_t)(op0)) +#define vid_v_u32m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vid_v_u32m2(op0) \ +__builtin_rvv_vid_v_u32m2((size_t)(op0)) +#define vid_v_u32m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vid_v_u32m4(op0) \ +__builtin_rvv_vid_v_u32m4((size_t)(op0)) +#define vid_v_u32m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vid_v_u32m8(op0) \ +__builtin_rvv_vid_v_u32m8((size_t)(op0)) +#define vid_v_u32m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vid_v_u32mf2(op0) \ +__builtin_rvv_vid_v_u32mf2((size_t)(op0)) +#define vid_v_u32mf2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vid_v_u64m1(op0) \ +__builtin_rvv_vid_v_u64m1((size_t)(op0)) +#define vid_v_u64m1_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vid_v_u64m2(op0) \ +__builtin_rvv_vid_v_u64m2((size_t)(op0)) +#define vid_v_u64m2_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vid_v_u64m4(op0) \ +__builtin_rvv_vid_v_u64m4((size_t)(op0)) +#define vid_v_u64m4_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vid_v_u64m8(op0) \ +__builtin_rvv_vid_v_u64m8((size_t)(op0)) +#define vid_v_u64m8_m(op0, op1, op2) \ +__builtin_rvv_vid_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vmv_x_s_i8m1_i8(op0) \ +__builtin_rvv_vmv_x_s_i8m1_i8((vint8m1_t)(op0)) +#define vmv_x_s_i8m2_i8(op0) \ +__builtin_rvv_vmv_x_s_i8m2_i8((vint8m2_t)(op0)) +#define vmv_x_s_i8m4_i8(op0) \ +__builtin_rvv_vmv_x_s_i8m4_i8((vint8m4_t)(op0)) +#define vmv_x_s_i8m8_i8(op0) \ +__builtin_rvv_vmv_x_s_i8m8_i8((vint8m8_t)(op0)) +#define vmv_x_s_i8mf2_i8(op0) \ +__builtin_rvv_vmv_x_s_i8mf2_i8((vint8mf2_t)(op0)) +#define vmv_x_s_i8mf4_i8(op0) \ +__builtin_rvv_vmv_x_s_i8mf4_i8((vint8mf4_t)(op0)) +#define vmv_x_s_i8mf8_i8(op0) \ +__builtin_rvv_vmv_x_s_i8mf8_i8((vint8mf8_t)(op0)) +#define vmv_x_s_i16m1_i16(op0) \ +__builtin_rvv_vmv_x_s_i16m1_i16((vint16m1_t)(op0)) +#define vmv_x_s_i16m2_i16(op0) \ +__builtin_rvv_vmv_x_s_i16m2_i16((vint16m2_t)(op0)) +#define vmv_x_s_i16m4_i16(op0) \ +__builtin_rvv_vmv_x_s_i16m4_i16((vint16m4_t)(op0)) +#define vmv_x_s_i16m8_i16(op0) \ +__builtin_rvv_vmv_x_s_i16m8_i16((vint16m8_t)(op0)) +#define vmv_x_s_i16mf2_i16(op0) \ +__builtin_rvv_vmv_x_s_i16mf2_i16((vint16mf2_t)(op0)) +#define vmv_x_s_i16mf4_i16(op0) \ +__builtin_rvv_vmv_x_s_i16mf4_i16((vint16mf4_t)(op0)) +#define vmv_x_s_i32m1_i32(op0) \ +__builtin_rvv_vmv_x_s_i32m1_i32((vint32m1_t)(op0)) +#define vmv_x_s_i32m2_i32(op0) \ +__builtin_rvv_vmv_x_s_i32m2_i32((vint32m2_t)(op0)) +#define vmv_x_s_i32m4_i32(op0) \ +__builtin_rvv_vmv_x_s_i32m4_i32((vint32m4_t)(op0)) +#define vmv_x_s_i32m8_i32(op0) \ +__builtin_rvv_vmv_x_s_i32m8_i32((vint32m8_t)(op0)) +#define vmv_x_s_i32mf2_i32(op0) \ 
+__builtin_rvv_vmv_x_s_i32mf2_i32((vint32mf2_t)(op0))
+#define vmv_x_s_i64m1_i64(op0) \
+__builtin_rvv_vmv_x_s_i64m1_i64((vint64m1_t)(op0))
+#define vmv_x_s_i64m2_i64(op0) \
+__builtin_rvv_vmv_x_s_i64m2_i64((vint64m2_t)(op0))
+#define vmv_x_s_i64m4_i64(op0) \
+__builtin_rvv_vmv_x_s_i64m4_i64((vint64m4_t)(op0))
+#define vmv_x_s_i64m8_i64(op0) \
+__builtin_rvv_vmv_x_s_i64m8_i64((vint64m8_t)(op0))
+#define vmv_x_s_u8m1_u8(op0) \
+__builtin_rvv_vmv_x_s_u8m1_u8((vuint8m1_t)(op0))
+#define vmv_x_s_u8m2_u8(op0) \
+__builtin_rvv_vmv_x_s_u8m2_u8((vuint8m2_t)(op0))
+#define vmv_x_s_u8m4_u8(op0) \
+__builtin_rvv_vmv_x_s_u8m4_u8((vuint8m4_t)(op0))
+#define vmv_x_s_u8m8_u8(op0) \
+__builtin_rvv_vmv_x_s_u8m8_u8((vuint8m8_t)(op0))
+#define vmv_x_s_u8mf2_u8(op0) \
+__builtin_rvv_vmv_x_s_u8mf2_u8((vuint8mf2_t)(op0))
+#define vmv_x_s_u8mf4_u8(op0) \
+__builtin_rvv_vmv_x_s_u8mf4_u8((vuint8mf4_t)(op0))
+#define vmv_x_s_u8mf8_u8(op0) \
+__builtin_rvv_vmv_x_s_u8mf8_u8((vuint8mf8_t)(op0))
+#define vmv_x_s_u16m1_u16(op0) \
+__builtin_rvv_vmv_x_s_u16m1_u16((vuint16m1_t)(op0))
+#define vmv_x_s_u16m2_u16(op0) \
+__builtin_rvv_vmv_x_s_u16m2_u16((vuint16m2_t)(op0))
+#define vmv_x_s_u16m4_u16(op0) \
+__builtin_rvv_vmv_x_s_u16m4_u16((vuint16m4_t)(op0))
+#define vmv_x_s_u16m8_u16(op0) \
+__builtin_rvv_vmv_x_s_u16m8_u16((vuint16m8_t)(op0))
+#define vmv_x_s_u16mf2_u16(op0) \
+__builtin_rvv_vmv_x_s_u16mf2_u16((vuint16mf2_t)(op0))
+#define vmv_x_s_u16mf4_u16(op0) \
+__builtin_rvv_vmv_x_s_u16mf4_u16((vuint16mf4_t)(op0))
+#define vmv_x_s_u32m1_u32(op0) \
+__builtin_rvv_vmv_x_s_u32m1_u32((vuint32m1_t)(op0))
+#define vmv_x_s_u32m2_u32(op0) \
+__builtin_rvv_vmv_x_s_u32m2_u32((vuint32m2_t)(op0))
+#define vmv_x_s_u32m4_u32(op0) \
+__builtin_rvv_vmv_x_s_u32m4_u32((vuint32m4_t)(op0))
+#define vmv_x_s_u32m8_u32(op0) \
+__builtin_rvv_vmv_x_s_u32m8_u32((vuint32m8_t)(op0))
+#define vmv_x_s_u32mf2_u32(op0) \
+__builtin_rvv_vmv_x_s_u32mf2_u32((vuint32mf2_t)(op0))
+#define vmv_x_s_u64m1_u64(op0) \
+__builtin_rvv_vmv_x_s_u64m1_u64((vuint64m1_t)(op0))
+#define vmv_x_s_u64m2_u64(op0) \
+__builtin_rvv_vmv_x_s_u64m2_u64((vuint64m2_t)(op0))
+#define vmv_x_s_u64m4_u64(op0) \
+__builtin_rvv_vmv_x_s_u64m4_u64((vuint64m4_t)(op0))
+#define vmv_x_s_u64m8_u64(op0) \
+__builtin_rvv_vmv_x_s_u64m8_u64((vuint64m8_t)(op0))
+#define vmv_s_x_i8m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i8m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i8m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i8m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i8mf2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i8mf4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i8mf8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vmv_s_x_i16m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vmv_s_x_i16m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vmv_s_x_i16m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vmv_s_x_i16m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vmv_s_x_i16mf2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vmv_s_x_i16mf4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vmv_s_x_i32m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vmv_s_x_i32m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vmv_s_x_i32m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vmv_s_x_i32m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vmv_s_x_i32mf2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vmv_s_x_i64m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vmv_s_x_i64m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vmv_s_x_i64m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vmv_s_x_i64m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8mf2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8mf4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u8mf8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2))
+#define vmv_s_x_u16m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vmv_s_x_u16m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vmv_s_x_u16m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vmv_s_x_u16m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vmv_s_x_u16mf2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vmv_s_x_u16mf4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vmv_s_x_u32m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vmv_s_x_u32m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vmv_s_x_u32m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vmv_s_x_u32m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vmv_s_x_u32mf2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vmv_s_x_u64m1(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vmv_s_x_u64m2(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vmv_s_x_u64m4(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vmv_s_x_u64m8(op0, op1, op2) \
+__builtin_rvv_vmv_s_x_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u16m1(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u16m2(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u16m4(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u16m8(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u16mf2(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u16mf4(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u32m1(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u32m2(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u32m4(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u32m8(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u32mf2(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u64m1(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u64m2(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u64m4(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vfmv_s_x_u64m8(op0, op1, op2) \
+__builtin_rvv_vfmv_s_x_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2))
+#define vslideup_vx_i8m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i8m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i8m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i8m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i8mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i8mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i8mf8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i16m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i16m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i16m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i16m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i32m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i32m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i32m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i32m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i64m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i64m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_i64m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u8mf8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u16m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u16m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u16m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u32m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u32m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u32m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslideup_vx_u64m8(op0, op1, op2, op3) \
+__builtin_rvv_vslideup_vx_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslideup_vx_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslideup_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i8mf8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i16m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i16m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i16m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i16m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i32m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i32m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i32m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i32m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i64m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i64m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_i64m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u8mf8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u16m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u16m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u16m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u32m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u32m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u32m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslidedown_vx_u64m8(op0, op1, op2, op3) \
+__builtin_rvv_vslidedown_vx_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2), (size_t)(op3))
+#define vslidedown_vx_u64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslidedown_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8m2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8m4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8m8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8mf2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8mf4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i8mf8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i16m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i16m2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i16m4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i16m8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i16mf2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i16mf4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i32m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i32m2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i32m4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i32m8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i32m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i32mf2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i64m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i64m2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i64m4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vslide1up_vx_i64m8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2))
+#define vslide1up_vx_i64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8m1((vuint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8m2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8m2((vuint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8m4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8m4((vuint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8m8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8m8((vuint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8mf2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8mf2((vuint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8mf4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8mf4((vuint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u8mf8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u8mf8((vuint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u8mf8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u16m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u16m1((vuint16m1_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u16m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u16m2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u16m2((vuint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u16m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u16m4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u16m4((vuint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u16m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u16m8(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u16m8((vuint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u16m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u16mf2(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u16mf2((vuint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u16mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u16mf4(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u16mf4((vuint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u16mf4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vslide1up_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4))
+#define vslide1up_vx_u32m1(op0, op1, op2) \
+__builtin_rvv_vslide1up_vx_u32m1((vuint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
+#define vslide1up_vx_u32m1_m(op0, op1, op2,
op3, op4) \ +__builtin_rvv_vslide1up_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u32m2((vuint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u32m4((vuint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u32m8((vuint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u32mf2((vuint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u64m1((vuint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u64m2((vuint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u64m4((vuint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1up_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vslide1up_vx_u64m8((vuint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1up_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1up_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (int8_t)(op3), 
(size_t)(op4)) +#define vslide1down_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i32m2(op0, op1, op2) \ 
+__builtin_rvv_vslide1down_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i32m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8m1((vuint8m1_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8m2((vuint8m2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8m4((vuint8m4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8m8((vuint8m8_t)(op0), (int8_t)(op1), (size_t)(op2)) 
+#define vslide1down_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8mf2((vuint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8mf4((vuint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u8mf8((vuint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (int8_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u16m1((vuint16m1_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u16m2((vuint16m2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u16m4((vuint16m4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u16m8((vuint16m8_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u16mf2((vuint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u16mf4((vuint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (int16_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u32m1((vuint32m1_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u32m2((vuint32m2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define 
vslide1down_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u32m4((vuint32m4_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u32m8((vuint32m8_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u32mf2((vuint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (int32_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u64m1((vuint64m1_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u64m2((vuint64m2_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u64m4((vuint64m4_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vslide1down_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vslide1down_vx_u64m8((vuint64m8_t)(op0), (int64_t)(op1), (size_t)(op2)) +#define vslide1down_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslide1down_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (int64_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8m1((vint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8m2((vint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8m4((vint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8m8((vint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vrgather_vv_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8mf2((vint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8mf4((vint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vrgather_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i8mf8((vint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vrgather_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vrgather_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i16m1((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i16m2((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i16m4((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i16m8((vint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i16mf2((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i16mf4((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgather_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgather_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i32m1((vint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i32m2((vint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), 
(vuint32m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i32m4((vint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i32m8((vint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_i32mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i32mf2((vint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i64m1((vint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i64m2((vint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i64m4((vint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_i64m8((vint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8m1((vint8m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8m2((vint8m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8m4((vint8m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8m8((vint8m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8mf2((vint8mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define 
vrgather_vx_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8mf4((vint8mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i8mf8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i8mf8((vint8mf8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i16m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i16m1((vint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i16m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i16m2((vint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i16m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i16m4((vint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i16m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i16m8((vint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i16mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i16mf2((vint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i16mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i16mf4((vint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i32m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i32m1((vint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i32m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i32m2((vint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i32m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i32m4((vint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i32m8(op0, op1, op2) \ 
+__builtin_rvv_vrgather_vx_i32m8((vint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i32mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i32mf2((vint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i64m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i64m1((vint64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i64m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i64m2((vint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i64m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i64m4((vint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_i64m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_i64m8((vint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i8m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i8m1((vint8m1_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i8m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i8m2((vint8m2_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i8m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i8m4((vint8m4_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i8mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i8mf2((vint8mf2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i8mf4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i8mf4((vint8mf4_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i8mf8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i8mf8((vint8mf8_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define 
vrgatherei16_vv_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i16m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i16m1((vint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i16m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i16m2((vint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i16m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i16m4((vint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i16m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i16m8((vint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i16mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i16mf2((vint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i16mf4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i16mf4((vint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i32m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i32m1((vint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i32m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i32m2((vint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i32m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i32m4((vint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i32m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i32m8((vint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i32mf2(op0, op1, op2) \ 
+__builtin_rvv_vrgatherei16_vv_i32mf2((vint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i64m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i64m1((vint64m1_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i64m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i64m2((vint64m2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i64m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i64m4((vint64m4_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_i64m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_i64m8((vint64m8_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vrgather_vv_u8mf8(op0, op1, op2) \ 
+__builtin_rvv_vrgather_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vrgather_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vrgather_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgather_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgather_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u32mf2((vuint32mf2_t)(op0), 
(vuint32mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8m1((vuint8m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8m2((vuint8m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8m4((vuint8m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8m8((vuint8m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8mf2((vuint8mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8mf4((vuint8mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u8mf8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u8mf8((vuint8mf8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), 
(vuint8mf8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u16m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u16m1((vuint16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u16m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u16m2((vuint16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u16m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u16m4((vuint16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u16m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u16m8((vuint16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u16mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u16mf2((vuint16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u16mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u16mf4((vuint16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u32m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u32m1((vuint32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u32m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u32m2((vuint32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u32m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u32m4((vuint32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u32m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u32m8((vuint32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u32mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u32mf2((vuint32mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u64m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u64m1((vuint64m1_t)(op0), (size_t)(op1), 
(size_t)(op2)) +#define vrgather_vx_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u64m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u64m2((vuint64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u64m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u64m4((vuint64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_u64m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_u64m8((vuint64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u8m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u8m1((vuint8m1_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u8m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u8m2((vuint8m2_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u8m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u8m4((vuint8m4_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u8mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u8mf2((vuint8mf2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u8mf4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u8mf4((vuint8mf4_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u8mf8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u8mf8((vuint8mf8_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u16m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u16m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) 
+#define vrgatherei16_vv_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u16m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u16m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u16mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u16mf4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u32m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u32m1((vuint32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u32m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u32m2((vuint32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u32m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u32m4((vuint32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u32m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u32m8((vuint32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u32mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u32mf2((vuint32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u64m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u64m1((vuint64m1_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define 
vrgatherei16_vv_u64m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u64m2((vuint64m2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u64m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u64m4((vuint64m4_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_u64m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_u64m8((vuint64m8_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vcompress_vm_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8m1((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8m2((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8m4((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8m8((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8mf2((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8mf4((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vcompress_vm_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i8mf8((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vcompress_vm_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i16m1((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i16m2((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i16m4((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i16m8((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i16mf2((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i16mf4((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vcompress_vm_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i32m1((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i32m2((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i32m4((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i32m8((vbool4_t)(op0), 
(vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i32mf2((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i64m1((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i64m2((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i64m4((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_i64m8((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8m1((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8m2((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8m4((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8m8((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8mf2((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8mf4((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vcompress_vm_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u8mf8((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vcompress_vm_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u16m1((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u16m2((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u16m4((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u16m8((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u16mf2((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u16mf4((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vcompress_vm_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u32m1((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u32m2((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u32m4((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u32m8((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_u32mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vcompress_vm_u32mf2((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u64m1((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u64m2((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u64m4((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_u64m8((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vget_v_i8m2_i8m1(op0, op1) \ +__builtin_rvv_vget_v_i8m2_i8m1((vint8m2_t)(op0), (size_t)(op1)) +#define vget_v_i8m4_i8m1(op0, op1) \ +__builtin_rvv_vget_v_i8m4_i8m1((vint8m4_t)(op0), (size_t)(op1)) +#define vget_v_i8m8_i8m1(op0, op1) \ +__builtin_rvv_vget_v_i8m8_i8m1((vint8m8_t)(op0), (size_t)(op1)) +#define vget_v_i16m2_i16m1(op0, op1) \ +__builtin_rvv_vget_v_i16m2_i16m1((vint16m2_t)(op0), (size_t)(op1)) +#define vget_v_i16m4_i16m1(op0, op1) \ +__builtin_rvv_vget_v_i16m4_i16m1((vint16m4_t)(op0), (size_t)(op1)) +#define vget_v_i16m8_i16m1(op0, op1) \ +__builtin_rvv_vget_v_i16m8_i16m1((vint16m8_t)(op0), (size_t)(op1)) +#define vget_v_i32m2_i32m1(op0, op1) \ +__builtin_rvv_vget_v_i32m2_i32m1((vint32m2_t)(op0), (size_t)(op1)) +#define vget_v_i32m4_i32m1(op0, op1) \ +__builtin_rvv_vget_v_i32m4_i32m1((vint32m4_t)(op0), (size_t)(op1)) +#define vget_v_i32m8_i32m1(op0, op1) \ +__builtin_rvv_vget_v_i32m8_i32m1((vint32m8_t)(op0), (size_t)(op1)) +#define vget_v_i64m2_i64m1(op0, op1) \ +__builtin_rvv_vget_v_i64m2_i64m1((vint64m2_t)(op0), (size_t)(op1)) +#define vget_v_i64m4_i64m1(op0, op1) \ +__builtin_rvv_vget_v_i64m4_i64m1((vint64m4_t)(op0), (size_t)(op1)) +#define vget_v_i64m8_i64m1(op0, op1) \ +__builtin_rvv_vget_v_i64m8_i64m1((vint64m8_t)(op0), (size_t)(op1)) +#define vget_v_u8m2_u8m1(op0, op1) \ +__builtin_rvv_vget_v_u8m2_u8m1((vuint8m2_t)(op0), (size_t)(op1)) +#define vget_v_u8m4_u8m1(op0, op1) \ +__builtin_rvv_vget_v_u8m4_u8m1((vuint8m4_t)(op0), (size_t)(op1)) +#define vget_v_u8m8_u8m1(op0, op1) \ +__builtin_rvv_vget_v_u8m8_u8m1((vuint8m8_t)(op0), (size_t)(op1)) +#define vget_v_u16m2_u16m1(op0, op1) \ +__builtin_rvv_vget_v_u16m2_u16m1((vuint16m2_t)(op0), (size_t)(op1)) +#define vget_v_u16m4_u16m1(op0, op1) \ +__builtin_rvv_vget_v_u16m4_u16m1((vuint16m4_t)(op0), (size_t)(op1)) +#define vget_v_u16m8_u16m1(op0, op1) \ +__builtin_rvv_vget_v_u16m8_u16m1((vuint16m8_t)(op0), (size_t)(op1)) +#define vget_v_u32m2_u32m1(op0, op1) \ +__builtin_rvv_vget_v_u32m2_u32m1((vuint32m2_t)(op0), (size_t)(op1)) +#define vget_v_u32m4_u32m1(op0, op1) \ +__builtin_rvv_vget_v_u32m4_u32m1((vuint32m4_t)(op0), (size_t)(op1)) +#define vget_v_u32m8_u32m1(op0, op1) \ +__builtin_rvv_vget_v_u32m8_u32m1((vuint32m8_t)(op0), (size_t)(op1)) +#define vget_v_u64m2_u64m1(op0, op1) \ +__builtin_rvv_vget_v_u64m2_u64m1((vuint64m2_t)(op0), (size_t)(op1)) +#define vget_v_u64m4_u64m1(op0, op1) \ +__builtin_rvv_vget_v_u64m4_u64m1((vuint64m4_t)(op0), (size_t)(op1)) +#define vget_v_u64m8_u64m1(op0, op1) \ +__builtin_rvv_vget_v_u64m8_u64m1((vuint64m8_t)(op0), (size_t)(op1)) +#define vget_v_i8m4_i8m2(op0, op1) \ +__builtin_rvv_vget_v_i8m4_i8m2((vint8m4_t)(op0), (size_t)(op1)) +#define vget_v_i8m8_i8m2(op0, op1) \ +__builtin_rvv_vget_v_i8m8_i8m2((vint8m8_t)(op0), (size_t)(op1)) +#define vget_v_i16m4_i16m2(op0, op1) 
\ +__builtin_rvv_vget_v_i16m4_i16m2((vint16m4_t)(op0), (size_t)(op1)) +#define vget_v_i16m8_i16m2(op0, op1) \ +__builtin_rvv_vget_v_i16m8_i16m2((vint16m8_t)(op0), (size_t)(op1)) +#define vget_v_i32m4_i32m2(op0, op1) \ +__builtin_rvv_vget_v_i32m4_i32m2((vint32m4_t)(op0), (size_t)(op1)) +#define vget_v_i32m8_i32m2(op0, op1) \ +__builtin_rvv_vget_v_i32m8_i32m2((vint32m8_t)(op0), (size_t)(op1)) +#define vget_v_i64m4_i64m2(op0, op1) \ +__builtin_rvv_vget_v_i64m4_i64m2((vint64m4_t)(op0), (size_t)(op1)) +#define vget_v_i64m8_i64m2(op0, op1) \ +__builtin_rvv_vget_v_i64m8_i64m2((vint64m8_t)(op0), (size_t)(op1)) +#define vget_v_u8m4_u8m2(op0, op1) \ +__builtin_rvv_vget_v_u8m4_u8m2((vuint8m4_t)(op0), (size_t)(op1)) +#define vget_v_u8m8_u8m2(op0, op1) \ +__builtin_rvv_vget_v_u8m8_u8m2((vuint8m8_t)(op0), (size_t)(op1)) +#define vget_v_u16m4_u16m2(op0, op1) \ +__builtin_rvv_vget_v_u16m4_u16m2((vuint16m4_t)(op0), (size_t)(op1)) +#define vget_v_u16m8_u16m2(op0, op1) \ +__builtin_rvv_vget_v_u16m8_u16m2((vuint16m8_t)(op0), (size_t)(op1)) +#define vget_v_u32m4_u32m2(op0, op1) \ +__builtin_rvv_vget_v_u32m4_u32m2((vuint32m4_t)(op0), (size_t)(op1)) +#define vget_v_u32m8_u32m2(op0, op1) \ +__builtin_rvv_vget_v_u32m8_u32m2((vuint32m8_t)(op0), (size_t)(op1)) +#define vget_v_u64m4_u64m2(op0, op1) \ +__builtin_rvv_vget_v_u64m4_u64m2((vuint64m4_t)(op0), (size_t)(op1)) +#define vget_v_u64m8_u64m2(op0, op1) \ +__builtin_rvv_vget_v_u64m8_u64m2((vuint64m8_t)(op0), (size_t)(op1)) +#define vget_v_i8m8_i8m4(op0, op1) \ +__builtin_rvv_vget_v_i8m8_i8m4((vint8m8_t)(op0), (size_t)(op1)) +#define vget_v_i16m8_i16m4(op0, op1) \ +__builtin_rvv_vget_v_i16m8_i16m4((vint16m8_t)(op0), (size_t)(op1)) +#define vget_v_i32m8_i32m4(op0, op1) \ +__builtin_rvv_vget_v_i32m8_i32m4((vint32m8_t)(op0), (size_t)(op1)) +#define vget_v_i64m8_i64m4(op0, op1) \ +__builtin_rvv_vget_v_i64m8_i64m4((vint64m8_t)(op0), (size_t)(op1)) +#define vget_v_u8m8_u8m4(op0, op1) \ +__builtin_rvv_vget_v_u8m8_u8m4((vuint8m8_t)(op0), (size_t)(op1)) +#define vget_v_u16m8_u16m4(op0, op1) \ +__builtin_rvv_vget_v_u16m8_u16m4((vuint16m8_t)(op0), (size_t)(op1)) +#define vget_v_u32m8_u32m4(op0, op1) \ +__builtin_rvv_vget_v_u32m8_u32m4((vuint32m8_t)(op0), (size_t)(op1)) +#define vget_v_u64m8_u64m4(op0, op1) \ +__builtin_rvv_vget_v_u64m8_u64m4((vuint64m8_t)(op0), (size_t)(op1)) +#define vset_v_i8m1_i8m2(op0, op1, op2) \ +__builtin_rvv_vset_v_i8m1_i8m2((vint8m2_t)(op0), (size_t)(op1), (vint8m1_t)(op2)) +#define vset_v_i16m1_i16m2(op0, op1, op2) \ +__builtin_rvv_vset_v_i16m1_i16m2((vint16m2_t)(op0), (size_t)(op1), (vint16m1_t)(op2)) +#define vset_v_i32m1_i32m2(op0, op1, op2) \ +__builtin_rvv_vset_v_i32m1_i32m2((vint32m2_t)(op0), (size_t)(op1), (vint32m1_t)(op2)) +#define vset_v_i64m1_i64m2(op0, op1, op2) \ +__builtin_rvv_vset_v_i64m1_i64m2((vint64m2_t)(op0), (size_t)(op1), (vint64m1_t)(op2)) +#define vset_v_u8m1_u8m2(op0, op1, op2) \ +__builtin_rvv_vset_v_u8m1_u8m2((vuint8m2_t)(op0), (size_t)(op1), (vuint8m1_t)(op2)) +#define vset_v_u16m1_u16m2(op0, op1, op2) \ +__builtin_rvv_vset_v_u16m1_u16m2((vuint16m2_t)(op0), (size_t)(op1), (vuint16m1_t)(op2)) +#define vset_v_u32m1_u32m2(op0, op1, op2) \ +__builtin_rvv_vset_v_u32m1_u32m2((vuint32m2_t)(op0), (size_t)(op1), (vuint32m1_t)(op2)) +#define vset_v_u64m1_u64m2(op0, op1, op2) \ +__builtin_rvv_vset_v_u64m1_u64m2((vuint64m2_t)(op0), (size_t)(op1), (vuint64m1_t)(op2)) +#define vset_v_i8m1_i8m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i8m1_i8m4((vint8m4_t)(op0), (size_t)(op1), (vint8m1_t)(op2)) +#define vset_v_i8m2_i8m4(op0, op1, op2) \ 
+__builtin_rvv_vset_v_i8m2_i8m4((vint8m4_t)(op0), (size_t)(op1), (vint8m2_t)(op2)) +#define vset_v_i16m1_i16m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i16m1_i16m4((vint16m4_t)(op0), (size_t)(op1), (vint16m1_t)(op2)) +#define vset_v_i16m2_i16m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i16m2_i16m4((vint16m4_t)(op0), (size_t)(op1), (vint16m2_t)(op2)) +#define vset_v_i32m1_i32m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i32m1_i32m4((vint32m4_t)(op0), (size_t)(op1), (vint32m1_t)(op2)) +#define vset_v_i32m2_i32m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i32m2_i32m4((vint32m4_t)(op0), (size_t)(op1), (vint32m2_t)(op2)) +#define vset_v_i64m1_i64m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i64m1_i64m4((vint64m4_t)(op0), (size_t)(op1), (vint64m1_t)(op2)) +#define vset_v_i64m2_i64m4(op0, op1, op2) \ +__builtin_rvv_vset_v_i64m2_i64m4((vint64m4_t)(op0), (size_t)(op1), (vint64m2_t)(op2)) +#define vset_v_u8m1_u8m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u8m1_u8m4((vuint8m4_t)(op0), (size_t)(op1), (vuint8m1_t)(op2)) +#define vset_v_u8m2_u8m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u8m2_u8m4((vuint8m4_t)(op0), (size_t)(op1), (vuint8m2_t)(op2)) +#define vset_v_u16m1_u16m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u16m1_u16m4((vuint16m4_t)(op0), (size_t)(op1), (vuint16m1_t)(op2)) +#define vset_v_u16m2_u16m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u16m2_u16m4((vuint16m4_t)(op0), (size_t)(op1), (vuint16m2_t)(op2)) +#define vset_v_u32m1_u32m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u32m1_u32m4((vuint32m4_t)(op0), (size_t)(op1), (vuint32m1_t)(op2)) +#define vset_v_u32m2_u32m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u32m2_u32m4((vuint32m4_t)(op0), (size_t)(op1), (vuint32m2_t)(op2)) +#define vset_v_u64m1_u64m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u64m1_u64m4((vuint64m4_t)(op0), (size_t)(op1), (vuint64m1_t)(op2)) +#define vset_v_u64m2_u64m4(op0, op1, op2) \ +__builtin_rvv_vset_v_u64m2_u64m4((vuint64m4_t)(op0), (size_t)(op1), (vuint64m2_t)(op2)) +#define vset_v_i8m1_i8m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i8m1_i8m8((vint8m8_t)(op0), (size_t)(op1), (vint8m1_t)(op2)) +#define vset_v_i8m2_i8m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i8m2_i8m8((vint8m8_t)(op0), (size_t)(op1), (vint8m2_t)(op2)) +#define vset_v_i8m4_i8m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i8m4_i8m8((vint8m8_t)(op0), (size_t)(op1), (vint8m4_t)(op2)) +#define vset_v_i16m1_i16m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i16m1_i16m8((vint16m8_t)(op0), (size_t)(op1), (vint16m1_t)(op2)) +#define vset_v_i16m2_i16m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i16m2_i16m8((vint16m8_t)(op0), (size_t)(op1), (vint16m2_t)(op2)) +#define vset_v_i16m4_i16m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i16m4_i16m8((vint16m8_t)(op0), (size_t)(op1), (vint16m4_t)(op2)) +#define vset_v_i32m1_i32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i32m1_i32m8((vint32m8_t)(op0), (size_t)(op1), (vint32m1_t)(op2)) +#define vset_v_i32m2_i32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i32m2_i32m8((vint32m8_t)(op0), (size_t)(op1), (vint32m2_t)(op2)) +#define vset_v_i32m4_i32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i32m4_i32m8((vint32m8_t)(op0), (size_t)(op1), (vint32m4_t)(op2)) +#define vset_v_i64m1_i64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i64m1_i64m8((vint64m8_t)(op0), (size_t)(op1), (vint64m1_t)(op2)) +#define vset_v_i64m2_i64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i64m2_i64m8((vint64m8_t)(op0), (size_t)(op1), (vint64m2_t)(op2)) +#define vset_v_i64m4_i64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_i64m4_i64m8((vint64m8_t)(op0), (size_t)(op1), (vint64m4_t)(op2)) +#define vset_v_u8m1_u8m8(op0, op1, op2) \ 
+__builtin_rvv_vset_v_u8m1_u8m8((vuint8m8_t)(op0), (size_t)(op1), (vuint8m1_t)(op2)) +#define vset_v_u8m2_u8m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u8m2_u8m8((vuint8m8_t)(op0), (size_t)(op1), (vuint8m2_t)(op2)) +#define vset_v_u8m4_u8m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u8m4_u8m8((vuint8m8_t)(op0), (size_t)(op1), (vuint8m4_t)(op2)) +#define vset_v_u16m1_u16m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u16m1_u16m8((vuint16m8_t)(op0), (size_t)(op1), (vuint16m1_t)(op2)) +#define vset_v_u16m2_u16m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u16m2_u16m8((vuint16m8_t)(op0), (size_t)(op1), (vuint16m2_t)(op2)) +#define vset_v_u16m4_u16m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u16m4_u16m8((vuint16m8_t)(op0), (size_t)(op1), (vuint16m4_t)(op2)) +#define vset_v_u32m1_u32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u32m1_u32m8((vuint32m8_t)(op0), (size_t)(op1), (vuint32m1_t)(op2)) +#define vset_v_u32m2_u32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u32m2_u32m8((vuint32m8_t)(op0), (size_t)(op1), (vuint32m2_t)(op2)) +#define vset_v_u32m4_u32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u32m4_u32m8((vuint32m8_t)(op0), (size_t)(op1), (vuint32m4_t)(op2)) +#define vset_v_u64m1_u64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u64m1_u64m8((vuint64m8_t)(op0), (size_t)(op1), (vuint64m1_t)(op2)) +#define vset_v_u64m2_u64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u64m2_u64m8((vuint64m8_t)(op0), (size_t)(op1), (vuint64m2_t)(op2)) +#define vset_v_u64m4_u64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_u64m4_u64m8((vuint64m8_t)(op0), (size_t)(op1), (vuint64m4_t)(op2)) +#define vsoxei8_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8m4((int8_t *)(op0), (vuint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8m8((int8_t *)(op0), (vuint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8m8_m((vbool1_t)(op0), (int8_t *)(op1), (vuint8m8_t)(op2), (vint8m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i8mf8((int8_t 
*)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8m4((uint8_t *)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8m8((uint8_t *)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8m8_m((vbool1_t)(op0), (uint8_t *)(op1), (vuint8m8_t)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i8m4((int8_t *)(op0), (vuint16m8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint16m8_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i8mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vsoxei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u8m4((uint8_t *)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), 
(size_t)(op4)) +#define vsoxei32_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i8mf4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i16m4((int16_t *)(op0), (vuint8m2_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint8m2_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i16m8((int16_t *)(op0), (vuint8m4_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i16m8_m((vbool2_t)(op0), (int16_t *)(op1), (vuint8m4_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), 
(size_t)(op3)) +#define vsoxei8_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u16m4((uint16_t *)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint8m2_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u16m8((uint16_t *)(op0), (vuint8m4_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u16m8_m((vbool2_t)(op0), (uint16_t *)(op1), (vuint8m4_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i16m4((int16_t *)(op0), (vuint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i16m8((int16_t *)(op0), (vuint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i16m8_m((vbool2_t)(op0), (int16_t *)(op1), (vuint16m8_t)(op2), (vint16m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i16mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vsoxei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u16m4((uint16_t *)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u16m8((uint16_t *)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u16m8_m((vbool2_t)(op0), (uint16_t *)(op1), (vuint16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i16m4((int16_t *)(op0), (vuint32m8_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i16m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxei32_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint32m8_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u16m4((uint16_t *)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i16mf4(op0, op1, op2, op3) \ 
+__builtin_rvv_vsoxei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), 
(vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define 
vsoxei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u32m4(op0, op1, op2, 
op3) \ +__builtin_rvv_vsoxei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), 
(vint64m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) 
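+/* Usage sketch (illustrative comment, not part of the generated header):
+ * the vsoxei<EEW>_v_* macros above expand to ordered indexed (scatter)
+ * stores. Element i of the value operand is written to base + index[i],
+ * where the index vector holds byte offsets of element width EEW; the _m
+ * forms skip elements whose mask bit is clear. A minimal sketch, assuming
+ * a toolchain that ships this header and -march=rv64gcv (the function and
+ * variable names below are hypothetical):
+ *
+ *   #include <stddef.h>
+ *   #include <stdint.h>
+ *   #include <riscv_vector.h>
+ *
+ *   // Scatter n 32-bit values to the byte offsets listed in idx[].
+ *   void scatter_u32(uint32_t *base, const uint16_t *idx,
+ *                    const uint32_t *val, size_t n) {
+ *     for (size_t vl; n > 0; n -= vl, idx += vl, val += vl) {
+ *       vl = vsetvl_e32m2(n);
+ *       vuint16m1_t vidx = vle16_v_u16m1(idx, vl);  // EEW=16 byte offsets
+ *       vuint32m2_t vval = vle32_v_u32m2(val, vl);  // SEW=32 data
+ *       vsoxei16_v_u32m2(base, vidx, vval, vl);     // base + vidx[i]
+ *     }
+ *   }
+ *
+ * Note how the index register group is sized EEW*LMUL/SEW (u16m1 against
+ * u32m2 here), and the matching mask type for the _m form is vbool16_t,
+ * since MLEN = SEW/LMUL = 32/2 = 16.
+ */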
+#define vsoxei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vsoxei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vsoxei64_v_u64m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vle16ff_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_i16m1((const int16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_i16m2((const int16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_i16m4((const int16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_i16m8((const int16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_i16mf2((const int16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_i16mf4((const int16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_u16m1((const uint16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_u16m2((const uint16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_u16m4((const uint16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_u16m8((const uint16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_u16mf2((const uint16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) 
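+/* Usage sketch (illustrative comment only): the vle<EEW>ff_v_* macros are
+ * fault-only-first unit-stride loads. Up to vl elements are loaded, but a
+ * trap past element 0 truncates the access instead of faulting, and the
+ * element count actually loaded is written back through the size_t *
+ * out-parameter. The classic use is scanning memory of unknown length; a
+ * minimal strlen-style sketch, assuming rv64gcv (names are hypothetical):
+ *
+ *   #include <stddef.h>
+ *   #include <riscv_vector.h>
+ *
+ *   size_t vec_strlen(const char *s) {
+ *     size_t len = 0;
+ *     for (;;) {
+ *       size_t vl = vsetvlmax_e8m1();
+ *       vint8m1_t v = vle8ff_v_i8m1((const int8_t *)(s + len), &vl, vl);
+ *       vbool8_t nul = vmseq_vx_i8m1_b8(v, 0, vl);  // where v[i] == '\0'
+ *       long hit = vfirst_m_b8(nul, vl);            // first set mask bit
+ *       if (hit >= 0)
+ *         return len + (size_t)hit;
+ *       len += vl;  // vl may have been shrunk by the ff load
+ *     }
+ *   }
+ */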
+#define vle16ff_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_u16mf4((const uint16_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_i32m1((const int32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_i32m2((const int32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_i32m4((const int32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_i32m8((const int32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_i32mf2((const int32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_u32m1((const uint32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_u32m2((const uint32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_u32m4((const uint32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_u32m8((const uint32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_u32mf2((const uint32_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_i64m1((const int64_t *)(op0), (size_t *)(op1), 
(size_t)(op2)) +#define vle64ff_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_i64m2((const int64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_i64m4((const int64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_i64m8((const int64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_u64m1((const uint64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_u64m2((const uint64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_u64m4((const uint64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_u64m8((const uint64_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8_v_i8m1(op0, op1) \ +__builtin_rvv_vle8_v_i8m1((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_i8m2(op0, op1) \ +__builtin_rvv_vle8_v_i8m2((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_i8m4(op0, op1) \ +__builtin_rvv_vle8_v_i8m4((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_i8m8(op0, op1) \ +__builtin_rvv_vle8_v_i8m8((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_i8mf2(op0, op1) \ +__builtin_rvv_vle8_v_i8mf2((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8mf2_m((vbool16_t)(op0), 
(vint8mf2_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_i8mf4(op0, op1) \ +__builtin_rvv_vle8_v_i8mf4((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_i8mf8(op0, op1) \ +__builtin_rvv_vle8_v_i8mf8((const int8_t *)(op0), (size_t)(op1)) +#define vle8_v_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8m1(op0, op1) \ +__builtin_rvv_vle8_v_u8m1((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8m2(op0, op1) \ +__builtin_rvv_vle8_v_u8m2((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8m4(op0, op1) \ +__builtin_rvv_vle8_v_u8m4((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8m8(op0, op1) \ +__builtin_rvv_vle8_v_u8m8((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8mf2(op0, op1) \ +__builtin_rvv_vle8_v_u8mf2((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8mf4(op0, op1) \ +__builtin_rvv_vle8_v_u8mf4((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8_v_u8mf8(op0, op1) \ +__builtin_rvv_vle8_v_u8mf8((const uint8_t *)(op0), (size_t)(op1)) +#define vle8_v_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle8_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vle8ff_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_i8m1((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_i8m2((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_i8m4((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_i8m8(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_i8m8((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_i8mf2(op0, op1, op2) \ 
+__builtin_rvv_vle8ff_v_i8mf2((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_i8mf4((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_i8mf8((const int8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8m1(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8m1((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8m2((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8m4((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8m8(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8m8((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8mf2((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8mf4((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle8ff_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vle8ff_v_u8mf8((const uint8_t *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle8ff_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle8ff_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vneg_v_i8m1(op0, op1) \ +__builtin_rvv_vneg_v_i8m1((vint8m1_t)(op0), (size_t)(op1)) +#define vneg_v_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vneg_v_i8m2(op0, op1) \ +__builtin_rvv_vneg_v_i8m2((vint8m2_t)(op0), (size_t)(op1)) +#define vneg_v_i8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vneg_v_i8m4(op0, op1) \ +__builtin_rvv_vneg_v_i8m4((vint8m4_t)(op0), (size_t)(op1)) 
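+/* Usage sketch (illustrative comment only): vneg and vnot below are unary
+ * pseudo-ops (vneg is vrsub against 0, vnot is vxor against -1). Their _m
+ * forms take (mask, maskedoff, src, vl): where a mask bit is clear, the
+ * result keeps the corresponding element of maskedoff. That makes patterns
+ * like an element-wise absolute value a one-liner; a minimal sketch
+ * (hypothetical names, assumes rv64gcv):
+ *
+ *   #include <riscv_vector.h>
+ *
+ *   // |x| element-wise: negate only the lanes that are negative.
+ *   vint32m1_t vec_abs(vint32m1_t x, size_t vl) {
+ *     vbool32_t neg = vmslt_vx_i32m1_b32(x, 0, vl);  // x[i] < 0
+ *     return vneg_v_i32m1_m(neg, x, x, vl);          // maskedoff = x
+ *   }
+ */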
+#define vneg_v_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vneg_v_i8m8(op0, op1) \ +__builtin_rvv_vneg_v_i8m8((vint8m8_t)(op0), (size_t)(op1)) +#define vneg_v_i8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vneg_v_i8mf2(op0, op1) \ +__builtin_rvv_vneg_v_i8mf2((vint8mf2_t)(op0), (size_t)(op1)) +#define vneg_v_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vneg_v_i8mf4(op0, op1) \ +__builtin_rvv_vneg_v_i8mf4((vint8mf4_t)(op0), (size_t)(op1)) +#define vneg_v_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vneg_v_i8mf8(op0, op1) \ +__builtin_rvv_vneg_v_i8mf8((vint8mf8_t)(op0), (size_t)(op1)) +#define vneg_v_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vneg_v_i16m1(op0, op1) \ +__builtin_rvv_vneg_v_i16m1((vint16m1_t)(op0), (size_t)(op1)) +#define vneg_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vneg_v_i16m2(op0, op1) \ +__builtin_rvv_vneg_v_i16m2((vint16m2_t)(op0), (size_t)(op1)) +#define vneg_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vneg_v_i16m4(op0, op1) \ +__builtin_rvv_vneg_v_i16m4((vint16m4_t)(op0), (size_t)(op1)) +#define vneg_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vneg_v_i16m8(op0, op1) \ +__builtin_rvv_vneg_v_i16m8((vint16m8_t)(op0), (size_t)(op1)) +#define vneg_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vneg_v_i16mf2(op0, op1) \ +__builtin_rvv_vneg_v_i16mf2((vint16mf2_t)(op0), (size_t)(op1)) +#define vneg_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vneg_v_i16mf4(op0, op1) \ +__builtin_rvv_vneg_v_i16mf4((vint16mf4_t)(op0), (size_t)(op1)) +#define vneg_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vneg_v_i32m1(op0, op1) \ +__builtin_rvv_vneg_v_i32m1((vint32m1_t)(op0), (size_t)(op1)) +#define vneg_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vneg_v_i32m2(op0, op1) \ +__builtin_rvv_vneg_v_i32m2((vint32m2_t)(op0), (size_t)(op1)) +#define vneg_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vneg_v_i32m4(op0, op1) \ +__builtin_rvv_vneg_v_i32m4((vint32m4_t)(op0), (size_t)(op1)) +#define vneg_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vneg_v_i32m8(op0, op1) \ +__builtin_rvv_vneg_v_i32m8((vint32m8_t)(op0), (size_t)(op1)) +#define vneg_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define 
vneg_v_i32mf2(op0, op1) \ +__builtin_rvv_vneg_v_i32mf2((vint32mf2_t)(op0), (size_t)(op1)) +#define vneg_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vneg_v_i64m1(op0, op1) \ +__builtin_rvv_vneg_v_i64m1((vint64m1_t)(op0), (size_t)(op1)) +#define vneg_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vneg_v_i64m2(op0, op1) \ +__builtin_rvv_vneg_v_i64m2((vint64m2_t)(op0), (size_t)(op1)) +#define vneg_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vneg_v_i64m4(op0, op1) \ +__builtin_rvv_vneg_v_i64m4((vint64m4_t)(op0), (size_t)(op1)) +#define vneg_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vneg_v_i64m8(op0, op1) \ +__builtin_rvv_vneg_v_i64m8((vint64m8_t)(op0), (size_t)(op1)) +#define vneg_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vneg_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vnot_v_i8m1(op0, op1) \ +__builtin_rvv_vnot_v_i8m1((vint8m1_t)(op0), (size_t)(op1)) +#define vnot_v_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vnot_v_i8m2(op0, op1) \ +__builtin_rvv_vnot_v_i8m2((vint8m2_t)(op0), (size_t)(op1)) +#define vnot_v_i8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vnot_v_i8m4(op0, op1) \ +__builtin_rvv_vnot_v_i8m4((vint8m4_t)(op0), (size_t)(op1)) +#define vnot_v_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vnot_v_i8m8(op0, op1) \ +__builtin_rvv_vnot_v_i8m8((vint8m8_t)(op0), (size_t)(op1)) +#define vnot_v_i8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (size_t)(op3)) +#define vnot_v_i8mf2(op0, op1) \ +__builtin_rvv_vnot_v_i8mf2((vint8mf2_t)(op0), (size_t)(op1)) +#define vnot_v_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vnot_v_i8mf4(op0, op1) \ +__builtin_rvv_vnot_v_i8mf4((vint8mf4_t)(op0), (size_t)(op1)) +#define vnot_v_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vnot_v_i8mf8(op0, op1) \ +__builtin_rvv_vnot_v_i8mf8((vint8mf8_t)(op0), (size_t)(op1)) +#define vnot_v_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vnot_v_i16m1(op0, op1) \ +__builtin_rvv_vnot_v_i16m1((vint16m1_t)(op0), (size_t)(op1)) +#define vnot_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vnot_v_i16m2(op0, op1) \ +__builtin_rvv_vnot_v_i16m2((vint16m2_t)(op0), (size_t)(op1)) +#define vnot_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vnot_v_i16m4(op0, op1) \ +__builtin_rvv_vnot_v_i16m4((vint16m4_t)(op0), (size_t)(op1)) +#define vnot_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i16m4_m((vbool4_t)(op0), 
(vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vnot_v_i16m8(op0, op1) \ +__builtin_rvv_vnot_v_i16m8((vint16m8_t)(op0), (size_t)(op1)) +#define vnot_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vnot_v_i16mf2(op0, op1) \ +__builtin_rvv_vnot_v_i16mf2((vint16mf2_t)(op0), (size_t)(op1)) +#define vnot_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vnot_v_i16mf4(op0, op1) \ +__builtin_rvv_vnot_v_i16mf4((vint16mf4_t)(op0), (size_t)(op1)) +#define vnot_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vnot_v_i32m1(op0, op1) \ +__builtin_rvv_vnot_v_i32m1((vint32m1_t)(op0), (size_t)(op1)) +#define vnot_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vnot_v_i32m2(op0, op1) \ +__builtin_rvv_vnot_v_i32m2((vint32m2_t)(op0), (size_t)(op1)) +#define vnot_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vnot_v_i32m4(op0, op1) \ +__builtin_rvv_vnot_v_i32m4((vint32m4_t)(op0), (size_t)(op1)) +#define vnot_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vnot_v_i32m8(op0, op1) \ +__builtin_rvv_vnot_v_i32m8((vint32m8_t)(op0), (size_t)(op1)) +#define vnot_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vnot_v_i32mf2(op0, op1) \ +__builtin_rvv_vnot_v_i32mf2((vint32mf2_t)(op0), (size_t)(op1)) +#define vnot_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vnot_v_i64m1(op0, op1) \ +__builtin_rvv_vnot_v_i64m1((vint64m1_t)(op0), (size_t)(op1)) +#define vnot_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vnot_v_i64m2(op0, op1) \ +__builtin_rvv_vnot_v_i64m2((vint64m2_t)(op0), (size_t)(op1)) +#define vnot_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vnot_v_i64m4(op0, op1) \ +__builtin_rvv_vnot_v_i64m4((vint64m4_t)(op0), (size_t)(op1)) +#define vnot_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vnot_v_i64m8(op0, op1) \ +__builtin_rvv_vnot_v_i64m8((vint64m8_t)(op0), (size_t)(op1)) +#define vnot_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vlse8_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8m1((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8m2((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), 
(ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8m4((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_i8m8(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8m8((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8mf2((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8mf4((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vlse8_v_i8mf8((const int8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vnot_v_u8m1(op0, op1) \ +__builtin_rvv_vnot_v_u8m1((vuint8m1_t)(op0), (size_t)(op1)) +#define vnot_v_u8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vnot_v_u8m2(op0, op1) \ +__builtin_rvv_vnot_v_u8m2((vuint8m2_t)(op0), (size_t)(op1)) +#define vnot_v_u8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vnot_v_u8m4(op0, op1) \ +__builtin_rvv_vnot_v_u8m4((vuint8m4_t)(op0), (size_t)(op1)) +#define vnot_v_u8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vnot_v_u8m8(op0, op1) \ +__builtin_rvv_vnot_v_u8m8((vuint8m8_t)(op0), (size_t)(op1)) +#define vnot_v_u8m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (size_t)(op3)) +#define vnot_v_u8mf2(op0, op1) \ +__builtin_rvv_vnot_v_u8mf2((vuint8mf2_t)(op0), (size_t)(op1)) +#define vnot_v_u8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vnot_v_u8mf4(op0, op1) \ +__builtin_rvv_vnot_v_u8mf4((vuint8mf4_t)(op0), (size_t)(op1)) +#define vnot_v_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vnot_v_u8mf8(op0, op1) \ +__builtin_rvv_vnot_v_u8mf8((vuint8mf8_t)(op0), (size_t)(op1)) +#define vnot_v_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vnot_v_u16m1(op0, op1) \ +__builtin_rvv_vnot_v_u16m1((vuint16m1_t)(op0), (size_t)(op1)) +#define vnot_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vnot_v_u16m2(op0, op1) \ +__builtin_rvv_vnot_v_u16m2((vuint16m2_t)(op0), 
(size_t)(op1)) +#define vnot_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vnot_v_u16m4(op0, op1) \ +__builtin_rvv_vnot_v_u16m4((vuint16m4_t)(op0), (size_t)(op1)) +#define vnot_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vnot_v_u16m8(op0, op1) \ +__builtin_rvv_vnot_v_u16m8((vuint16m8_t)(op0), (size_t)(op1)) +#define vnot_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vnot_v_u16mf2(op0, op1) \ +__builtin_rvv_vnot_v_u16mf2((vuint16mf2_t)(op0), (size_t)(op1)) +#define vnot_v_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vnot_v_u16mf4(op0, op1) \ +__builtin_rvv_vnot_v_u16mf4((vuint16mf4_t)(op0), (size_t)(op1)) +#define vnot_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vnot_v_u32m1(op0, op1) \ +__builtin_rvv_vnot_v_u32m1((vuint32m1_t)(op0), (size_t)(op1)) +#define vnot_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vnot_v_u32m2(op0, op1) \ +__builtin_rvv_vnot_v_u32m2((vuint32m2_t)(op0), (size_t)(op1)) +#define vnot_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vnot_v_u32m4(op0, op1) \ +__builtin_rvv_vnot_v_u32m4((vuint32m4_t)(op0), (size_t)(op1)) +#define vnot_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vnot_v_u32m8(op0, op1) \ +__builtin_rvv_vnot_v_u32m8((vuint32m8_t)(op0), (size_t)(op1)) +#define vnot_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vnot_v_u32mf2(op0, op1) \ +__builtin_rvv_vnot_v_u32mf2((vuint32mf2_t)(op0), (size_t)(op1)) +#define vnot_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vnot_v_u64m1(op0, op1) \ +__builtin_rvv_vnot_v_u64m1((vuint64m1_t)(op0), (size_t)(op1)) +#define vnot_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vnot_v_u64m2(op0, op1) \ +__builtin_rvv_vnot_v_u64m2((vuint64m2_t)(op0), (size_t)(op1)) +#define vnot_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vnot_v_u64m4(op0, op1) \ +__builtin_rvv_vnot_v_u64m4((vuint64m4_t)(op0), (size_t)(op1)) +#define vnot_v_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vnot_v_u64m8(op0, op1) \ +__builtin_rvv_vnot_v_u64m8((vuint64m8_t)(op0), (size_t)(op1)) +#define vnot_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vnot_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vmmv_m_b8(op0, op1) \ +__builtin_rvv_vmmv_m_b8((vbool8_t)(op0), (size_t)(op1)) +#define vmmv_m_b4(op0, op1) \ +__builtin_rvv_vmmv_m_b4((vbool4_t)(op0), (size_t)(op1)) 
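+/* Usage sketch (illustrative comment only): the vlse<EEW>_v_* macros nearby
+ * are strided loads. Element i is read from base + i * stride, with the
+ * stride given in bytes as a ptrdiff_t (zero and negative strides are
+ * allowed). The usual application is gathering one channel out of
+ * interleaved data; a minimal sketch (hypothetical names, assumes rv64gcv):
+ *
+ *   #include <stdint.h>
+ *   #include <riscv_vector.h>
+ *
+ *   // Extract the red channel from packed RGB bytes (r,g,b,r,g,b,...).
+ *   void take_red(uint8_t *dst, const uint8_t *rgb, size_t n) {
+ *     for (size_t vl; n > 0; n -= vl, rgb += 3 * vl, dst += vl) {
+ *       vl = vsetvl_e8m1(n);
+ *       vuint8m1_t red = vlse8_v_u8m1(rgb, 3, vl);  // every 3rd byte
+ *       vse8_v_u8m1(dst, red, vl);                  // unit-stride store
+ *     }
+ *   }
+ */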
+#define vmmv_m_b2(op0, op1) \ +__builtin_rvv_vmmv_m_b2((vbool2_t)(op0), (size_t)(op1)) +#define vmmv_m_b1(op0, op1) \ +__builtin_rvv_vmmv_m_b1((vbool1_t)(op0), (size_t)(op1)) +#define vmmv_m_b16(op0, op1) \ +__builtin_rvv_vmmv_m_b16((vbool16_t)(op0), (size_t)(op1)) +#define vmmv_m_b32(op0, op1) \ +__builtin_rvv_vmmv_m_b32((vbool32_t)(op0), (size_t)(op1)) +#define vmmv_m_b64(op0, op1) \ +__builtin_rvv_vmmv_m_b64((vbool64_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16m2(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u16m2((vuint8m1_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u16m4(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u16m4((vuint8m2_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u16m8(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u16m8((vuint8m4_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u16m1(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u16m1((vuint8mf2_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u16mf2(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u16mf2((vuint8mf4_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u16mf4(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u16mf4((vuint8mf8_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u32m2(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u32m2((vuint16m1_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u32m4(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u32m4((vuint16m2_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u32m8(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u32m8((vuint16m4_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u32m1(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u32m1((vuint16mf2_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u32mf2(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u32mf2((vuint16mf4_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u64m2(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u64m2((vuint32m1_t)(op0), (size_t)(op1)) +#define 
vwcvtu_x_x_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u64m4(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u64m4((vuint32m2_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u64m8(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u64m8((vuint32m4_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vwcvtu_x_x_v_u64m1(op0, op1) \ +__builtin_rvv_vwcvtu_x_x_v_u64m1((vuint32mf2_t)(op0), (size_t)(op1)) +#define vwcvtu_x_x_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vwcvtu_x_x_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i8m1(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i8m1((vint16m2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i8m2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i8m2((vint16m4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i8m4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i8m4((vint16m8_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i8mf2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i8mf2((vint16m1_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i8mf4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i8mf4((vint16mf2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i8mf8(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i8mf8((vint16mf4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i16m1(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i16m1((vint32m2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i16m2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i16m2((vint32m4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i16m4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i16m4((vint32m8_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i16mf2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i16mf2((vint32m1_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), 
(vint32m1_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i16mf4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i16mf4((vint32mf2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i32m1(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i32m1((vint64m2_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i32m2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i32m2((vint64m4_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i32m4(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i32m4((vint64m8_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vncvt_x_x_w_i32mf2(op0, op1) \ +__builtin_rvv_vncvt_x_x_w_i32mf2((vint64m1_t)(op0), (size_t)(op1)) +#define vncvt_x_x_w_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vncvt_x_x_w_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vle16_v_i16m1(op0, op1) \ +__builtin_rvv_vle16_v_i16m1((const int16_t *)(op0), (size_t)(op1)) +#define vle16_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (size_t)(op3)) +#define vle16_v_i16m2(op0, op1) \ +__builtin_rvv_vle16_v_i16m2((const int16_t *)(op0), (size_t)(op1)) +#define vle16_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (size_t)(op3)) +#define vle16_v_i16m4(op0, op1) \ +__builtin_rvv_vle16_v_i16m4((const int16_t *)(op0), (size_t)(op1)) +#define vle16_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (size_t)(op3)) +#define vle16_v_i16m8(op0, op1) \ +__builtin_rvv_vle16_v_i16m8((const int16_t *)(op0), (size_t)(op1)) +#define vle16_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (size_t)(op3)) +#define vle16_v_i16mf2(op0, op1) \ +__builtin_rvv_vle16_v_i16mf2((const int16_t *)(op0), (size_t)(op1)) +#define vle16_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (size_t)(op3)) +#define vle16_v_i16mf4(op0, op1) \ +__builtin_rvv_vle16_v_i16mf4((const int16_t *)(op0), (size_t)(op1)) +#define vle16_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (size_t)(op3)) +#define vle16_v_u16m1(op0, op1) \ +__builtin_rvv_vle16_v_u16m1((const uint16_t *)(op0), (size_t)(op1)) +#define vle16_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (size_t)(op3)) +#define vle16_v_u16m2(op0, op1) \ +__builtin_rvv_vle16_v_u16m2((const uint16_t *)(op0), (size_t)(op1)) +#define vle16_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (size_t)(op3)) +#define vle16_v_u16m4(op0, op1) \ +__builtin_rvv_vle16_v_u16m4((const uint16_t *)(op0), (size_t)(op1)) +#define 
vle16_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (size_t)(op3)) +#define vle16_v_u16m8(op0, op1) \ +__builtin_rvv_vle16_v_u16m8((const uint16_t *)(op0), (size_t)(op1)) +#define vle16_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (size_t)(op3)) +#define vle16_v_u16mf2(op0, op1) \ +__builtin_rvv_vle16_v_u16mf2((const uint16_t *)(op0), (size_t)(op1)) +#define vle16_v_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (size_t)(op3)) +#define vle16_v_u16mf4(op0, op1) \ +__builtin_rvv_vle16_v_u16mf4((const uint16_t *)(op0), (size_t)(op1)) +#define vle16_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (size_t)(op3)) +#define vle32_v_i32m1(op0, op1) \ +__builtin_rvv_vle32_v_i32m1((const int32_t *)(op0), (size_t)(op1)) +#define vle32_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (size_t)(op3)) +#define vle32_v_i32m2(op0, op1) \ +__builtin_rvv_vle32_v_i32m2((const int32_t *)(op0), (size_t)(op1)) +#define vle32_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (size_t)(op3)) +#define vle32_v_i32m4(op0, op1) \ +__builtin_rvv_vle32_v_i32m4((const int32_t *)(op0), (size_t)(op1)) +#define vle32_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (size_t)(op3)) +#define vle32_v_i32m8(op0, op1) \ +__builtin_rvv_vle32_v_i32m8((const int32_t *)(op0), (size_t)(op1)) +#define vle32_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (size_t)(op3)) +#define vle32_v_i32mf2(op0, op1) \ +__builtin_rvv_vle32_v_i32mf2((const int32_t *)(op0), (size_t)(op1)) +#define vle32_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (size_t)(op3)) +#define vle32_v_u32m1(op0, op1) \ +__builtin_rvv_vle32_v_u32m1((const uint32_t *)(op0), (size_t)(op1)) +#define vle32_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (size_t)(op3)) +#define vle32_v_u32m2(op0, op1) \ +__builtin_rvv_vle32_v_u32m2((const uint32_t *)(op0), (size_t)(op1)) +#define vle32_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (size_t)(op3)) +#define vle32_v_u32m4(op0, op1) \ +__builtin_rvv_vle32_v_u32m4((const uint32_t *)(op0), (size_t)(op1)) +#define vle32_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (size_t)(op3)) +#define vle32_v_u32m8(op0, op1) \ +__builtin_rvv_vle32_v_u32m8((const uint32_t *)(op0), (size_t)(op1)) +#define vle32_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (size_t)(op3)) +#define vle32_v_u32mf2(op0, op1) \ +__builtin_rvv_vle32_v_u32mf2((const uint32_t *)(op0), (size_t)(op1)) +#define vle32_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (const uint32_t *)(op2), (size_t)(op3)) +#define vlse8_v_u8m1(op0, 
op1, op2) \ +__builtin_rvv_vlse8_v_u8m1((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_u8m2(op0, op1, op2) \ +__builtin_rvv_vlse8_v_u8m2((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_u8m4(op0, op1, op2) \ +__builtin_rvv_vlse8_v_u8m4((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_u8m8(op0, op1, op2) \ +__builtin_rvv_vlse8_v_u8m8((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8m8_m((vbool1_t)(op0), (vuint8m8_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_u8mf2(op0, op1, op2) \ +__builtin_rvv_vlse8_v_u8mf2((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_u8mf4(op0, op1, op2) \ +__builtin_rvv_vlse8_v_u8mf4((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse8_v_u8mf8(op0, op1, op2) \ +__builtin_rvv_vlse8_v_u8mf8((const uint8_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse8_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse8_v_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vle64_v_i64m1(op0, op1) \ +__builtin_rvv_vle64_v_i64m1((const int64_t *)(op0), (size_t)(op1)) +#define vle64_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (size_t)(op3)) +#define vle64_v_i64m2(op0, op1) \ +__builtin_rvv_vle64_v_i64m2((const int64_t *)(op0), (size_t)(op1)) +#define vle64_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (size_t)(op3)) +#define vle64_v_i64m4(op0, op1) \ +__builtin_rvv_vle64_v_i64m4((const int64_t *)(op0), (size_t)(op1)) +#define vle64_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (size_t)(op3)) +#define vle64_v_i64m8(op0, op1) \ +__builtin_rvv_vle64_v_i64m8((const int64_t *)(op0), (size_t)(op1)) +#define vle64_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (size_t)(op3)) +#define vle64_v_u64m1(op0, op1) \ +__builtin_rvv_vle64_v_u64m1((const uint64_t *)(op0), (size_t)(op1)) +#define vle64_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (const uint64_t *)(op2), (size_t)(op3)) +#define vle64_v_u64m2(op0, op1) \ +__builtin_rvv_vle64_v_u64m2((const uint64_t *)(op0), (size_t)(op1)) +#define vle64_v_u64m2_m(op0, op1, op2, op3) \ 
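/* A usage sketch, not part of the patch: the vlse8_v_* strided loads
 * above take a signed byte stride (ptrdiff_t). load_red is an
 * illustrative name. */
#include <riscv_vector.h>
#include <stdint.h>
vuint8m1_t load_red(const uint8_t *rgba, size_t vl) {
  return vlse8_v_u8m1(rgba, 4, vl); /* every 4th byte: R of each RGBA pixel */
}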
+__builtin_rvv_vle64_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (const uint64_t *)(op2), (size_t)(op3)) +#define vle64_v_u64m4(op0, op1) \ +__builtin_rvv_vle64_v_u64m4((const uint64_t *)(op0), (size_t)(op1)) +#define vle64_v_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (const uint64_t *)(op2), (size_t)(op3)) +#define vle64_v_u64m8(op0, op1) \ +__builtin_rvv_vle64_v_u64m8((const uint64_t *)(op0), (size_t)(op1)) +#define vle64_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (const uint64_t *)(op2), (size_t)(op3)) +#define vse16_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vse16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (size_t)(op2)) +#define vse16_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vse16_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vse16_v_i16m2((int16_t *)(op0), (vint16m2_t)(op1), (size_t)(op2)) +#define vse16_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vse16_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vse16_v_i16m4((int16_t *)(op0), (vint16m4_t)(op1), (size_t)(op2)) +#define vse16_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vse16_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vse16_v_i16m8((int16_t *)(op0), (vint16m8_t)(op1), (size_t)(op2)) +#define vse16_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_i16m8_m((vbool2_t)(op0), (int16_t *)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vse16_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vse16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (size_t)(op2)) +#define vse16_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vse16_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vse16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (size_t)(op2)) +#define vse16_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vse16_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vse16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vse16_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vse16_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vse16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vse16_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vse16_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vse16_v_u16m4((uint16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vse16_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vse16_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vse16_v_u16m8((uint16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vse16_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_u16m8_m((vbool2_t)(op0), (uint16_t *)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vse16_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vse16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vse16_v_u16mf2_m(op0, op1, op2, op3) \ 
+__builtin_rvv_vse16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vse16_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vse16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vse16_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vse32_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vse32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (size_t)(op2)) +#define vse32_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vse32_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vse32_v_i32m2((int32_t *)(op0), (vint32m2_t)(op1), (size_t)(op2)) +#define vse32_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vse32_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vse32_v_i32m4((int32_t *)(op0), (vint32m4_t)(op1), (size_t)(op2)) +#define vse32_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vse32_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vse32_v_i32m8((int32_t *)(op0), (vint32m8_t)(op1), (size_t)(op2)) +#define vse32_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vse32_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vse32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (size_t)(op2)) +#define vse32_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vse32_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vse32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vse32_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vse32_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vse32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vse32_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vse32_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vse32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vse32_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vse32_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vse32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vse32_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vse32_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vse32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vse32_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vluxei8_v_i8m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8m1((const int8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (const int8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_i8m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8m2((const int8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define 
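/* A usage sketch, not part of the patch: the _m store forms above write
 * only the active lanes; memory for inactive lanes is left untouched. */
#include <riscv_vector.h>
#include <stdint.h>
void store_active(vbool32_t m, uint32_t *dst, vuint32m1_t v, size_t vl) {
  vse32_v_u32m1_m(m, dst, v, vl); /* masked unit-stride store */
}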
vluxei8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (const int8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i8m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8m4((const int8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vluxei8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (const int8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxei8_v_i8m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8m8((const int8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2)) +#define vluxei8_v_i8m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8m8_m((vbool1_t)(op0), (vint8m8_t)(op1), (const int8_t *)(op2), (vuint8m8_t)(op3), (size_t)(op4)) +#define vluxei8_v_i8mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8mf2((const int8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (const int8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_i8mf4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8mf4((const int8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (const int8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_i8mf8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_i8mf8((const int8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (const int8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vse64_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vse64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (size_t)(op2)) +#define vse64_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vse64_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vse64_v_i64m2((int64_t *)(op0), (vint64m2_t)(op1), (size_t)(op2)) +#define vse64_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vse64_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vse64_v_i64m4((int64_t *)(op0), (vint64m4_t)(op1), (size_t)(op2)) +#define vse64_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vse64_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vse64_v_i64m8((int64_t *)(op0), (vint64m8_t)(op1), (size_t)(op2)) +#define vse64_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vse64_v_u64m1(op0, op1, op2) \ +__builtin_rvv_vse64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vse64_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vse64_v_u64m2(op0, op1, op2) \ +__builtin_rvv_vse64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vse64_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vse64_v_u64m4(op0, op1, op2) \ +__builtin_rvv_vse64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vse64_v_u64m4_m(op0, op1, op2, op3) \ 
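/* A usage sketch, not part of the patch: the vluxei8_v_* wrappers above
 * are unordered indexed (gather) loads; each index element is an
 * unsigned byte offset added to the base pointer. */
#include <riscv_vector.h>
#include <stdint.h>
vint8m1_t gather_i8(const int8_t *base, vuint8m1_t byte_off, size_t vl) {
  return vluxei8_v_i8m1(base, byte_off, vl);
}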
+__builtin_rvv_vse64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vse64_v_u64m8(op0, op1, op2) \ +__builtin_rvv_vse64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vse64_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vlse16_v_i16m1(op0, op1, op2) \ +__builtin_rvv_vlse16_v_i16m1((const int16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_i16m2(op0, op1, op2) \ +__builtin_rvv_vlse16_v_i16m2((const int16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_i16m4(op0, op1, op2) \ +__builtin_rvv_vlse16_v_i16m4((const int16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_i16m8(op0, op1, op2) \ +__builtin_rvv_vlse16_v_i16m8((const int16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_i16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_i16mf2(op0, op1, op2) \ +__builtin_rvv_vlse16_v_i16mf2((const int16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_i16mf4(op0, op1, op2) \ +__builtin_rvv_vlse16_v_i16mf4((const int16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_u16m1(op0, op1, op2) \ +__builtin_rvv_vlse16_v_u16m1((const uint16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_u16m2(op0, op1, op2) \ +__builtin_rvv_vlse16_v_u16m2((const uint16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_u16m4(op0, op1, op2) \ +__builtin_rvv_vlse16_v_u16m4((const uint16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_u16m8(op0, op1, op2) \ +__builtin_rvv_vlse16_v_u16m8((const uint16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_u16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_u16mf2(op0, op1, op2) \ +__builtin_rvv_vlse16_v_u16mf2((const uint16_t 
*)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_u16mf4(op0, op1, op2) \ +__builtin_rvv_vlse16_v_u16mf4((const uint16_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_i32m1(op0, op1, op2) \ +__builtin_rvv_vlse32_v_i32m1((const int32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_i32m2(op0, op1, op2) \ +__builtin_rvv_vlse32_v_i32m2((const int32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_i32m4(op0, op1, op2) \ +__builtin_rvv_vlse32_v_i32m4((const int32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_i32m8(op0, op1, op2) \ +__builtin_rvv_vlse32_v_i32m8((const int32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_i32mf2(op0, op1, op2) \ +__builtin_rvv_vlse32_v_i32mf2((const int32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_u32m1(op0, op1, op2) \ +__builtin_rvv_vlse32_v_u32m1((const uint32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_u32m2(op0, op1, op2) \ +__builtin_rvv_vlse32_v_u32m2((const uint32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_u32m4(op0, op1, op2) \ +__builtin_rvv_vlse32_v_u32m4((const uint32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_u32m8(op0, op1, op2) \ +__builtin_rvv_vlse32_v_u32m8((const uint32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_u32mf2(op0, op1, op2) \ +__builtin_rvv_vlse32_v_u32mf2((const uint32_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_u32mf2_m((vbool64_t)(op0), 
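/* A usage sketch, not part of the patch: a strided vlse16 load can
 * deinterleave one channel of interleaved stereo int16 samples; the
 * stride is given in bytes. */
#include <riscv_vector.h>
#include <stdint.h>
vint16m1_t left_channel(const int16_t *stereo, size_t vl) {
  return vlse16_v_i16m1(stereo, 4, vl); /* 4 bytes = 2 samples apart */
}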
(vuint32mf2_t)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_i64m1(op0, op1, op2) \ +__builtin_rvv_vlse64_v_i64m1((const int64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_i64m2(op0, op1, op2) \ +__builtin_rvv_vlse64_v_i64m2((const int64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_i64m4(op0, op1, op2) \ +__builtin_rvv_vlse64_v_i64m4((const int64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse64_v_i64m8(op0, op1, op2) \ +__builtin_rvv_vlse64_v_i64m8((const int64_t *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse64_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vfirst_m_b8(op0, op1) \ +__builtin_rvv_vfirst_m_b8((vbool8_t)(op0), (size_t)(op1)) +#define vfirst_m_b8_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vfirst_m_b4(op0, op1) \ +__builtin_rvv_vfirst_m_b4((vbool4_t)(op0), (size_t)(op1)) +#define vfirst_m_b4_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vfirst_m_b2(op0, op1) \ +__builtin_rvv_vfirst_m_b2((vbool2_t)(op0), (size_t)(op1)) +#define vfirst_m_b2_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vfirst_m_b1(op0, op1) \ +__builtin_rvv_vfirst_m_b1((vbool1_t)(op0), (size_t)(op1)) +#define vfirst_m_b1_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vfirst_m_b16(op0, op1) \ +__builtin_rvv_vfirst_m_b16((vbool16_t)(op0), (size_t)(op1)) +#define vfirst_m_b16_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vfirst_m_b32(op0, op1) \ +__builtin_rvv_vfirst_m_b32((vbool32_t)(op0), (size_t)(op1)) +#define vfirst_m_b32_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vfirst_m_b64(op0, op1) \ +__builtin_rvv_vfirst_m_b64((vbool64_t)(op0), (size_t)(op1)) +#define vfirst_m_b64_m(op0, op1, op2) \ +__builtin_rvv_vfirst_m_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vle1_v_b8(op0, op1) \ +__builtin_rvv_vle1_v_b8((const uint8_t *)(op0), (size_t)(op1)) +#define vle1_v_b4(op0, op1) \ +__builtin_rvv_vle1_v_b4((const uint8_t *)(op0), (size_t)(op1)) +#define vle1_v_b2(op0, op1) \ +__builtin_rvv_vle1_v_b2((const uint8_t *)(op0), (size_t)(op1)) +#define vle1_v_b1(op0, op1) \ +__builtin_rvv_vle1_v_b1((const uint8_t *)(op0), (size_t)(op1)) +#define vle1_v_b16(op0, op1) \ +__builtin_rvv_vle1_v_b16((const uint8_t *)(op0), (size_t)(op1)) +#define vle1_v_b32(op0, op1) \ +__builtin_rvv_vle1_v_b32((const uint8_t *)(op0), (size_t)(op1)) +#define vle1_v_b64(op0, op1) \ +__builtin_rvv_vle1_v_b64((const uint8_t *)(op0), (size_t)(op1)) +#define vlmul_ext_v_i8mf2_i8m1(op0) \ 
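/* A usage sketch, not part of the patch: vle1_v_b8 loads packed mask
 * bits from memory, and vfirst_m_b8 returns the index of the first set
 * bit, or -1 when no bit in [0, vl) is set. */
#include <riscv_vector.h>
#include <stdint.h>
long first_set(const uint8_t *packed_bits, size_t vl) {
  vbool8_t m = vle1_v_b8(packed_bits, vl);
  return vfirst_m_b8(m, vl);
}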
+__builtin_rvv_vlmul_ext_v_i8mf2_i8m1((vint8mf2_t)(op0)) +#define vlmul_ext_v_i8mf4_i8m1(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf4_i8m1((vint8mf4_t)(op0)) +#define vlmul_ext_v_i8mf8_i8m1(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf8_i8m1((vint8mf8_t)(op0)) +#define vlmul_ext_v_i16mf2_i16m1(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf2_i16m1((vint16mf2_t)(op0)) +#define vlmul_ext_v_i16mf4_i16m1(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf4_i16m1((vint16mf4_t)(op0)) +#define vlmul_ext_v_i32mf2_i32m1(op0) \ +__builtin_rvv_vlmul_ext_v_i32mf2_i32m1((vint32mf2_t)(op0)) +#define vlmul_ext_v_i8mf4_i8mf2(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf4_i8mf2((vint8mf4_t)(op0)) +#define vlmul_ext_v_i8mf8_i8mf2(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf8_i8mf2((vint8mf8_t)(op0)) +#define vlmul_ext_v_i16mf4_i16mf2(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf4_i16mf2((vint16mf4_t)(op0)) +#define vlmul_ext_v_i8mf8_i8mf4(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf8_i8mf4((vint8mf8_t)(op0)) +#define vlmul_ext_v_i8m1_i8m2(op0) \ +__builtin_rvv_vlmul_ext_v_i8m1_i8m2((vint8m1_t)(op0)) +#define vlmul_ext_v_i8mf2_i8m2(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf2_i8m2((vint8mf2_t)(op0)) +#define vlmul_ext_v_i8mf4_i8m2(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf4_i8m2((vint8mf4_t)(op0)) +#define vlmul_ext_v_i8mf8_i8m2(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf8_i8m2((vint8mf8_t)(op0)) +#define vlmul_ext_v_i16m1_i16m2(op0) \ +__builtin_rvv_vlmul_ext_v_i16m1_i16m2((vint16m1_t)(op0)) +#define vlmul_ext_v_i16mf2_i16m2(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf2_i16m2((vint16mf2_t)(op0)) +#define vlmul_ext_v_i16mf4_i16m2(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf4_i16m2((vint16mf4_t)(op0)) +#define vlmul_ext_v_i32m1_i32m2(op0) \ +__builtin_rvv_vlmul_ext_v_i32m1_i32m2((vint32m1_t)(op0)) +#define vlmul_ext_v_i32mf2_i32m2(op0) \ +__builtin_rvv_vlmul_ext_v_i32mf2_i32m2((vint32mf2_t)(op0)) +#define vlmul_ext_v_i64m1_i64m2(op0) \ +__builtin_rvv_vlmul_ext_v_i64m1_i64m2((vint64m1_t)(op0)) +#define vlmul_ext_v_i8m1_i8m4(op0) \ +__builtin_rvv_vlmul_ext_v_i8m1_i8m4((vint8m1_t)(op0)) +#define vlmul_ext_v_i8m2_i8m4(op0) \ +__builtin_rvv_vlmul_ext_v_i8m2_i8m4((vint8m2_t)(op0)) +#define vlmul_ext_v_i8mf2_i8m4(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf2_i8m4((vint8mf2_t)(op0)) +#define vlmul_ext_v_i8mf4_i8m4(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf4_i8m4((vint8mf4_t)(op0)) +#define vlmul_ext_v_i8mf8_i8m4(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf8_i8m4((vint8mf8_t)(op0)) +#define vlmul_ext_v_i16m1_i16m4(op0) \ +__builtin_rvv_vlmul_ext_v_i16m1_i16m4((vint16m1_t)(op0)) +#define vlmul_ext_v_i16m2_i16m4(op0) \ +__builtin_rvv_vlmul_ext_v_i16m2_i16m4((vint16m2_t)(op0)) +#define vlmul_ext_v_i16mf2_i16m4(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf2_i16m4((vint16mf2_t)(op0)) +#define vlmul_ext_v_i16mf4_i16m4(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf4_i16m4((vint16mf4_t)(op0)) +#define vlmul_ext_v_i32m1_i32m4(op0) \ +__builtin_rvv_vlmul_ext_v_i32m1_i32m4((vint32m1_t)(op0)) +#define vlmul_ext_v_i32m2_i32m4(op0) \ +__builtin_rvv_vlmul_ext_v_i32m2_i32m4((vint32m2_t)(op0)) +#define vlmul_ext_v_i32mf2_i32m4(op0) \ +__builtin_rvv_vlmul_ext_v_i32mf2_i32m4((vint32mf2_t)(op0)) +#define vlmul_ext_v_i64m1_i64m4(op0) \ +__builtin_rvv_vlmul_ext_v_i64m1_i64m4((vint64m1_t)(op0)) +#define vlmul_ext_v_i64m2_i64m4(op0) \ +__builtin_rvv_vlmul_ext_v_i64m2_i64m4((vint64m2_t)(op0)) +#define vlmul_ext_v_i8m1_i8m8(op0) \ +__builtin_rvv_vlmul_ext_v_i8m1_i8m8((vint8m1_t)(op0)) +#define vlmul_ext_v_i8m2_i8m8(op0) \ +__builtin_rvv_vlmul_ext_v_i8m2_i8m8((vint8m2_t)(op0)) +#define vlmul_ext_v_i8m4_i8m8(op0) \ 
+__builtin_rvv_vlmul_ext_v_i8m4_i8m8((vint8m4_t)(op0)) +#define vlmul_ext_v_i8mf2_i8m8(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf2_i8m8((vint8mf2_t)(op0)) +#define vlmul_ext_v_i8mf4_i8m8(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf4_i8m8((vint8mf4_t)(op0)) +#define vlmul_ext_v_i8mf8_i8m8(op0) \ +__builtin_rvv_vlmul_ext_v_i8mf8_i8m8((vint8mf8_t)(op0)) +#define vlmul_ext_v_i16m1_i16m8(op0) \ +__builtin_rvv_vlmul_ext_v_i16m1_i16m8((vint16m1_t)(op0)) +#define vlmul_ext_v_i16m2_i16m8(op0) \ +__builtin_rvv_vlmul_ext_v_i16m2_i16m8((vint16m2_t)(op0)) +#define vlmul_ext_v_i16m4_i16m8(op0) \ +__builtin_rvv_vlmul_ext_v_i16m4_i16m8((vint16m4_t)(op0)) +#define vlmul_ext_v_i16mf2_i16m8(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf2_i16m8((vint16mf2_t)(op0)) +#define vlmul_ext_v_i16mf4_i16m8(op0) \ +__builtin_rvv_vlmul_ext_v_i16mf4_i16m8((vint16mf4_t)(op0)) +#define vlmul_ext_v_i32m1_i32m8(op0) \ +__builtin_rvv_vlmul_ext_v_i32m1_i32m8((vint32m1_t)(op0)) +#define vlmul_ext_v_i32m2_i32m8(op0) \ +__builtin_rvv_vlmul_ext_v_i32m2_i32m8((vint32m2_t)(op0)) +#define vlmul_ext_v_i32m4_i32m8(op0) \ +__builtin_rvv_vlmul_ext_v_i32m4_i32m8((vint32m4_t)(op0)) +#define vlmul_ext_v_i32mf2_i32m8(op0) \ +__builtin_rvv_vlmul_ext_v_i32mf2_i32m8((vint32mf2_t)(op0)) +#define vlmul_ext_v_i64m1_i64m8(op0) \ +__builtin_rvv_vlmul_ext_v_i64m1_i64m8((vint64m1_t)(op0)) +#define vlmul_ext_v_i64m2_i64m8(op0) \ +__builtin_rvv_vlmul_ext_v_i64m2_i64m8((vint64m2_t)(op0)) +#define vlmul_ext_v_i64m4_i64m8(op0) \ +__builtin_rvv_vlmul_ext_v_i64m4_i64m8((vint64m4_t)(op0)) +#define vlmul_ext_v_u8mf2_u8m1(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf2_u8m1((vuint8mf2_t)(op0)) +#define vlmul_ext_v_u8mf4_u8m1(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf4_u8m1((vuint8mf4_t)(op0)) +#define vlmul_ext_v_u8mf8_u8m1(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf8_u8m1((vuint8mf8_t)(op0)) +#define vlmul_ext_v_u16mf2_u16m1(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf2_u16m1((vuint16mf2_t)(op0)) +#define vlmul_ext_v_u16mf4_u16m1(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf4_u16m1((vuint16mf4_t)(op0)) +#define vlmul_ext_v_u32mf2_u32m1(op0) \ +__builtin_rvv_vlmul_ext_v_u32mf2_u32m1((vuint32mf2_t)(op0)) +#define vlmul_ext_v_u8mf4_u8mf2(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf4_u8mf2((vuint8mf4_t)(op0)) +#define vlmul_ext_v_u8mf8_u8mf2(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf8_u8mf2((vuint8mf8_t)(op0)) +#define vlmul_ext_v_u16mf4_u16mf2(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf4_u16mf2((vuint16mf4_t)(op0)) +#define vlmul_ext_v_u8mf8_u8mf4(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf8_u8mf4((vuint8mf8_t)(op0)) +#define vlmul_ext_v_u8m1_u8m2(op0) \ +__builtin_rvv_vlmul_ext_v_u8m1_u8m2((vuint8m1_t)(op0)) +#define vlmul_ext_v_u8mf2_u8m2(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf2_u8m2((vuint8mf2_t)(op0)) +#define vlmul_ext_v_u8mf4_u8m2(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf4_u8m2((vuint8mf4_t)(op0)) +#define vlmul_ext_v_u8mf8_u8m2(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf8_u8m2((vuint8mf8_t)(op0)) +#define vlmul_ext_v_u16m1_u16m2(op0) \ +__builtin_rvv_vlmul_ext_v_u16m1_u16m2((vuint16m1_t)(op0)) +#define vlmul_ext_v_u16mf2_u16m2(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf2_u16m2((vuint16mf2_t)(op0)) +#define vlmul_ext_v_u16mf4_u16m2(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf4_u16m2((vuint16mf4_t)(op0)) +#define vlmul_ext_v_u32m1_u32m2(op0) \ +__builtin_rvv_vlmul_ext_v_u32m1_u32m2((vuint32m1_t)(op0)) +#define vlmul_ext_v_u32mf2_u32m2(op0) \ +__builtin_rvv_vlmul_ext_v_u32mf2_u32m2((vuint32mf2_t)(op0)) +#define vlmul_ext_v_u64m1_u64m2(op0) \ +__builtin_rvv_vlmul_ext_v_u64m1_u64m2((vuint64m1_t)(op0)) +#define vlmul_ext_v_u8m1_u8m4(op0) \ 
+__builtin_rvv_vlmul_ext_v_u8m1_u8m4((vuint8m1_t)(op0)) +#define vlmul_ext_v_u8m2_u8m4(op0) \ +__builtin_rvv_vlmul_ext_v_u8m2_u8m4((vuint8m2_t)(op0)) +#define vlmul_ext_v_u8mf2_u8m4(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf2_u8m4((vuint8mf2_t)(op0)) +#define vlmul_ext_v_u8mf4_u8m4(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf4_u8m4((vuint8mf4_t)(op0)) +#define vlmul_ext_v_u8mf8_u8m4(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf8_u8m4((vuint8mf8_t)(op0)) +#define vlmul_ext_v_u16m1_u16m4(op0) \ +__builtin_rvv_vlmul_ext_v_u16m1_u16m4((vuint16m1_t)(op0)) +#define vlmul_ext_v_u16m2_u16m4(op0) \ +__builtin_rvv_vlmul_ext_v_u16m2_u16m4((vuint16m2_t)(op0)) +#define vlmul_ext_v_u16mf2_u16m4(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf2_u16m4((vuint16mf2_t)(op0)) +#define vlmul_ext_v_u16mf4_u16m4(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf4_u16m4((vuint16mf4_t)(op0)) +#define vlmul_ext_v_u32m1_u32m4(op0) \ +__builtin_rvv_vlmul_ext_v_u32m1_u32m4((vuint32m1_t)(op0)) +#define vlmul_ext_v_u32m2_u32m4(op0) \ +__builtin_rvv_vlmul_ext_v_u32m2_u32m4((vuint32m2_t)(op0)) +#define vlmul_ext_v_u32mf2_u32m4(op0) \ +__builtin_rvv_vlmul_ext_v_u32mf2_u32m4((vuint32mf2_t)(op0)) +#define vlmul_ext_v_u64m1_u64m4(op0) \ +__builtin_rvv_vlmul_ext_v_u64m1_u64m4((vuint64m1_t)(op0)) +#define vlmul_ext_v_u64m2_u64m4(op0) \ +__builtin_rvv_vlmul_ext_v_u64m2_u64m4((vuint64m2_t)(op0)) +#define vlmul_ext_v_u8m1_u8m8(op0) \ +__builtin_rvv_vlmul_ext_v_u8m1_u8m8((vuint8m1_t)(op0)) +#define vlmul_ext_v_u8m2_u8m8(op0) \ +__builtin_rvv_vlmul_ext_v_u8m2_u8m8((vuint8m2_t)(op0)) +#define vlmul_ext_v_u8m4_u8m8(op0) \ +__builtin_rvv_vlmul_ext_v_u8m4_u8m8((vuint8m4_t)(op0)) +#define vlmul_ext_v_u8mf2_u8m8(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf2_u8m8((vuint8mf2_t)(op0)) +#define vlmul_ext_v_u8mf4_u8m8(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf4_u8m8((vuint8mf4_t)(op0)) +#define vlmul_ext_v_u8mf8_u8m8(op0) \ +__builtin_rvv_vlmul_ext_v_u8mf8_u8m8((vuint8mf8_t)(op0)) +#define vlmul_ext_v_u16m1_u16m8(op0) \ +__builtin_rvv_vlmul_ext_v_u16m1_u16m8((vuint16m1_t)(op0)) +#define vlmul_ext_v_u16m2_u16m8(op0) \ +__builtin_rvv_vlmul_ext_v_u16m2_u16m8((vuint16m2_t)(op0)) +#define vlmul_ext_v_u16m4_u16m8(op0) \ +__builtin_rvv_vlmul_ext_v_u16m4_u16m8((vuint16m4_t)(op0)) +#define vlmul_ext_v_u16mf2_u16m8(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf2_u16m8((vuint16mf2_t)(op0)) +#define vlmul_ext_v_u16mf4_u16m8(op0) \ +__builtin_rvv_vlmul_ext_v_u16mf4_u16m8((vuint16mf4_t)(op0)) +#define vlmul_ext_v_u32m1_u32m8(op0) \ +__builtin_rvv_vlmul_ext_v_u32m1_u32m8((vuint32m1_t)(op0)) +#define vlmul_ext_v_u32m2_u32m8(op0) \ +__builtin_rvv_vlmul_ext_v_u32m2_u32m8((vuint32m2_t)(op0)) +#define vlmul_ext_v_u32m4_u32m8(op0) \ +__builtin_rvv_vlmul_ext_v_u32m4_u32m8((vuint32m4_t)(op0)) +#define vlmul_ext_v_u32mf2_u32m8(op0) \ +__builtin_rvv_vlmul_ext_v_u32mf2_u32m8((vuint32mf2_t)(op0)) +#define vlmul_ext_v_u64m1_u64m8(op0) \ +__builtin_rvv_vlmul_ext_v_u64m1_u64m8((vuint64m1_t)(op0)) +#define vlmul_ext_v_u64m2_u64m8(op0) \ +__builtin_rvv_vlmul_ext_v_u64m2_u64m8((vuint64m2_t)(op0)) +#define vlmul_ext_v_u64m4_u64m8(op0) \ +__builtin_rvv_vlmul_ext_v_u64m4_u64m8((vuint64m4_t)(op0)) +#define vlmul_trunc_v_i8m1_i8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m1_i8mf2((vint8m1_t)(op0)) +#define vlmul_trunc_v_i8m2_i8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m2_i8mf2((vint8m2_t)(op0)) +#define vlmul_trunc_v_i8m4_i8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m4_i8mf2((vint8m4_t)(op0)) +#define vlmul_trunc_v_i8m8_i8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m8_i8mf2((vint8m8_t)(op0)) +#define vlmul_trunc_v_i16m1_i16mf2(op0) \ 
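/* A usage sketch, not part of the patch: vlmul_ext_v_* retags a value
 * into a larger register group; the existing elements are preserved and
 * the newly exposed tail elements are unspecified. */
#include <riscv_vector.h>
vint8m1_t widen_group(vint8mf2_t v) {
  return vlmul_ext_v_i8mf2_i8m1(v); /* no data movement is implied */
}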
+__builtin_rvv_vlmul_trunc_v_i16m1_i16mf2((vint16m1_t)(op0)) +#define vlmul_trunc_v_i16m2_i16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m2_i16mf2((vint16m2_t)(op0)) +#define vlmul_trunc_v_i16m4_i16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m4_i16mf2((vint16m4_t)(op0)) +#define vlmul_trunc_v_i16m8_i16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m8_i16mf2((vint16m8_t)(op0)) +#define vlmul_trunc_v_i32m1_i32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m1_i32mf2((vint32m1_t)(op0)) +#define vlmul_trunc_v_i32m2_i32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m2_i32mf2((vint32m2_t)(op0)) +#define vlmul_trunc_v_i32m4_i32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m4_i32mf2((vint32m4_t)(op0)) +#define vlmul_trunc_v_i32m8_i32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m8_i32mf2((vint32m8_t)(op0)) +#define vlmul_trunc_v_i8m1_i8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m1_i8mf4((vint8m1_t)(op0)) +#define vlmul_trunc_v_i8m2_i8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m2_i8mf4((vint8m2_t)(op0)) +#define vlmul_trunc_v_i8m4_i8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m4_i8mf4((vint8m4_t)(op0)) +#define vlmul_trunc_v_i8m8_i8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m8_i8mf4((vint8m8_t)(op0)) +#define vlmul_trunc_v_i8mf2_i8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i8mf2_i8mf4((vint8mf2_t)(op0)) +#define vlmul_trunc_v_i16m1_i16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m1_i16mf4((vint16m1_t)(op0)) +#define vlmul_trunc_v_i16m2_i16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m2_i16mf4((vint16m2_t)(op0)) +#define vlmul_trunc_v_i16m4_i16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m4_i16mf4((vint16m4_t)(op0)) +#define vlmul_trunc_v_i16m8_i16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m8_i16mf4((vint16m8_t)(op0)) +#define vlmul_trunc_v_i16mf2_i16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_i16mf2_i16mf4((vint16mf2_t)(op0)) +#define vlmul_trunc_v_i8m1_i8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m1_i8mf8((vint8m1_t)(op0)) +#define vlmul_trunc_v_i8m2_i8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m2_i8mf8((vint8m2_t)(op0)) +#define vlmul_trunc_v_i8m4_i8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m4_i8mf8((vint8m4_t)(op0)) +#define vlmul_trunc_v_i8m8_i8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m8_i8mf8((vint8m8_t)(op0)) +#define vlmul_trunc_v_i8mf2_i8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_i8mf2_i8mf8((vint8mf2_t)(op0)) +#define vlmul_trunc_v_i8mf4_i8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_i8mf4_i8mf8((vint8mf4_t)(op0)) +#define vlmul_trunc_v_i8m2_i8m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m2_i8m1((vint8m2_t)(op0)) +#define vlmul_trunc_v_i8m4_i8m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m4_i8m1((vint8m4_t)(op0)) +#define vlmul_trunc_v_i8m8_i8m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m8_i8m1((vint8m8_t)(op0)) +#define vlmul_trunc_v_i16m2_i16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m2_i16m1((vint16m2_t)(op0)) +#define vlmul_trunc_v_i16m4_i16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m4_i16m1((vint16m4_t)(op0)) +#define vlmul_trunc_v_i16m8_i16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m8_i16m1((vint16m8_t)(op0)) +#define vlmul_trunc_v_i32m2_i32m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m2_i32m1((vint32m2_t)(op0)) +#define vlmul_trunc_v_i32m4_i32m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m4_i32m1((vint32m4_t)(op0)) +#define vlmul_trunc_v_i32m8_i32m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m8_i32m1((vint32m8_t)(op0)) +#define vlmul_trunc_v_i64m2_i64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i64m2_i64m1((vint64m2_t)(op0)) +#define vlmul_trunc_v_i64m4_i64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i64m4_i64m1((vint64m4_t)(op0)) +#define 
vlmul_trunc_v_i64m8_i64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_i64m8_i64m1((vint64m8_t)(op0)) +#define vlmul_trunc_v_i8m4_i8m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m4_i8m2((vint8m4_t)(op0)) +#define vlmul_trunc_v_i8m8_i8m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m8_i8m2((vint8m8_t)(op0)) +#define vlmul_trunc_v_i16m4_i16m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m4_i16m2((vint16m4_t)(op0)) +#define vlmul_trunc_v_i16m8_i16m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m8_i16m2((vint16m8_t)(op0)) +#define vlmul_trunc_v_i32m4_i32m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m4_i32m2((vint32m4_t)(op0)) +#define vlmul_trunc_v_i32m8_i32m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m8_i32m2((vint32m8_t)(op0)) +#define vlmul_trunc_v_i64m4_i64m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i64m4_i64m2((vint64m4_t)(op0)) +#define vlmul_trunc_v_i64m8_i64m2(op0) \ +__builtin_rvv_vlmul_trunc_v_i64m8_i64m2((vint64m8_t)(op0)) +#define vlmul_trunc_v_i8m8_i8m4(op0) \ +__builtin_rvv_vlmul_trunc_v_i8m8_i8m4((vint8m8_t)(op0)) +#define vlmul_trunc_v_i16m8_i16m4(op0) \ +__builtin_rvv_vlmul_trunc_v_i16m8_i16m4((vint16m8_t)(op0)) +#define vlmul_trunc_v_i32m8_i32m4(op0) \ +__builtin_rvv_vlmul_trunc_v_i32m8_i32m4((vint32m8_t)(op0)) +#define vlmul_trunc_v_i64m8_i64m4(op0) \ +__builtin_rvv_vlmul_trunc_v_i64m8_i64m4((vint64m8_t)(op0)) +#define vlmul_trunc_v_u8m1_u8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m1_u8mf2((vuint8m1_t)(op0)) +#define vlmul_trunc_v_u8m2_u8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m2_u8mf2((vuint8m2_t)(op0)) +#define vlmul_trunc_v_u8m4_u8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m4_u8mf2((vuint8m4_t)(op0)) +#define vlmul_trunc_v_u8m8_u8mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m8_u8mf2((vuint8m8_t)(op0)) +#define vlmul_trunc_v_u16m1_u16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m1_u16mf2((vuint16m1_t)(op0)) +#define vlmul_trunc_v_u16m2_u16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m2_u16mf2((vuint16m2_t)(op0)) +#define vlmul_trunc_v_u16m4_u16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m4_u16mf2((vuint16m4_t)(op0)) +#define vlmul_trunc_v_u16m8_u16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m8_u16mf2((vuint16m8_t)(op0)) +#define vlmul_trunc_v_u32m1_u32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m1_u32mf2((vuint32m1_t)(op0)) +#define vlmul_trunc_v_u32m2_u32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m2_u32mf2((vuint32m2_t)(op0)) +#define vlmul_trunc_v_u32m4_u32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m4_u32mf2((vuint32m4_t)(op0)) +#define vlmul_trunc_v_u32m8_u32mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m8_u32mf2((vuint32m8_t)(op0)) +#define vlmul_trunc_v_u8m1_u8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m1_u8mf4((vuint8m1_t)(op0)) +#define vlmul_trunc_v_u8m2_u8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m2_u8mf4((vuint8m2_t)(op0)) +#define vlmul_trunc_v_u8m4_u8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m4_u8mf4((vuint8m4_t)(op0)) +#define vlmul_trunc_v_u8m8_u8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m8_u8mf4((vuint8m8_t)(op0)) +#define vlmul_trunc_v_u8mf2_u8mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u8mf2_u8mf4((vuint8mf2_t)(op0)) +#define vlmul_trunc_v_u16m1_u16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m1_u16mf4((vuint16m1_t)(op0)) +#define vlmul_trunc_v_u16m2_u16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m2_u16mf4((vuint16m2_t)(op0)) +#define vlmul_trunc_v_u16m4_u16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m4_u16mf4((vuint16m4_t)(op0)) +#define vlmul_trunc_v_u16m8_u16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m8_u16mf4((vuint16m8_t)(op0)) +#define vlmul_trunc_v_u16mf2_u16mf4(op0) \ 
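/* A usage sketch, not part of the patch: vlmul_trunc_v_* keeps the low
 * part of a wider register group. */
#include <riscv_vector.h>
vuint8m1_t low_part(vuint8m2_t v) {
  return vlmul_trunc_v_u8m2_u8m1(v);
}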
+__builtin_rvv_vlmul_trunc_v_u16mf2_u16mf4((vuint16mf2_t)(op0)) +#define vlmul_trunc_v_u8m1_u8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m1_u8mf8((vuint8m1_t)(op0)) +#define vlmul_trunc_v_u8m2_u8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m2_u8mf8((vuint8m2_t)(op0)) +#define vlmul_trunc_v_u8m4_u8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m4_u8mf8((vuint8m4_t)(op0)) +#define vlmul_trunc_v_u8m8_u8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m8_u8mf8((vuint8m8_t)(op0)) +#define vlmul_trunc_v_u8mf2_u8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_u8mf2_u8mf8((vuint8mf2_t)(op0)) +#define vlmul_trunc_v_u8mf4_u8mf8(op0) \ +__builtin_rvv_vlmul_trunc_v_u8mf4_u8mf8((vuint8mf4_t)(op0)) +#define vlmul_trunc_v_u8m2_u8m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m2_u8m1((vuint8m2_t)(op0)) +#define vlmul_trunc_v_u8m4_u8m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m4_u8m1((vuint8m4_t)(op0)) +#define vlmul_trunc_v_u8m8_u8m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m8_u8m1((vuint8m8_t)(op0)) +#define vlmul_trunc_v_u16m2_u16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m2_u16m1((vuint16m2_t)(op0)) +#define vlmul_trunc_v_u16m4_u16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m4_u16m1((vuint16m4_t)(op0)) +#define vlmul_trunc_v_u16m8_u16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m8_u16m1((vuint16m8_t)(op0)) +#define vlmul_trunc_v_u32m2_u32m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m2_u32m1((vuint32m2_t)(op0)) +#define vlmul_trunc_v_u32m4_u32m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m4_u32m1((vuint32m4_t)(op0)) +#define vlmul_trunc_v_u32m8_u32m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m8_u32m1((vuint32m8_t)(op0)) +#define vlmul_trunc_v_u64m2_u64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u64m2_u64m1((vuint64m2_t)(op0)) +#define vlmul_trunc_v_u64m4_u64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u64m4_u64m1((vuint64m4_t)(op0)) +#define vlmul_trunc_v_u64m8_u64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_u64m8_u64m1((vuint64m8_t)(op0)) +#define vlmul_trunc_v_u8m4_u8m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m4_u8m2((vuint8m4_t)(op0)) +#define vlmul_trunc_v_u8m8_u8m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m8_u8m2((vuint8m8_t)(op0)) +#define vlmul_trunc_v_u16m4_u16m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m4_u16m2((vuint16m4_t)(op0)) +#define vlmul_trunc_v_u16m8_u16m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m8_u16m2((vuint16m8_t)(op0)) +#define vlmul_trunc_v_u32m4_u32m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m4_u32m2((vuint32m4_t)(op0)) +#define vlmul_trunc_v_u32m8_u32m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m8_u32m2((vuint32m8_t)(op0)) +#define vlmul_trunc_v_u64m4_u64m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u64m4_u64m2((vuint64m4_t)(op0)) +#define vlmul_trunc_v_u64m8_u64m2(op0) \ +__builtin_rvv_vlmul_trunc_v_u64m8_u64m2((vuint64m8_t)(op0)) +#define vlmul_trunc_v_u8m8_u8m4(op0) \ +__builtin_rvv_vlmul_trunc_v_u8m8_u8m4((vuint8m8_t)(op0)) +#define vlmul_trunc_v_u16m8_u16m4(op0) \ +__builtin_rvv_vlmul_trunc_v_u16m8_u16m4((vuint16m8_t)(op0)) +#define vlmul_trunc_v_u32m8_u32m4(op0) \ +__builtin_rvv_vlmul_trunc_v_u32m8_u32m4((vuint32m8_t)(op0)) +#define vlmul_trunc_v_u64m8_u64m4(op0) \ +__builtin_rvv_vlmul_trunc_v_u64m8_u64m4((vuint64m8_t)(op0)) +#define vmand_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmand_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmand_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmand_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), 
(size_t)(op2)) +#define vmand_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmand_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmand_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmand_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmandnot_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmandnot_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmclr_m_b8(op0) \ +__builtin_rvv_vmclr_m_b8((size_t)(op0)) +#define vmclr_m_b4(op0) \ +__builtin_rvv_vmclr_m_b4((size_t)(op0)) +#define vmclr_m_b2(op0) \ +__builtin_rvv_vmclr_m_b2((size_t)(op0)) +#define vmclr_m_b1(op0) \ +__builtin_rvv_vmclr_m_b1((size_t)(op0)) +#define vmclr_m_b16(op0) \ +__builtin_rvv_vmclr_m_b16((size_t)(op0)) +#define vmclr_m_b32(op0) \ +__builtin_rvv_vmclr_m_b32((size_t)(op0)) +#define vmclr_m_b64(op0) \ +__builtin_rvv_vmclr_m_b64((size_t)(op0)) +#define vmnand_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmnand_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmnand_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmnand_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmnand_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmnand_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmnand_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmnand_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmnor_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmnor_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmnor_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmnor_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmnor_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmnor_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmnor_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmnor_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmor_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmor_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), 
(size_t)(op2)) +#define vmor_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmor_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmor_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmor_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmor_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmor_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmornot_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmornot_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmornot_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmornot_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmornot_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmornot_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmornot_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmornot_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmsbf_m_b8(op0, op1) \ +__builtin_rvv_vmsbf_m_b8((vbool8_t)(op0), (size_t)(op1)) +#define vmsbf_m_b8_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsbf_m_b4(op0, op1) \ +__builtin_rvv_vmsbf_m_b4((vbool4_t)(op0), (size_t)(op1)) +#define vmsbf_m_b4_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsbf_m_b2(op0, op1) \ +__builtin_rvv_vmsbf_m_b2((vbool2_t)(op0), (size_t)(op1)) +#define vmsbf_m_b2_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsbf_m_b1(op0, op1) \ +__builtin_rvv_vmsbf_m_b1((vbool1_t)(op0), (size_t)(op1)) +#define vmsbf_m_b1_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsbf_m_b16(op0, op1) \ +__builtin_rvv_vmsbf_m_b16((vbool16_t)(op0), (size_t)(op1)) +#define vmsbf_m_b16_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsbf_m_b32(op0, op1) \ +__builtin_rvv_vmsbf_m_b32((vbool32_t)(op0), (size_t)(op1)) +#define vmsbf_m_b32_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsbf_m_b64(op0, op1) \ +__builtin_rvv_vmsbf_m_b64((vbool64_t)(op0), (size_t)(op1)) +#define vmsbf_m_b64_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsbf_m_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmset_m_b8(op0) \ +__builtin_rvv_vmset_m_b8((size_t)(op0)) +#define vmset_m_b4(op0) \ +__builtin_rvv_vmset_m_b4((size_t)(op0)) +#define vmset_m_b2(op0) \ +__builtin_rvv_vmset_m_b2((size_t)(op0)) +#define vmset_m_b1(op0) \ +__builtin_rvv_vmset_m_b1((size_t)(op0)) +#define vmset_m_b16(op0) \ +__builtin_rvv_vmset_m_b16((size_t)(op0)) +#define vmset_m_b32(op0) \ +__builtin_rvv_vmset_m_b32((size_t)(op0)) +#define vmset_m_b64(op0) \ +__builtin_rvv_vmset_m_b64((size_t)(op0)) +#define vmsif_m_b8(op0, 
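/* A usage sketch, not part of the patch: the vm*_mm wrappers above are
 * whole-mask logical ops over the first vl bits; in this pre-1.0 naming,
 * vmandnot_mm_b8(a, b, vl) is believed to compute a & ~b. */
#include <riscv_vector.h>
vbool8_t and_not(vbool8_t a, vbool8_t b, size_t vl) {
  return vmandnot_mm_b8(a, b, vl);
}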
op1) \ +__builtin_rvv_vmsif_m_b8((vbool8_t)(op0), (size_t)(op1)) +#define vmsif_m_b8_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsif_m_b4(op0, op1) \ +__builtin_rvv_vmsif_m_b4((vbool4_t)(op0), (size_t)(op1)) +#define vmsif_m_b4_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsif_m_b2(op0, op1) \ +__builtin_rvv_vmsif_m_b2((vbool2_t)(op0), (size_t)(op1)) +#define vmsif_m_b2_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsif_m_b1(op0, op1) \ +__builtin_rvv_vmsif_m_b1((vbool1_t)(op0), (size_t)(op1)) +#define vmsif_m_b1_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsif_m_b16(op0, op1) \ +__builtin_rvv_vmsif_m_b16((vbool16_t)(op0), (size_t)(op1)) +#define vmsif_m_b16_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsif_m_b32(op0, op1) \ +__builtin_rvv_vmsif_m_b32((vbool32_t)(op0), (size_t)(op1)) +#define vmsif_m_b32_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsif_m_b64(op0, op1) \ +__builtin_rvv_vmsif_m_b64((vbool64_t)(op0), (size_t)(op1)) +#define vmsif_m_b64_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsif_m_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmsof_m_b8(op0, op1) \ +__builtin_rvv_vmsof_m_b8((vbool8_t)(op0), (size_t)(op1)) +#define vmsof_m_b8_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vbool8_t)(op2), (size_t)(op3)) +#define vmsof_m_b4(op0, op1) \ +__builtin_rvv_vmsof_m_b4((vbool4_t)(op0), (size_t)(op1)) +#define vmsof_m_b4_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vbool4_t)(op2), (size_t)(op3)) +#define vmsof_m_b2(op0, op1) \ +__builtin_rvv_vmsof_m_b2((vbool2_t)(op0), (size_t)(op1)) +#define vmsof_m_b2_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vbool2_t)(op2), (size_t)(op3)) +#define vmsof_m_b1(op0, op1) \ +__builtin_rvv_vmsof_m_b1((vbool1_t)(op0), (size_t)(op1)) +#define vmsof_m_b1_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (vbool1_t)(op2), (size_t)(op3)) +#define vmsof_m_b16(op0, op1) \ +__builtin_rvv_vmsof_m_b16((vbool16_t)(op0), (size_t)(op1)) +#define vmsof_m_b16_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vbool16_t)(op2), (size_t)(op3)) +#define vmsof_m_b32(op0, op1) \ +__builtin_rvv_vmsof_m_b32((vbool32_t)(op0), (size_t)(op1)) +#define vmsof_m_b32_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vbool32_t)(op2), (size_t)(op3)) +#define vmsof_m_b64(op0, op1) \ +__builtin_rvv_vmsof_m_b64((vbool64_t)(op0), (size_t)(op1)) +#define vmsof_m_b64_m(op0, op1, op2, op3) \ +__builtin_rvv_vmsof_m_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vbool64_t)(op2), (size_t)(op3)) +#define vmxnor_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmxnor_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmxnor_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), 
(size_t)(op2)) +#define vmxnor_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmxnor_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmxnor_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmxnor_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmxnor_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vmxor_mm_b8(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b8((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vmxor_mm_b4(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b4((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vmxor_mm_b2(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b2((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vmxor_mm_b1(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b1((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vmxor_mm_b16(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b16((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vmxor_mm_b32(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b32((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vmxor_mm_b64(op0, op1, op2) \ +__builtin_rvv_vmxor_mm_b64((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vpopc_m_b8(op0, op1) \ +__builtin_rvv_vpopc_m_b8((vbool8_t)(op0), (size_t)(op1)) +#define vpopc_m_b8_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vpopc_m_b4(op0, op1) \ +__builtin_rvv_vpopc_m_b4((vbool4_t)(op0), (size_t)(op1)) +#define vpopc_m_b4_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vpopc_m_b2(op0, op1) \ +__builtin_rvv_vpopc_m_b2((vbool2_t)(op0), (size_t)(op1)) +#define vpopc_m_b2_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vpopc_m_b1(op0, op1) \ +__builtin_rvv_vpopc_m_b1((vbool1_t)(op0), (size_t)(op1)) +#define vpopc_m_b1_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b1_m((vbool1_t)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vpopc_m_b16(op0, op1) \ +__builtin_rvv_vpopc_m_b16((vbool16_t)(op0), (size_t)(op1)) +#define vpopc_m_b16_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vpopc_m_b32(op0, op1) \ +__builtin_rvv_vpopc_m_b32((vbool32_t)(op0), (size_t)(op1)) +#define vpopc_m_b32_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vpopc_m_b64(op0, op1) \ +__builtin_rvv_vpopc_m_b64((vbool64_t)(op0), (size_t)(op1)) +#define vpopc_m_b64_m(op0, op1, op2) \ +__builtin_rvv_vpopc_m_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vreinterpret_v_i8m1_i16m1(op0) \ +__builtin_rvv_vreinterpret_v_i8m1_i16m1((vint8m1_t)(op0)) +#define vreinterpret_v_i8m2_i16m2(op0) \ +__builtin_rvv_vreinterpret_v_i8m2_i16m2((vint8m2_t)(op0)) +#define vreinterpret_v_i8m4_i16m4(op0) \ +__builtin_rvv_vreinterpret_v_i8m4_i16m4((vint8m4_t)(op0)) +#define vreinterpret_v_i8m8_i16m8(op0) \ +__builtin_rvv_vreinterpret_v_i8m8_i16m8((vint8m8_t)(op0)) +#define vreinterpret_v_i8mf2_i16mf2(op0) \ +__builtin_rvv_vreinterpret_v_i8mf2_i16mf2((vint8mf2_t)(op0)) +#define vreinterpret_v_i8mf4_i16mf4(op0) \ +__builtin_rvv_vreinterpret_v_i8mf4_i16mf4((vint8mf4_t)(op0)) +#define vreinterpret_v_i32m1_i16m1(op0) \ +__builtin_rvv_vreinterpret_v_i32m1_i16m1((vint32m1_t)(op0)) +#define vreinterpret_v_i32m2_i16m2(op0) \ 
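/* A usage sketch, not part of the patch: vmsbf/vmsif/vmsof derive
 * before-first / up-to-first / only-first masks from the first set bit,
 * and vpopc_m_b8 counts the set bits in [0, vl). */
#include <riscv_vector.h>
unsigned long count_active(vbool8_t m, size_t vl) {
  vbool8_t only_first = vmsof_m_b8(m, vl); /* isolate the first set bit */
  (void)only_first;
  return vpopc_m_b8(m, vl);
}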
+__builtin_rvv_vreinterpret_v_i32m2_i16m2((vint32m2_t)(op0)) +#define vreinterpret_v_i32m4_i16m4(op0) \ +__builtin_rvv_vreinterpret_v_i32m4_i16m4((vint32m4_t)(op0)) +#define vreinterpret_v_i32m8_i16m8(op0) \ +__builtin_rvv_vreinterpret_v_i32m8_i16m8((vint32m8_t)(op0)) +#define vreinterpret_v_i32mf2_i16mf2(op0) \ +__builtin_rvv_vreinterpret_v_i32mf2_i16mf2((vint32mf2_t)(op0)) +#define vreinterpret_v_i64m1_i16m1(op0) \ +__builtin_rvv_vreinterpret_v_i64m1_i16m1((vint64m1_t)(op0)) +#define vreinterpret_v_i64m2_i16m2(op0) \ +__builtin_rvv_vreinterpret_v_i64m2_i16m2((vint64m2_t)(op0)) +#define vreinterpret_v_i64m4_i16m4(op0) \ +__builtin_rvv_vreinterpret_v_i64m4_i16m4((vint64m4_t)(op0)) +#define vreinterpret_v_i64m8_i16m8(op0) \ +__builtin_rvv_vreinterpret_v_i64m8_i16m8((vint64m8_t)(op0)) +#define vreinterpret_v_i8m1_i32m1(op0) \ +__builtin_rvv_vreinterpret_v_i8m1_i32m1((vint8m1_t)(op0)) +#define vreinterpret_v_i8m2_i32m2(op0) \ +__builtin_rvv_vreinterpret_v_i8m2_i32m2((vint8m2_t)(op0)) +#define vreinterpret_v_i8m4_i32m4(op0) \ +__builtin_rvv_vreinterpret_v_i8m4_i32m4((vint8m4_t)(op0)) +#define vreinterpret_v_i8m8_i32m8(op0) \ +__builtin_rvv_vreinterpret_v_i8m8_i32m8((vint8m8_t)(op0)) +#define vreinterpret_v_i8mf2_i32mf2(op0) \ +__builtin_rvv_vreinterpret_v_i8mf2_i32mf2((vint8mf2_t)(op0)) +#define vreinterpret_v_i16m1_i32m1(op0) \ +__builtin_rvv_vreinterpret_v_i16m1_i32m1((vint16m1_t)(op0)) +#define vreinterpret_v_i16m2_i32m2(op0) \ +__builtin_rvv_vreinterpret_v_i16m2_i32m2((vint16m2_t)(op0)) +#define vreinterpret_v_i16m4_i32m4(op0) \ +__builtin_rvv_vreinterpret_v_i16m4_i32m4((vint16m4_t)(op0)) +#define vreinterpret_v_i16m8_i32m8(op0) \ +__builtin_rvv_vreinterpret_v_i16m8_i32m8((vint16m8_t)(op0)) +#define vreinterpret_v_i16mf2_i32mf2(op0) \ +__builtin_rvv_vreinterpret_v_i16mf2_i32mf2((vint16mf2_t)(op0)) +#define vreinterpret_v_i64m1_i32m1(op0) \ +__builtin_rvv_vreinterpret_v_i64m1_i32m1((vint64m1_t)(op0)) +#define vreinterpret_v_i64m2_i32m2(op0) \ +__builtin_rvv_vreinterpret_v_i64m2_i32m2((vint64m2_t)(op0)) +#define vreinterpret_v_i64m4_i32m4(op0) \ +__builtin_rvv_vreinterpret_v_i64m4_i32m4((vint64m4_t)(op0)) +#define vreinterpret_v_i64m8_i32m8(op0) \ +__builtin_rvv_vreinterpret_v_i64m8_i32m8((vint64m8_t)(op0)) +#define vreinterpret_v_i8m1_i64m1(op0) \ +__builtin_rvv_vreinterpret_v_i8m1_i64m1((vint8m1_t)(op0)) +#define vreinterpret_v_i8m2_i64m2(op0) \ +__builtin_rvv_vreinterpret_v_i8m2_i64m2((vint8m2_t)(op0)) +#define vreinterpret_v_i8m4_i64m4(op0) \ +__builtin_rvv_vreinterpret_v_i8m4_i64m4((vint8m4_t)(op0)) +#define vreinterpret_v_i8m8_i64m8(op0) \ +__builtin_rvv_vreinterpret_v_i8m8_i64m8((vint8m8_t)(op0)) +#define vreinterpret_v_i16m1_i64m1(op0) \ +__builtin_rvv_vreinterpret_v_i16m1_i64m1((vint16m1_t)(op0)) +#define vreinterpret_v_i16m2_i64m2(op0) \ +__builtin_rvv_vreinterpret_v_i16m2_i64m2((vint16m2_t)(op0)) +#define vreinterpret_v_i16m4_i64m4(op0) \ +__builtin_rvv_vreinterpret_v_i16m4_i64m4((vint16m4_t)(op0)) +#define vreinterpret_v_i16m8_i64m8(op0) \ +__builtin_rvv_vreinterpret_v_i16m8_i64m8((vint16m8_t)(op0)) +#define vreinterpret_v_i32m1_i64m1(op0) \ +__builtin_rvv_vreinterpret_v_i32m1_i64m1((vint32m1_t)(op0)) +#define vreinterpret_v_i32m2_i64m2(op0) \ +__builtin_rvv_vreinterpret_v_i32m2_i64m2((vint32m2_t)(op0)) +#define vreinterpret_v_i32m4_i64m4(op0) \ +__builtin_rvv_vreinterpret_v_i32m4_i64m4((vint32m4_t)(op0)) +#define vreinterpret_v_i32m8_i64m8(op0) \ +__builtin_rvv_vreinterpret_v_i32m8_i64m8((vint32m8_t)(op0)) +#define vreinterpret_v_i16m1_i8m1(op0) \ 
+__builtin_rvv_vreinterpret_v_i16m1_i8m1((vint16m1_t)(op0)) +#define vreinterpret_v_i16m2_i8m2(op0) \ +__builtin_rvv_vreinterpret_v_i16m2_i8m2((vint16m2_t)(op0)) +#define vreinterpret_v_i16m4_i8m4(op0) \ +__builtin_rvv_vreinterpret_v_i16m4_i8m4((vint16m4_t)(op0)) +#define vreinterpret_v_i16m8_i8m8(op0) \ +__builtin_rvv_vreinterpret_v_i16m8_i8m8((vint16m8_t)(op0)) +#define vreinterpret_v_i16mf2_i8mf2(op0) \ +__builtin_rvv_vreinterpret_v_i16mf2_i8mf2((vint16mf2_t)(op0)) +#define vreinterpret_v_i16mf4_i8mf4(op0) \ +__builtin_rvv_vreinterpret_v_i16mf4_i8mf4((vint16mf4_t)(op0)) +#define vreinterpret_v_i32m1_i8m1(op0) \ +__builtin_rvv_vreinterpret_v_i32m1_i8m1((vint32m1_t)(op0)) +#define vreinterpret_v_i32m2_i8m2(op0) \ +__builtin_rvv_vreinterpret_v_i32m2_i8m2((vint32m2_t)(op0)) +#define vreinterpret_v_i32m4_i8m4(op0) \ +__builtin_rvv_vreinterpret_v_i32m4_i8m4((vint32m4_t)(op0)) +#define vreinterpret_v_i32m8_i8m8(op0) \ +__builtin_rvv_vreinterpret_v_i32m8_i8m8((vint32m8_t)(op0)) +#define vreinterpret_v_i32mf2_i8mf2(op0) \ +__builtin_rvv_vreinterpret_v_i32mf2_i8mf2((vint32mf2_t)(op0)) +#define vreinterpret_v_i64m1_i8m1(op0) \ +__builtin_rvv_vreinterpret_v_i64m1_i8m1((vint64m1_t)(op0)) +#define vreinterpret_v_i64m2_i8m2(op0) \ +__builtin_rvv_vreinterpret_v_i64m2_i8m2((vint64m2_t)(op0)) +#define vreinterpret_v_i64m4_i8m4(op0) \ +__builtin_rvv_vreinterpret_v_i64m4_i8m4((vint64m4_t)(op0)) +#define vreinterpret_v_i64m8_i8m8(op0) \ +__builtin_rvv_vreinterpret_v_i64m8_i8m8((vint64m8_t)(op0)) +#define vreinterpret_v_u8m1_i8m1(op0) \ +__builtin_rvv_vreinterpret_v_u8m1_i8m1((vuint8m1_t)(op0)) +#define vreinterpret_v_u8m2_i8m2(op0) \ +__builtin_rvv_vreinterpret_v_u8m2_i8m2((vuint8m2_t)(op0)) +#define vreinterpret_v_u8m4_i8m4(op0) \ +__builtin_rvv_vreinterpret_v_u8m4_i8m4((vuint8m4_t)(op0)) +#define vreinterpret_v_u8m8_i8m8(op0) \ +__builtin_rvv_vreinterpret_v_u8m8_i8m8((vuint8m8_t)(op0)) +#define vreinterpret_v_u8mf2_i8mf2(op0) \ +__builtin_rvv_vreinterpret_v_u8mf2_i8mf2((vuint8mf2_t)(op0)) +#define vreinterpret_v_u8mf4_i8mf4(op0) \ +__builtin_rvv_vreinterpret_v_u8mf4_i8mf4((vuint8mf4_t)(op0)) +#define vreinterpret_v_u8mf8_i8mf8(op0) \ +__builtin_rvv_vreinterpret_v_u8mf8_i8mf8((vuint8mf8_t)(op0)) +#define vreinterpret_v_u16m1_i16m1(op0) \ +__builtin_rvv_vreinterpret_v_u16m1_i16m1((vuint16m1_t)(op0)) +#define vreinterpret_v_u16m2_i16m2(op0) \ +__builtin_rvv_vreinterpret_v_u16m2_i16m2((vuint16m2_t)(op0)) +#define vreinterpret_v_u16m4_i16m4(op0) \ +__builtin_rvv_vreinterpret_v_u16m4_i16m4((vuint16m4_t)(op0)) +#define vreinterpret_v_u16m8_i16m8(op0) \ +__builtin_rvv_vreinterpret_v_u16m8_i16m8((vuint16m8_t)(op0)) +#define vreinterpret_v_u16mf2_i16mf2(op0) \ +__builtin_rvv_vreinterpret_v_u16mf2_i16mf2((vuint16mf2_t)(op0)) +#define vreinterpret_v_u16mf4_i16mf4(op0) \ +__builtin_rvv_vreinterpret_v_u16mf4_i16mf4((vuint16mf4_t)(op0)) +#define vreinterpret_v_u32m1_i32m1(op0) \ +__builtin_rvv_vreinterpret_v_u32m1_i32m1((vuint32m1_t)(op0)) +#define vreinterpret_v_u32m2_i32m2(op0) \ +__builtin_rvv_vreinterpret_v_u32m2_i32m2((vuint32m2_t)(op0)) +#define vreinterpret_v_u32m4_i32m4(op0) \ +__builtin_rvv_vreinterpret_v_u32m4_i32m4((vuint32m4_t)(op0)) +#define vreinterpret_v_u32m8_i32m8(op0) \ +__builtin_rvv_vreinterpret_v_u32m8_i32m8((vuint32m8_t)(op0)) +#define vreinterpret_v_u32mf2_i32mf2(op0) \ +__builtin_rvv_vreinterpret_v_u32mf2_i32mf2((vuint32mf2_t)(op0)) +#define vreinterpret_v_u64m1_i64m1(op0) \ +__builtin_rvv_vreinterpret_v_u64m1_i64m1((vuint64m1_t)(op0)) +#define vreinterpret_v_u64m2_i64m2(op0) \ 
+__builtin_rvv_vreinterpret_v_u64m2_i64m2((vuint64m2_t)(op0)) +#define vreinterpret_v_u64m4_i64m4(op0) \ +__builtin_rvv_vreinterpret_v_u64m4_i64m4((vuint64m4_t)(op0)) +#define vreinterpret_v_u64m8_i64m8(op0) \ +__builtin_rvv_vreinterpret_v_u64m8_i64m8((vuint64m8_t)(op0)) +#define vreinterpret_v_u8m1_u16m1(op0) \ +__builtin_rvv_vreinterpret_v_u8m1_u16m1((vuint8m1_t)(op0)) +#define vreinterpret_v_u8m2_u16m2(op0) \ +__builtin_rvv_vreinterpret_v_u8m2_u16m2((vuint8m2_t)(op0)) +#define vreinterpret_v_u8m4_u16m4(op0) \ +__builtin_rvv_vreinterpret_v_u8m4_u16m4((vuint8m4_t)(op0)) +#define vreinterpret_v_u8m8_u16m8(op0) \ +__builtin_rvv_vreinterpret_v_u8m8_u16m8((vuint8m8_t)(op0)) +#define vreinterpret_v_u8mf2_u16mf2(op0) \ +__builtin_rvv_vreinterpret_v_u8mf2_u16mf2((vuint8mf2_t)(op0)) +#define vreinterpret_v_u8mf4_u16mf4(op0) \ +__builtin_rvv_vreinterpret_v_u8mf4_u16mf4((vuint8mf4_t)(op0)) +#define vreinterpret_v_u32m1_u16m1(op0) \ +__builtin_rvv_vreinterpret_v_u32m1_u16m1((vuint32m1_t)(op0)) +#define vreinterpret_v_u32m2_u16m2(op0) \ +__builtin_rvv_vreinterpret_v_u32m2_u16m2((vuint32m2_t)(op0)) +#define vreinterpret_v_u32m4_u16m4(op0) \ +__builtin_rvv_vreinterpret_v_u32m4_u16m4((vuint32m4_t)(op0)) +#define vreinterpret_v_u32m8_u16m8(op0) \ +__builtin_rvv_vreinterpret_v_u32m8_u16m8((vuint32m8_t)(op0)) +#define vreinterpret_v_u32mf2_u16mf2(op0) \ +__builtin_rvv_vreinterpret_v_u32mf2_u16mf2((vuint32mf2_t)(op0)) +#define vreinterpret_v_u64m1_u16m1(op0) \ +__builtin_rvv_vreinterpret_v_u64m1_u16m1((vuint64m1_t)(op0)) +#define vreinterpret_v_u64m2_u16m2(op0) \ +__builtin_rvv_vreinterpret_v_u64m2_u16m2((vuint64m2_t)(op0)) +#define vreinterpret_v_u64m4_u16m4(op0) \ +__builtin_rvv_vreinterpret_v_u64m4_u16m4((vuint64m4_t)(op0)) +#define vreinterpret_v_u64m8_u16m8(op0) \ +__builtin_rvv_vreinterpret_v_u64m8_u16m8((vuint64m8_t)(op0)) +#define vreinterpret_v_u8m1_u32m1(op0) \ +__builtin_rvv_vreinterpret_v_u8m1_u32m1((vuint8m1_t)(op0)) +#define vreinterpret_v_u8m2_u32m2(op0) \ +__builtin_rvv_vreinterpret_v_u8m2_u32m2((vuint8m2_t)(op0)) +#define vreinterpret_v_u8m4_u32m4(op0) \ +__builtin_rvv_vreinterpret_v_u8m4_u32m4((vuint8m4_t)(op0)) +#define vreinterpret_v_u8m8_u32m8(op0) \ +__builtin_rvv_vreinterpret_v_u8m8_u32m8((vuint8m8_t)(op0)) +#define vreinterpret_v_u8mf2_u32mf2(op0) \ +__builtin_rvv_vreinterpret_v_u8mf2_u32mf2((vuint8mf2_t)(op0)) +#define vreinterpret_v_u16m1_u32m1(op0) \ +__builtin_rvv_vreinterpret_v_u16m1_u32m1((vuint16m1_t)(op0)) +#define vreinterpret_v_u16m2_u32m2(op0) \ +__builtin_rvv_vreinterpret_v_u16m2_u32m2((vuint16m2_t)(op0)) +#define vreinterpret_v_u16m4_u32m4(op0) \ +__builtin_rvv_vreinterpret_v_u16m4_u32m4((vuint16m4_t)(op0)) +#define vreinterpret_v_u16m8_u32m8(op0) \ +__builtin_rvv_vreinterpret_v_u16m8_u32m8((vuint16m8_t)(op0)) +#define vreinterpret_v_u16mf2_u32mf2(op0) \ +__builtin_rvv_vreinterpret_v_u16mf2_u32mf2((vuint16mf2_t)(op0)) +#define vreinterpret_v_u64m1_u32m1(op0) \ +__builtin_rvv_vreinterpret_v_u64m1_u32m1((vuint64m1_t)(op0)) +#define vreinterpret_v_u64m2_u32m2(op0) \ +__builtin_rvv_vreinterpret_v_u64m2_u32m2((vuint64m2_t)(op0)) +#define vreinterpret_v_u64m4_u32m4(op0) \ +__builtin_rvv_vreinterpret_v_u64m4_u32m4((vuint64m4_t)(op0)) +#define vreinterpret_v_u64m8_u32m8(op0) \ +__builtin_rvv_vreinterpret_v_u64m8_u32m8((vuint64m8_t)(op0)) +#define vreinterpret_v_u8m1_u64m1(op0) \ +__builtin_rvv_vreinterpret_v_u8m1_u64m1((vuint8m1_t)(op0)) +#define vreinterpret_v_u8m2_u64m2(op0) \ +__builtin_rvv_vreinterpret_v_u8m2_u64m2((vuint8m2_t)(op0)) +#define vreinterpret_v_u8m4_u64m4(op0) \ 
+__builtin_rvv_vreinterpret_v_u8m4_u64m4((vuint8m4_t)(op0)) +#define vreinterpret_v_u8m8_u64m8(op0) \ +__builtin_rvv_vreinterpret_v_u8m8_u64m8((vuint8m8_t)(op0)) +#define vreinterpret_v_u16m1_u64m1(op0) \ +__builtin_rvv_vreinterpret_v_u16m1_u64m1((vuint16m1_t)(op0)) +#define vreinterpret_v_u16m2_u64m2(op0) \ +__builtin_rvv_vreinterpret_v_u16m2_u64m2((vuint16m2_t)(op0)) +#define vreinterpret_v_u16m4_u64m4(op0) \ +__builtin_rvv_vreinterpret_v_u16m4_u64m4((vuint16m4_t)(op0)) +#define vreinterpret_v_u16m8_u64m8(op0) \ +__builtin_rvv_vreinterpret_v_u16m8_u64m8((vuint16m8_t)(op0)) +#define vreinterpret_v_u32m1_u64m1(op0) \ +__builtin_rvv_vreinterpret_v_u32m1_u64m1((vuint32m1_t)(op0)) +#define vreinterpret_v_u32m2_u64m2(op0) \ +__builtin_rvv_vreinterpret_v_u32m2_u64m2((vuint32m2_t)(op0)) +#define vreinterpret_v_u32m4_u64m4(op0) \ +__builtin_rvv_vreinterpret_v_u32m4_u64m4((vuint32m4_t)(op0)) +#define vreinterpret_v_u32m8_u64m8(op0) \ +__builtin_rvv_vreinterpret_v_u32m8_u64m8((vuint32m8_t)(op0)) +#define vreinterpret_v_u16m1_u8m1(op0) \ +__builtin_rvv_vreinterpret_v_u16m1_u8m1((vuint16m1_t)(op0)) +#define vreinterpret_v_u16m2_u8m2(op0) \ +__builtin_rvv_vreinterpret_v_u16m2_u8m2((vuint16m2_t)(op0)) +#define vreinterpret_v_u16m4_u8m4(op0) \ +__builtin_rvv_vreinterpret_v_u16m4_u8m4((vuint16m4_t)(op0)) +#define vreinterpret_v_u16m8_u8m8(op0) \ +__builtin_rvv_vreinterpret_v_u16m8_u8m8((vuint16m8_t)(op0)) +#define vreinterpret_v_u16mf2_u8mf2(op0) \ +__builtin_rvv_vreinterpret_v_u16mf2_u8mf2((vuint16mf2_t)(op0)) +#define vreinterpret_v_u16mf4_u8mf4(op0) \ +__builtin_rvv_vreinterpret_v_u16mf4_u8mf4((vuint16mf4_t)(op0)) +#define vreinterpret_v_u32m1_u8m1(op0) \ +__builtin_rvv_vreinterpret_v_u32m1_u8m1((vuint32m1_t)(op0)) +#define vreinterpret_v_u32m2_u8m2(op0) \ +__builtin_rvv_vreinterpret_v_u32m2_u8m2((vuint32m2_t)(op0)) +#define vreinterpret_v_u32m4_u8m4(op0) \ +__builtin_rvv_vreinterpret_v_u32m4_u8m4((vuint32m4_t)(op0)) +#define vreinterpret_v_u32m8_u8m8(op0) \ +__builtin_rvv_vreinterpret_v_u32m8_u8m8((vuint32m8_t)(op0)) +#define vreinterpret_v_u32mf2_u8mf2(op0) \ +__builtin_rvv_vreinterpret_v_u32mf2_u8mf2((vuint32mf2_t)(op0)) +#define vreinterpret_v_u64m1_u8m1(op0) \ +__builtin_rvv_vreinterpret_v_u64m1_u8m1((vuint64m1_t)(op0)) +#define vreinterpret_v_u64m2_u8m2(op0) \ +__builtin_rvv_vreinterpret_v_u64m2_u8m2((vuint64m2_t)(op0)) +#define vreinterpret_v_u64m4_u8m4(op0) \ +__builtin_rvv_vreinterpret_v_u64m4_u8m4((vuint64m4_t)(op0)) +#define vreinterpret_v_u64m8_u8m8(op0) \ +__builtin_rvv_vreinterpret_v_u64m8_u8m8((vuint64m8_t)(op0)) +#define vreinterpret_v_i8m1_u8m1(op0) \ +__builtin_rvv_vreinterpret_v_i8m1_u8m1((vint8m1_t)(op0)) +#define vreinterpret_v_i8m2_u8m2(op0) \ +__builtin_rvv_vreinterpret_v_i8m2_u8m2((vint8m2_t)(op0)) +#define vreinterpret_v_i8m4_u8m4(op0) \ +__builtin_rvv_vreinterpret_v_i8m4_u8m4((vint8m4_t)(op0)) +#define vreinterpret_v_i8m8_u8m8(op0) \ +__builtin_rvv_vreinterpret_v_i8m8_u8m8((vint8m8_t)(op0)) +#define vreinterpret_v_i8mf2_u8mf2(op0) \ +__builtin_rvv_vreinterpret_v_i8mf2_u8mf2((vint8mf2_t)(op0)) +#define vreinterpret_v_i8mf4_u8mf4(op0) \ +__builtin_rvv_vreinterpret_v_i8mf4_u8mf4((vint8mf4_t)(op0)) +#define vreinterpret_v_i8mf8_u8mf8(op0) \ +__builtin_rvv_vreinterpret_v_i8mf8_u8mf8((vint8mf8_t)(op0)) +#define vreinterpret_v_i16m1_u16m1(op0) \ +__builtin_rvv_vreinterpret_v_i16m1_u16m1((vint16m1_t)(op0)) +#define vreinterpret_v_i16m2_u16m2(op0) \ +__builtin_rvv_vreinterpret_v_i16m2_u16m2((vint16m2_t)(op0)) +#define vreinterpret_v_i16m4_u16m4(op0) \ 
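/* Sketch, not part of the patch: vreinterpret_v_* is a bit cast between
 * vector types occupying the same register group (same LMUL); it emits no
 * instruction. The name encodes source and destination, here i8m1 -> u8m1. */
#include <riscv_vector.h>
static inline vuint8m1_t bits_as_unsigned(vint8m1_t v) {
  return vreinterpret_v_i8m1_u8m1(v);
}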
+__builtin_rvv_vreinterpret_v_i16m4_u16m4((vint16m4_t)(op0)) +#define vreinterpret_v_i16m8_u16m8(op0) \ +__builtin_rvv_vreinterpret_v_i16m8_u16m8((vint16m8_t)(op0)) +#define vreinterpret_v_i16mf2_u16mf2(op0) \ +__builtin_rvv_vreinterpret_v_i16mf2_u16mf2((vint16mf2_t)(op0)) +#define vreinterpret_v_i16mf4_u16mf4(op0) \ +__builtin_rvv_vreinterpret_v_i16mf4_u16mf4((vint16mf4_t)(op0)) +#define vreinterpret_v_i32m1_u32m1(op0) \ +__builtin_rvv_vreinterpret_v_i32m1_u32m1((vint32m1_t)(op0)) +#define vreinterpret_v_i32m2_u32m2(op0) \ +__builtin_rvv_vreinterpret_v_i32m2_u32m2((vint32m2_t)(op0)) +#define vreinterpret_v_i32m4_u32m4(op0) \ +__builtin_rvv_vreinterpret_v_i32m4_u32m4((vint32m4_t)(op0)) +#define vreinterpret_v_i32m8_u32m8(op0) \ +__builtin_rvv_vreinterpret_v_i32m8_u32m8((vint32m8_t)(op0)) +#define vreinterpret_v_i32mf2_u32mf2(op0) \ +__builtin_rvv_vreinterpret_v_i32mf2_u32mf2((vint32mf2_t)(op0)) +#define vreinterpret_v_i64m1_u64m1(op0) \ +__builtin_rvv_vreinterpret_v_i64m1_u64m1((vint64m1_t)(op0)) +#define vreinterpret_v_i64m2_u64m2(op0) \ +__builtin_rvv_vreinterpret_v_i64m2_u64m2((vint64m2_t)(op0)) +#define vreinterpret_v_i64m4_u64m4(op0) \ +__builtin_rvv_vreinterpret_v_i64m4_u64m4((vint64m4_t)(op0)) +#define vreinterpret_v_i64m8_u64m8(op0) \ +__builtin_rvv_vreinterpret_v_i64m8_u64m8((vint64m8_t)(op0)) +#define vse1_v_b8(op0, op1, op2) \ +__builtin_rvv_vse1_v_b8((uint8_t *)(op0), (vbool8_t)(op1), (size_t)(op2)) +#define vse1_v_b4(op0, op1, op2) \ +__builtin_rvv_vse1_v_b4((uint8_t *)(op0), (vbool4_t)(op1), (size_t)(op2)) +#define vse1_v_b2(op0, op1, op2) \ +__builtin_rvv_vse1_v_b2((uint8_t *)(op0), (vbool2_t)(op1), (size_t)(op2)) +#define vse1_v_b1(op0, op1, op2) \ +__builtin_rvv_vse1_v_b1((uint8_t *)(op0), (vbool1_t)(op1), (size_t)(op2)) +#define vse1_v_b16(op0, op1, op2) \ +__builtin_rvv_vse1_v_b16((uint8_t *)(op0), (vbool16_t)(op1), (size_t)(op2)) +#define vse1_v_b32(op0, op1, op2) \ +__builtin_rvv_vse1_v_b32((uint8_t *)(op0), (vbool32_t)(op1), (size_t)(op2)) +#define vse1_v_b64(op0, op1, op2) \ +__builtin_rvv_vse1_v_b64((uint8_t *)(op0), (vbool64_t)(op1), (size_t)(op2)) +#define vsext_vf2_i16mf4(op0, op1) \ +__builtin_rvv_vsext_vf2_i16mf4((vint8mf8_t)(op0), (size_t)(op1)) +#define vsext_vf2_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsext_vf2_i16mf2(op0, op1) \ +__builtin_rvv_vsext_vf2_i16mf2((vint8mf4_t)(op0), (size_t)(op1)) +#define vsext_vf2_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsext_vf2_i16m1(op0, op1) \ +__builtin_rvv_vsext_vf2_i16m1((vint8mf2_t)(op0), (size_t)(op1)) +#define vsext_vf2_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsext_vf2_i16m2(op0, op1) \ +__builtin_rvv_vsext_vf2_i16m2((vint8m1_t)(op0), (size_t)(op1)) +#define vsext_vf2_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsext_vf2_i16m4(op0, op1) \ +__builtin_rvv_vsext_vf2_i16m4((vint8m2_t)(op0), (size_t)(op1)) +#define vsext_vf2_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsext_vf2_i16m8(op0, op1) \ +__builtin_rvv_vsext_vf2_i16m8((vint8m4_t)(op0), (size_t)(op1)) +#define vsext_vf2_i16m8_m(op0, op1, op2, op3) \ 
+__builtin_rvv_vsext_vf2_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsext_vf2_i32mf2(op0, op1) \ +__builtin_rvv_vsext_vf2_i32mf2((vint16mf4_t)(op0), (size_t)(op1)) +#define vsext_vf2_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsext_vf2_i32m1(op0, op1) \ +__builtin_rvv_vsext_vf2_i32m1((vint16mf2_t)(op0), (size_t)(op1)) +#define vsext_vf2_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsext_vf2_i32m2(op0, op1) \ +__builtin_rvv_vsext_vf2_i32m2((vint16m1_t)(op0), (size_t)(op1)) +#define vsext_vf2_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsext_vf2_i32m4(op0, op1) \ +__builtin_rvv_vsext_vf2_i32m4((vint16m2_t)(op0), (size_t)(op1)) +#define vsext_vf2_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsext_vf2_i32m8(op0, op1) \ +__builtin_rvv_vsext_vf2_i32m8((vint16m4_t)(op0), (size_t)(op1)) +#define vsext_vf2_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsext_vf2_i64m1(op0, op1) \ +__builtin_rvv_vsext_vf2_i64m1((vint32mf2_t)(op0), (size_t)(op1)) +#define vsext_vf2_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsext_vf2_i64m2(op0, op1) \ +__builtin_rvv_vsext_vf2_i64m2((vint32m1_t)(op0), (size_t)(op1)) +#define vsext_vf2_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsext_vf2_i64m4(op0, op1) \ +__builtin_rvv_vsext_vf2_i64m4((vint32m2_t)(op0), (size_t)(op1)) +#define vsext_vf2_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsext_vf2_i64m8(op0, op1) \ +__builtin_rvv_vsext_vf2_i64m8((vint32m4_t)(op0), (size_t)(op1)) +#define vsext_vf2_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf2_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsext_vf4_i32mf2(op0, op1) \ +__builtin_rvv_vsext_vf4_i32mf2((vint8mf8_t)(op0), (size_t)(op1)) +#define vsext_vf4_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsext_vf4_i32m1(op0, op1) \ +__builtin_rvv_vsext_vf4_i32m1((vint8mf4_t)(op0), (size_t)(op1)) +#define vsext_vf4_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsext_vf4_i32m2(op0, op1) \ +__builtin_rvv_vsext_vf4_i32m2((vint8mf2_t)(op0), (size_t)(op1)) +#define vsext_vf4_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsext_vf4_i32m4(op0, op1) \ +__builtin_rvv_vsext_vf4_i32m4((vint8m1_t)(op0), (size_t)(op1)) +#define vsext_vf4_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsext_vf4_i32m8(op0, op1) \ +__builtin_rvv_vsext_vf4_i32m8((vint8m2_t)(op0), (size_t)(op1)) +#define 
vsext_vf4_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsext_vf4_i64m1(op0, op1) \ +__builtin_rvv_vsext_vf4_i64m1((vint16mf4_t)(op0), (size_t)(op1)) +#define vsext_vf4_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsext_vf4_i64m2(op0, op1) \ +__builtin_rvv_vsext_vf4_i64m2((vint16mf2_t)(op0), (size_t)(op1)) +#define vsext_vf4_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsext_vf4_i64m4(op0, op1) \ +__builtin_rvv_vsext_vf4_i64m4((vint16m1_t)(op0), (size_t)(op1)) +#define vsext_vf4_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsext_vf4_i64m8(op0, op1) \ +__builtin_rvv_vsext_vf4_i64m8((vint16m2_t)(op0), (size_t)(op1)) +#define vsext_vf4_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf4_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsext_vf8_i64m1(op0, op1) \ +__builtin_rvv_vsext_vf8_i64m1((vint8mf8_t)(op0), (size_t)(op1)) +#define vsext_vf8_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf8_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsext_vf8_i64m2(op0, op1) \ +__builtin_rvv_vsext_vf8_i64m2((vint8mf4_t)(op0), (size_t)(op1)) +#define vsext_vf8_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf8_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsext_vf8_i64m4(op0, op1) \ +__builtin_rvv_vsext_vf8_i64m4((vint8mf2_t)(op0), (size_t)(op1)) +#define vsext_vf8_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf8_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsext_vf8_i64m8(op0, op1) \ +__builtin_rvv_vsext_vf8_i64m8((vint8m1_t)(op0), (size_t)(op1)) +#define vsext_vf8_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vsext_vf8_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vundefined_i8m1() \ +__builtin_rvv_vundefined_i8m1() +#define vundefined_i8m2() \ +__builtin_rvv_vundefined_i8m2() +#define vundefined_i8m4() \ +__builtin_rvv_vundefined_i8m4() +#define vundefined_i8m8() \ +__builtin_rvv_vundefined_i8m8() +#define vundefined_i8mf2() \ +__builtin_rvv_vundefined_i8mf2() +#define vundefined_i8mf4() \ +__builtin_rvv_vundefined_i8mf4() +#define vundefined_i8mf8() \ +__builtin_rvv_vundefined_i8mf8() +#define vundefined_i16m1() \ +__builtin_rvv_vundefined_i16m1() +#define vundefined_i16m2() \ +__builtin_rvv_vundefined_i16m2() +#define vundefined_i16m4() \ +__builtin_rvv_vundefined_i16m4() +#define vundefined_i16m8() \ +__builtin_rvv_vundefined_i16m8() +#define vundefined_i16mf2() \ +__builtin_rvv_vundefined_i16mf2() +#define vundefined_i16mf4() \ +__builtin_rvv_vundefined_i16mf4() +#define vundefined_i32m1() \ +__builtin_rvv_vundefined_i32m1() +#define vundefined_i32m2() \ +__builtin_rvv_vundefined_i32m2() +#define vundefined_i32m4() \ +__builtin_rvv_vundefined_i32m4() +#define vundefined_i32m8() \ +__builtin_rvv_vundefined_i32m8() +#define vundefined_i32mf2() \ +__builtin_rvv_vundefined_i32mf2() +#define vundefined_i64m1() \ +__builtin_rvv_vundefined_i64m1() +#define vundefined_i64m2() \ +__builtin_rvv_vundefined_i64m2() +#define vundefined_i64m4() \ +__builtin_rvv_vundefined_i64m4() +#define vundefined_i64m8() \ 
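/* Sketch, not part of the patch: vsext_vf2/vf4/vf8 sign-extend each element
 * to 2x/4x/8x its width, scaling LMUL to match, e.g. i8 at LMUL=1 widens to
 * i32 at LMUL=4 (signatures as in the defines above). */
#include <riscv_vector.h>
static inline vint32m4_t widen_i8_to_i32(vint8m1_t v, size_t vl) {
  return vsext_vf4_i32m4(v, vl);
}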
+__builtin_rvv_vundefined_i64m8() +#define vundefined_u8m1() \ +__builtin_rvv_vundefined_u8m1() +#define vundefined_u8m2() \ +__builtin_rvv_vundefined_u8m2() +#define vundefined_u8m4() \ +__builtin_rvv_vundefined_u8m4() +#define vundefined_u8m8() \ +__builtin_rvv_vundefined_u8m8() +#define vundefined_u8mf2() \ +__builtin_rvv_vundefined_u8mf2() +#define vundefined_u8mf4() \ +__builtin_rvv_vundefined_u8mf4() +#define vundefined_u8mf8() \ +__builtin_rvv_vundefined_u8mf8() +#define vundefined_u16m1() \ +__builtin_rvv_vundefined_u16m1() +#define vundefined_u16m2() \ +__builtin_rvv_vundefined_u16m2() +#define vundefined_u16m4() \ +__builtin_rvv_vundefined_u16m4() +#define vundefined_u16m8() \ +__builtin_rvv_vundefined_u16m8() +#define vundefined_u16mf2() \ +__builtin_rvv_vundefined_u16mf2() +#define vundefined_u16mf4() \ +__builtin_rvv_vundefined_u16mf4() +#define vundefined_u32m1() \ +__builtin_rvv_vundefined_u32m1() +#define vundefined_u32m2() \ +__builtin_rvv_vundefined_u32m2() +#define vundefined_u32m4() \ +__builtin_rvv_vundefined_u32m4() +#define vundefined_u32m8() \ +__builtin_rvv_vundefined_u32m8() +#define vundefined_u32mf2() \ +__builtin_rvv_vundefined_u32mf2() +#define vundefined_u64m1() \ +__builtin_rvv_vundefined_u64m1() +#define vundefined_u64m2() \ +__builtin_rvv_vundefined_u64m2() +#define vundefined_u64m4() \ +__builtin_rvv_vundefined_u64m4() +#define vundefined_u64m8() \ +__builtin_rvv_vundefined_u64m8() +#define vzext_vf2_u16mf4(op0, op1) \ +__builtin_rvv_vzext_vf2_u16mf4((vuint8mf8_t)(op0), (size_t)(op1)) +#define vzext_vf2_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vzext_vf2_u16mf2(op0, op1) \ +__builtin_rvv_vzext_vf2_u16mf2((vuint8mf4_t)(op0), (size_t)(op1)) +#define vzext_vf2_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vzext_vf2_u16m1(op0, op1) \ +__builtin_rvv_vzext_vf2_u16m1((vuint8mf2_t)(op0), (size_t)(op1)) +#define vzext_vf2_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vzext_vf2_u16m2(op0, op1) \ +__builtin_rvv_vzext_vf2_u16m2((vuint8m1_t)(op0), (size_t)(op1)) +#define vzext_vf2_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vzext_vf2_u16m4(op0, op1) \ +__builtin_rvv_vzext_vf2_u16m4((vuint8m2_t)(op0), (size_t)(op1)) +#define vzext_vf2_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vzext_vf2_u16m8(op0, op1) \ +__builtin_rvv_vzext_vf2_u16m8((vuint8m4_t)(op0), (size_t)(op1)) +#define vzext_vf2_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vzext_vf2_u32mf2(op0, op1) \ +__builtin_rvv_vzext_vf2_u32mf2((vuint16mf4_t)(op0), (size_t)(op1)) +#define vzext_vf2_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vzext_vf2_u32m1(op0, op1) \ +__builtin_rvv_vzext_vf2_u32m1((vuint16mf2_t)(op0), (size_t)(op1)) +#define vzext_vf2_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vzext_vf2_u32m2(op0, op1) \ 
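/* Sketch, not part of the patch: the masked (_m) forms above take
 * (mask, maskedoff, src, vl); vundefined_*() supplies a don't-care maskedoff
 * operand when inactive elements need not be preserved. */
#include <riscv_vector.h>
static inline vint32m1_t widen_where(vbool32_t m, vint16mf2_t v, size_t vl) {
  return vsext_vf2_i32m1_m(m, vundefined_i32m1(), v, vl);
}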
+__builtin_rvv_vzext_vf2_u32m2((vuint16m1_t)(op0), (size_t)(op1)) +#define vzext_vf2_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vzext_vf2_u32m4(op0, op1) \ +__builtin_rvv_vzext_vf2_u32m4((vuint16m2_t)(op0), (size_t)(op1)) +#define vzext_vf2_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vzext_vf2_u32m8(op0, op1) \ +__builtin_rvv_vzext_vf2_u32m8((vuint16m4_t)(op0), (size_t)(op1)) +#define vzext_vf2_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vzext_vf2_u64m1(op0, op1) \ +__builtin_rvv_vzext_vf2_u64m1((vuint32mf2_t)(op0), (size_t)(op1)) +#define vzext_vf2_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vzext_vf2_u64m2(op0, op1) \ +__builtin_rvv_vzext_vf2_u64m2((vuint32m1_t)(op0), (size_t)(op1)) +#define vzext_vf2_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vzext_vf2_u64m4(op0, op1) \ +__builtin_rvv_vzext_vf2_u64m4((vuint32m2_t)(op0), (size_t)(op1)) +#define vzext_vf2_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vzext_vf2_u64m8(op0, op1) \ +__builtin_rvv_vzext_vf2_u64m8((vuint32m4_t)(op0), (size_t)(op1)) +#define vzext_vf2_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf2_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vzext_vf4_u32mf2(op0, op1) \ +__builtin_rvv_vzext_vf4_u32mf2((vuint8mf8_t)(op0), (size_t)(op1)) +#define vzext_vf4_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vzext_vf4_u32m1(op0, op1) \ +__builtin_rvv_vzext_vf4_u32m1((vuint8mf4_t)(op0), (size_t)(op1)) +#define vzext_vf4_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vzext_vf4_u32m2(op0, op1) \ +__builtin_rvv_vzext_vf4_u32m2((vuint8mf2_t)(op0), (size_t)(op1)) +#define vzext_vf4_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vzext_vf4_u32m4(op0, op1) \ +__builtin_rvv_vzext_vf4_u32m4((vuint8m1_t)(op0), (size_t)(op1)) +#define vzext_vf4_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vzext_vf4_u32m8(op0, op1) \ +__builtin_rvv_vzext_vf4_u32m8((vuint8m2_t)(op0), (size_t)(op1)) +#define vzext_vf4_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vzext_vf4_u64m1(op0, op1) \ +__builtin_rvv_vzext_vf4_u64m1((vuint16mf4_t)(op0), (size_t)(op1)) +#define vzext_vf4_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vzext_vf4_u64m2(op0, op1) \ +__builtin_rvv_vzext_vf4_u64m2((vuint16mf2_t)(op0), (size_t)(op1)) +#define vzext_vf4_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), 
(vuint16mf2_t)(op2), (size_t)(op3)) +#define vzext_vf4_u64m4(op0, op1) \ +__builtin_rvv_vzext_vf4_u64m4((vuint16m1_t)(op0), (size_t)(op1)) +#define vzext_vf4_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vzext_vf4_u64m8(op0, op1) \ +__builtin_rvv_vzext_vf4_u64m8((vuint16m2_t)(op0), (size_t)(op1)) +#define vzext_vf4_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf4_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vzext_vf8_u64m1(op0, op1) \ +__builtin_rvv_vzext_vf8_u64m1((vuint8mf8_t)(op0), (size_t)(op1)) +#define vzext_vf8_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf8_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vzext_vf8_u64m2(op0, op1) \ +__builtin_rvv_vzext_vf8_u64m2((vuint8mf4_t)(op0), (size_t)(op1)) +#define vzext_vf8_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf8_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vzext_vf8_u64m4(op0, op1) \ +__builtin_rvv_vzext_vf8_u64m4((vuint8mf2_t)(op0), (size_t)(op1)) +#define vzext_vf8_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf8_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vzext_vf8_u64m8(op0, op1) \ +__builtin_rvv_vzext_vf8_u64m8((vuint8m1_t)(op0), (size_t)(op1)) +#define vzext_vf8_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vzext_vf8_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#if defined(__riscv_f) +#define vsse32_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsse32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsse32_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_f32m2((float *)(op0), (ptrdiff_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsse32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsse32_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_f32m4((float *)(op0), (ptrdiff_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsse32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsse32_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_f32m8((float *)(op0), (ptrdiff_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsse32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsse32_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsse32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f32m1((const float *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_f32m2(op0, op1, 
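/* Sketch, not part of the patch: everything under the __riscv_f guard above
 * requires the single-precision F extension. vsse32 is a strided store and
 * the stride is in bytes, so writing one float out of every two looks like: */
#include <riscv_vector.h>
static inline void store_every_other(float *base, vfloat32m1_t v, size_t vl) {
  vsse32_v_f32m1(base, 2 * sizeof(float), v, vl);
}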
op2) \ +__builtin_rvv_vluxei8_v_f32m2((const float *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f32m4((const float *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f32m8((const float *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f32mf2((const float *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei16_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f32m1((const float *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f32m2((const float *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f32m4((const float *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f32m8((const float *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f32mf2((const float *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei32_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f32m1((const float *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f32m2((const float *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f32m4((const float *)(op0), 
(vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f32m8((const float *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f32mf2((const float *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei64_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f32m1((const float *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f32m2((const float *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f32m4((const float *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f32mf2((const float *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f32m1((const float *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f32m2((const float *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f32m4((const float *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f32m8((const float *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f32mf2((const float *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_f32mf2_m(op0, op1, op2, 
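/* Sketch, not part of the patch: vluxei* is the unordered indexed ("gather")
 * load and vloxei* the ordered variant; per the RVV spec the index vector
 * holds unscaled byte offsets from the base pointer. */
#include <riscv_vector.h>
static inline vfloat32m1_t gather_f32(const float *base, vuint32m1_t byte_offs,
                                      size_t vl) {
  return vluxei32_v_f32m1(base, byte_offs, vl);
}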
op3, op4) \ +__builtin_rvv_vloxei8_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei16_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f32m1((const float *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f32m2((const float *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f32m4((const float *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f32m8((const float *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f32mf2((const float *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei32_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f32m1((const float *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f32m2((const float *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f32m4((const float *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f32m8((const float *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f32mf2((const float *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei64_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f32m1((const float *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_f32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vloxei64_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f32m2((const float *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f32m4((const float *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f32mf2((const float *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmv_v_v_f32m1(op0, op1) \ +__builtin_rvv_vmv_v_v_f32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vmv_v_v_f32m2(op0, op1) \ +__builtin_rvv_vmv_v_v_f32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vmv_v_v_f32m4(op0, op1) \ +__builtin_rvv_vmv_v_v_f32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vmv_v_v_f32m8(op0, op1) \ +__builtin_rvv_vmv_v_v_f32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vmv_v_v_f32mf2(op0, op1) \ +__builtin_rvv_vmv_v_v_f32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfadd_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfadd_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfadd_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfadd_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfadd_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfadd_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfadd_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfadd_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfadd_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfadd_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfadd_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfadd_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfadd_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), 
(size_t)(op2)) +#define vfadd_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfadd_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfadd_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfadd_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfadd_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfadd_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfadd_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsub_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfsub_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfsub_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfsub_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfsub_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfsub_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfsub_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfsub_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfsub_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfsub_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfsub_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsub_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsub_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsub_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsub_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsub_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsub_vf_f32m8(op0, op1, op2) 
\ +__builtin_rvv_vfsub_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsub_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsub_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsub_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrsub_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrsub_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrsub_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrsub_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrsub_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrsub_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrsub_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrsub_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrsub_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrsub_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmul_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfmul_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmul_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfmul_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmul_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfmul_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmul_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfmul_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmul_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfmul_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), 
(vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmul_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmul_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmul_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmul_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmul_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmul_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmul_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmul_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmul_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmul_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfdiv_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfdiv_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfdiv_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfdiv_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfdiv_vf_f32m2_m(op0, op1, op2, op3, 
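/* Sketch, not part of the patch: _vv forms take two vectors, _vf forms a
 * vector and a float scalar; the "reversed" vfrsub/vfrdiv compute
 * scalar - vector and scalar / vector. Masked forms take
 * (mask, maskedoff, op1, op2, vl) as in the defines above. */
#include <riscv_vector.h>
static inline vfloat32m1_t scale_and_shift(vfloat32m1_t x, float a, float b,
                                           size_t vl) {
  return vfadd_vf_f32m1(vfmul_vf_f32m1(x, a, vl), b, vl); /* a*x + b */
}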
op4) \ +__builtin_rvv_vfdiv_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfdiv_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfdiv_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfdiv_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfdiv_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfdiv_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfdiv_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrdiv_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrdiv_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrdiv_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrdiv_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrdiv_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrdiv_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrdiv_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrdiv_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfrdiv_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfrdiv_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmacc_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define 
vfmacc_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmacc_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f32mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vfmsac_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f32mf2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmsac_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f32m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vfmadd_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmadd_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f32m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vfmsub_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f32m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmsub_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmin_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfmin_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmin_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfmin_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmin_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfmin_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmin_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfmin_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmin_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfmin_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmin_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmin_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmin_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmin_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmin_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmin_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) 
+#define vfmin_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmin_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmin_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmin_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmax_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfmax_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfmax_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfmax_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfmax_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfmax_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfmax_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfmax_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfmax_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfmax_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfmax_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmax_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmax_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmax_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmax_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmax_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmax_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmax_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfmax_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmax_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f32mf2_m((vbool64_t)(op0), 
(vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnj_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfsgnj_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnj_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnj_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnj_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnj_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnj_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnj_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnj_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnj_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnj_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f32m2(op0, op1, op2) \ 
+__builtin_rvv_vfsgnjn_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f32m4_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfabs_v_f32m1(op0, op1) \ +__builtin_rvv_vfabs_v_f32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfabs_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfabs_v_f32m2(op0, op1) \ +__builtin_rvv_vfabs_v_f32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfabs_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfabs_v_f32m4(op0, op1) \ +__builtin_rvv_vfabs_v_f32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfabs_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfabs_v_f32m8(op0, op1) \ +__builtin_rvv_vfabs_v_f32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfabs_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfabs_v_f32mf2(op0, op1) \ +__builtin_rvv_vfabs_v_f32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfabs_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), 
(vfloat32mf2_t)(op2), (size_t)(op3)) +#define vmfeq_vv_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f32m1_b32((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f32m2_b16((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f32m4_b8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f32m8_b4((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f32mf2_b64((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vmfeq_vf_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f32m1_b32((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfeq_vf_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfeq_vf_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f32m2_b16((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfeq_vf_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfeq_vf_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f32m4_b8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfeq_vf_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfeq_vf_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f32m8_b4((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfeq_vf_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfeq_vf_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f32mf2_b64((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfeq_vf_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfne_vv_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f32m1_b32((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vmfne_vv_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vmfne_vv_f32m2_b16(op0, op1, op2) \ 
+__builtin_rvv_vmfne_vv_f32m2_b16((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vmfne_vv_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vmfne_vv_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f32m4_b8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vmfne_vv_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vmfne_vv_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f32m8_b4((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vmfne_vv_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vmfne_vv_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f32mf2_b64((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vmfne_vv_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vmfne_vf_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f32m1_b32((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfne_vf_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfne_vf_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f32m2_b16((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfne_vf_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfne_vf_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f32m4_b8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfne_vf_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfne_vf_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f32m8_b4((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfne_vf_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfne_vf_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f32mf2_b64((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfne_vf_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmflt_vv_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f32m1_b32((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vmflt_vv_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vmflt_vv_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f32m2_b16((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vmflt_vv_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vmflt_vv_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f32m4_b8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) 
+#define vmflt_vv_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vmflt_vv_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f32m8_b4((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vmflt_vv_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vmflt_vv_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f32mf2_b64((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vmflt_vv_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vmflt_vf_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f32m1_b32((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vmflt_vf_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vmflt_vf_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f32m2_b16((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmflt_vf_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmflt_vf_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f32m4_b8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vmflt_vf_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vmflt_vf_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f32m8_b4((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vmflt_vf_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vmflt_vf_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f32mf2_b64((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmflt_vf_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfle_vv_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f32m1_b32((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vmfle_vv_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vmfle_vv_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f32m2_b16((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vmfle_vv_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vmfle_vv_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f32m4_b8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vmfle_vv_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vmfle_vv_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f32m8_b4((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vmfle_vv_f32m8_b4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmfle_vv_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vmfle_vv_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f32mf2_b64((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vmfle_vv_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vmfle_vf_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f32m1_b32((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfle_vf_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfle_vf_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f32m2_b16((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfle_vf_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfle_vf_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f32m4_b8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfle_vf_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfle_vf_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f32m8_b4((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfle_vf_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfle_vf_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f32mf2_b64((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfle_vf_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfgt_vv_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f32m1_b32((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f32m2_b16((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f32m4_b8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f32m8_b4((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f32mf2_b64((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), 
(vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vmfgt_vf_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f32m1_b32((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfgt_vf_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfgt_vf_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f32m2_b16((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfgt_vf_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfgt_vf_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f32m4_b8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfgt_vf_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfgt_vf_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f32m8_b4((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfgt_vf_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfgt_vf_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f32mf2_b64((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfgt_vf_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfge_vv_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f32m1_b32((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vmfge_vv_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vmfge_vv_f32m2_b16(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f32m2_b16((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vmfge_vv_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vmfge_vv_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f32m4_b8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vmfge_vv_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vmfge_vv_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f32m8_b4((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vmfge_vv_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vmfge_vv_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f32mf2_b64((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vmfge_vv_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vmfge_vf_f32m1_b32(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f32m1_b32((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfge_vf_f32m1_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f32m1_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfge_vf_f32m2_b16(op0, op1, op2) \ 
+__builtin_rvv_vmfge_vf_f32m2_b16((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfge_vf_f32m2_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f32m2_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfge_vf_f32m4_b8(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f32m4_b8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfge_vf_f32m4_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f32m4_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfge_vf_f32m8_b4(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f32m8_b4((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfge_vf_f32m8_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f32m8_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vmfge_vf_f32mf2_b64(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f32mf2_b64((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vmfge_vf_f32mf2_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f32mf2_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vmerge_vvm_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f32m1((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f32m2((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f32m4((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f32m8((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f32mf2((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfmerge_vfm_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f32m1((vbool32_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (size_t)(op3)) +#define vfmerge_vfm_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f32m2((vbool16_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (size_t)(op3)) +#define vfmerge_vfm_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f32m4((vbool8_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (size_t)(op3)) +#define vfmerge_vfm_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f32m8((vbool4_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (size_t)(op3)) +#define vfmerge_vfm_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f32mf2((vbool64_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (size_t)(op3)) +#define vfmv_v_f_f32m1(op0, op1) \ +__builtin_rvv_vfmv_v_f_f32m1((float)(op0), (size_t)(op1)) +#define vfmv_v_f_f32m2(op0, op1) \ +__builtin_rvv_vfmv_v_f_f32m2((float)(op0), (size_t)(op1)) +#define vfmv_v_f_f32m4(op0, op1) \ +__builtin_rvv_vfmv_v_f_f32m4((float)(op0), (size_t)(op1)) +#define vfmv_v_f_f32m8(op0, op1) \ +__builtin_rvv_vfmv_v_f_f32m8((float)(op0), (size_t)(op1)) +#define vfmv_v_f_f32mf2(op0, op1) \ +__builtin_rvv_vfmv_v_f_f32mf2((float)(op0), (size_t)(op1)) +#define vfredmax_vs_f32m1_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f32m1_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f32m1_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f32m1_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), 
(size_t)(op4)) +#define vfredmax_vs_f32m2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f32m2_f32m1((vfloat32m1_t)(op0), (vfloat32m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f32m2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f32m2_f32m1_m((vbool16_t)(op0), (vfloat32m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f32m4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f32m4_f32m1((vfloat32m1_t)(op0), (vfloat32m4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f32m4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f32m4_f32m1_m((vbool8_t)(op0), (vfloat32m1_t)(op1), (vfloat32m4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f32m8_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f32m8_f32m1((vfloat32m1_t)(op0), (vfloat32m8_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f32m8_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f32m8_f32m1_m((vbool4_t)(op0), (vfloat32m1_t)(op1), (vfloat32m8_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f32mf2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f32mf2_f32m1((vfloat32m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f32mf2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f32mf2_f32m1_m((vbool64_t)(op0), (vfloat32m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f32m1_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f32m1_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f32m1_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f32m1_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f32m2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f32m2_f32m1((vfloat32m1_t)(op0), (vfloat32m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f32m2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f32m2_f32m1_m((vbool16_t)(op0), (vfloat32m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f32m4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f32m4_f32m1((vfloat32m1_t)(op0), (vfloat32m4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f32m4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f32m4_f32m1_m((vbool8_t)(op0), (vfloat32m1_t)(op1), (vfloat32m4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f32m8_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f32m8_f32m1((vfloat32m1_t)(op0), (vfloat32m8_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f32m8_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f32m8_f32m1_m((vbool4_t)(op0), (vfloat32m1_t)(op1), (vfloat32m8_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f32mf2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f32mf2_f32m1((vfloat32m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f32mf2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f32mf2_f32m1_m((vbool64_t)(op0), (vfloat32m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f32m1_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f32m1_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define 
vfredsum_vs_f32m1_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f32m1_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f32m2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f32m2_f32m1((vfloat32m1_t)(op0), (vfloat32m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f32m2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f32m2_f32m1_m((vbool16_t)(op0), (vfloat32m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f32m4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f32m4_f32m1((vfloat32m1_t)(op0), (vfloat32m4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f32m4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f32m4_f32m1_m((vbool8_t)(op0), (vfloat32m1_t)(op1), (vfloat32m4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f32m8_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f32m8_f32m1((vfloat32m1_t)(op0), (vfloat32m8_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f32m8_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f32m8_f32m1_m((vbool4_t)(op0), (vfloat32m1_t)(op1), (vfloat32m8_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f32mf2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f32mf2_f32m1((vfloat32m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f32mf2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f32mf2_f32m1_m((vbool64_t)(op0), (vfloat32m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f32m1_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f32m1_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f32m1_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f32m1_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f32m2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f32m2_f32m1((vfloat32m1_t)(op0), (vfloat32m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f32m2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f32m2_f32m1_m((vbool16_t)(op0), (vfloat32m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f32m4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f32m4_f32m1((vfloat32m1_t)(op0), (vfloat32m4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f32m4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f32m4_f32m1_m((vbool8_t)(op0), (vfloat32m1_t)(op1), (vfloat32m4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f32m8_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f32m8_f32m1((vfloat32m1_t)(op0), (vfloat32m8_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f32m8_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f32m8_f32m1_m((vbool4_t)(op0), (vfloat32m1_t)(op1), (vfloat32m8_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f32mf2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f32mf2_f32m1((vfloat32m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f32mf2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f32mf2_f32m1_m((vbool64_t)(op0), (vfloat32m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32m1_t)(op3), 
(size_t)(op4)) +#define vfmv_f_s_f32m1_f32(op0) \ +__builtin_rvv_vfmv_f_s_f32m1_f32((vfloat32m1_t)(op0)) +#define vfmv_f_s_f32m2_f32(op0) \ +__builtin_rvv_vfmv_f_s_f32m2_f32((vfloat32m2_t)(op0)) +#define vfmv_f_s_f32m4_f32(op0) \ +__builtin_rvv_vfmv_f_s_f32m4_f32((vfloat32m4_t)(op0)) +#define vfmv_f_s_f32m8_f32(op0) \ +__builtin_rvv_vfmv_f_s_f32m8_f32((vfloat32m8_t)(op0)) +#define vfmv_f_s_f32mf2_f32(op0) \ +__builtin_rvv_vfmv_f_s_f32mf2_f32((vfloat32mf2_t)(op0)) +#define vfmv_s_f_f32m1(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmv_s_f_f32m2(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmv_s_f_f32m4(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmv_s_f_f32m8(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfmv_s_f_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vslideup_vx_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f32m4((vfloat32m4_t)(op0), 
(vfloat32m4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vfslide1up_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1up_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1up_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1up_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1up_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1up_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1up_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1up_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1up_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1up_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1down_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1down_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1down_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1down_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1down_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1down_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1down_vf_f32m8(op0, op1, op2) \ 
+__builtin_rvv_vfslide1down_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1down_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfslide1down_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfslide1down_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vrgather_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f32m1((vfloat32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f32m2((vfloat32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f32m4((vfloat32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f32m8((vfloat32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f32mf2((vfloat32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vrgather_vx_f32m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f32m1((vfloat32m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f32m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f32m2((vfloat32m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f32m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f32m4((vfloat32m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f32m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f32m8((vfloat32m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f32mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f32mf2((vfloat32mf2_t)(op0), 
(size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f32m1((vfloat32m1_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f32m2((vfloat32m2_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f32m4((vfloat32m4_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f32m8((vfloat32m8_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f32mf2((vfloat32mf2_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vcompress_vm_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f32m1((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f32m2((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f32m4((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f32m8((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f32mf2((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vget_v_f32m2_f32m1(op0, op1) \ +__builtin_rvv_vget_v_f32m2_f32m1((vfloat32m2_t)(op0), (size_t)(op1)) +#define vget_v_f32m4_f32m1(op0, op1) \ +__builtin_rvv_vget_v_f32m4_f32m1((vfloat32m4_t)(op0), (size_t)(op1)) +#define vget_v_f32m8_f32m1(op0, op1) \ +__builtin_rvv_vget_v_f32m8_f32m1((vfloat32m8_t)(op0), (size_t)(op1)) +#define vget_v_f32m4_f32m2(op0, op1) \ +__builtin_rvv_vget_v_f32m4_f32m2((vfloat32m4_t)(op0), (size_t)(op1)) +#define vget_v_f32m8_f32m2(op0, op1) \ +__builtin_rvv_vget_v_f32m8_f32m2((vfloat32m8_t)(op0), (size_t)(op1)) +#define vget_v_f32m8_f32m4(op0, op1) \ +__builtin_rvv_vget_v_f32m8_f32m4((vfloat32m8_t)(op0), (size_t)(op1)) +#define vset_v_f32m1_f32m2(op0, op1, op2) \ +__builtin_rvv_vset_v_f32m1_f32m2((vfloat32m2_t)(op0), (size_t)(op1), (vfloat32m1_t)(op2)) +#define vset_v_f32m1_f32m4(op0, op1, op2) \ 
+__builtin_rvv_vset_v_f32m1_f32m4((vfloat32m4_t)(op0), (size_t)(op1), (vfloat32m1_t)(op2)) +#define vset_v_f32m2_f32m4(op0, op1, op2) \ +__builtin_rvv_vset_v_f32m2_f32m4((vfloat32m4_t)(op0), (size_t)(op1), (vfloat32m2_t)(op2)) +#define vsuxei8_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f32m4((float *)(op0), (vuint8m1_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint8m1_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f32m8((float *)(op0), (vuint8m2_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint8m2_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vset_v_f32m1_f32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_f32m1_f32m8((vfloat32m8_t)(op0), (size_t)(op1), (vfloat32m1_t)(op2)) +#define vset_v_f32m2_f32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_f32m2_f32m8((vfloat32m8_t)(op0), (size_t)(op1), (vfloat32m2_t)(op2)) +#define vset_v_f32m4_f32m8(op0, op1, op2) \ +__builtin_rvv_vset_v_f32m4_f32m8((vfloat32m8_t)(op0), (size_t)(op1), (vfloat32m4_t)(op2)) +#define vsuxei16_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f32m4((float *)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint16m2_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f32m8((float *)(op0), (vuint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f32m8_m((vbool4_t)(op0), (float *)(op1), 
(vuint16m4_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f32m4((float *)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f32m8((float *)(op0), (vuint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f32m4((float *)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint64m8_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define 
vsoxei8_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f32m4((float *)(op0), (vuint8m1_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint8m1_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f32m8((float *)(op0), (vuint8m2_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint8m2_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f32m4((float *)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint16m2_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f32m8((float *)(op0), (vuint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint16m4_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f32m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vsoxei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f32m4((float *)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f32m8((float *)(op0), (vuint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f32m4((float *)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint64m8_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vle32ff_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_f32m1((const float *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_f32m2((const float *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_f32m4((const float *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define 
vle32ff_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_f32m8((const float *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle32ff_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vle32ff_v_f32mf2((const float *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle32ff_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle32ff_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vfneg_v_f32m1(op0, op1) \ +__builtin_rvv_vfneg_v_f32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfneg_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfneg_v_f32m2(op0, op1) \ +__builtin_rvv_vfneg_v_f32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfneg_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfneg_v_f32m4(op0, op1) \ +__builtin_rvv_vfneg_v_f32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfneg_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfneg_v_f32m8(op0, op1) \ +__builtin_rvv_vfneg_v_f32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfneg_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfneg_v_f32mf2(op0, op1) \ +__builtin_rvv_vfneg_v_f32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfneg_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vle32_v_f32m1(op0, op1) \ +__builtin_rvv_vle32_v_f32m1((const float *)(op0), (size_t)(op1)) +#define vle32_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (size_t)(op3)) +#define vle32_v_f32m2(op0, op1) \ +__builtin_rvv_vle32_v_f32m2((const float *)(op0), (size_t)(op1)) +#define vle32_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (size_t)(op3)) +#define vle32_v_f32m4(op0, op1) \ +__builtin_rvv_vle32_v_f32m4((const float *)(op0), (size_t)(op1)) +#define vle32_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (size_t)(op3)) +#define vle32_v_f32m8(op0, op1) \ +__builtin_rvv_vle32_v_f32m8((const float *)(op0), (size_t)(op1)) +#define vle32_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (size_t)(op3)) +#define vle32_v_f32mf2(op0, op1) \ +__builtin_rvv_vle32_v_f32mf2((const float *)(op0), (size_t)(op1)) +#define vle32_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle32_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (size_t)(op3)) +#define vse32_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vse32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vse32_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vse32_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vse32_v_f32m2((float *)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) 
+#define vse32_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vse32_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vse32_v_f32m4((float *)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vse32_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vse32_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vse32_v_f32m8((float *)(op0), (vfloat32m8_t)(op1), (size_t)(op2)) +#define vse32_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vse32_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vse32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vse32_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vlse32_v_f32m1(op0, op1, op2) \ +__builtin_rvv_vlse32_v_f32m1((const float *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_f32m2(op0, op1, op2) \ +__builtin_rvv_vlse32_v_f32m2((const float *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_f32m4(op0, op1, op2) \ +__builtin_rvv_vlse32_v_f32m4((const float *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_f32m8(op0, op1, op2) \ +__builtin_rvv_vlse32_v_f32m8((const float *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse32_v_f32mf2(op0, op1, op2) \ +__builtin_rvv_vlse32_v_f32mf2((const float *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse32_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vfclass_v_u32m1(op0, op1) \ +__builtin_rvv_vfclass_v_u32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfclass_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfclass_v_u32m2(op0, op1) \ +__builtin_rvv_vfclass_v_u32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfclass_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfclass_v_u32m4(op0, op1) \ +__builtin_rvv_vfclass_v_u32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfclass_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfclass_v_u32m8(op0, op1) \ +__builtin_rvv_vfclass_v_u32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfclass_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define 
vfclass_v_u32mf2(op0, op1) \ +__builtin_rvv_vfclass_v_u32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfclass_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f32m1(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f32m1((vint32m1_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f32m2(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f32m2((vint32m2_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f32m4(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f32m4((vint32m4_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f32m8(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f32m8((vint32m8_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f32mf2(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f32mf2((vint32mf2_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f32m1(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f32m1((vuint32m1_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f32m2(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f32m2((vuint32m2_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f32m4(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f32m4((vuint32m4_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f32m8(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f32m8((vuint32m8_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f32mf2(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f32mf2((vuint32mf2_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i32m1(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i32m2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i32m4(op0, op1) 
\ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i32m8(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i32mf2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u32m1(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u32m2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u32m4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u32m8(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u32mf2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i32m1(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i32m2(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i32m4(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i32m8(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i32mf2(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i32mf2_m((vbool64_t)(op0), 
(vint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u32m1(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u32m2(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u32m4(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u32m8(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u32mf2(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f32mf2(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f32mf2((vint64m1_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f32m1(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f32m1((vint64m2_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f32m2(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f32m2((vint64m4_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f32m4(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f32m4((vint64m8_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f32mf2(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f32mf2((vuint64m1_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f32m1(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f32m1((vuint64m2_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f32m2(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f32m2((vuint64m4_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f32m4(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f32m4((vuint64m8_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f32m4_m((vbool8_t)(op0), 
(vfloat32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_x_f_w_i16mf4(op0, op1) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16mf4((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_x_f_w_i16mf4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_x_f_w_i16mf2(op0, op1) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16mf2((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_x_f_w_i16mf2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_x_f_w_i16m1(op0, op1) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16m1((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_x_f_w_i16m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_x_f_w_i16m2(op0, op1) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16m2((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_x_f_w_i16m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_x_f_w_i16m4(op0, op1) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16m4((vfloat32m8_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_x_f_w_i16m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_x_f_w_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_xu_f_w_u16mf4(op0, op1) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf4((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_xu_f_w_u16mf4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_xu_f_w_u16mf2(op0, op1) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf2((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_xu_f_w_u16mf2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_xu_f_w_u16m1(op0, op1) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16m1((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_xu_f_w_u16m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_xu_f_w_u16m2(op0, op1) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16m2((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_xu_f_w_u16m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfncvt_rtz_xu_f_w_u16m4(op0, op1) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16m4((vfloat32m8_t)(op0), (size_t)(op1))
+#define vfncvt_rtz_xu_f_w_u16m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_rtz_xu_f_w_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3))
+#define vfncvt_x_f_w_i16mf4(op0, op1) \
+__builtin_rvv_vfncvt_x_f_w_i16mf4((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfncvt_x_f_w_i16mf4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_x_f_w_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfncvt_x_f_w_i16mf2(op0, op1) \
+__builtin_rvv_vfncvt_x_f_w_i16mf2((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfncvt_x_f_w_i16mf2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_x_f_w_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfncvt_x_f_w_i16m1(op0, op1) \
+__builtin_rvv_vfncvt_x_f_w_i16m1((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfncvt_x_f_w_i16m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_x_f_w_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfncvt_x_f_w_i16m2(op0, op1) \
+__builtin_rvv_vfncvt_x_f_w_i16m2((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfncvt_x_f_w_i16m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_x_f_w_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfncvt_x_f_w_i16m4(op0, op1) \
+__builtin_rvv_vfncvt_x_f_w_i16m4((vfloat32m8_t)(op0), (size_t)(op1))
+#define vfncvt_x_f_w_i16m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_x_f_w_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3))
+#define vfncvt_xu_f_w_u16mf4(op0, op1) \
+__builtin_rvv_vfncvt_xu_f_w_u16mf4((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfncvt_xu_f_w_u16mf4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_xu_f_w_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfncvt_xu_f_w_u16mf2(op0, op1) \
+__builtin_rvv_vfncvt_xu_f_w_u16mf2((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfncvt_xu_f_w_u16mf2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_xu_f_w_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfncvt_xu_f_w_u16m1(op0, op1) \
+__builtin_rvv_vfncvt_xu_f_w_u16m1((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfncvt_xu_f_w_u16m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_xu_f_w_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfncvt_xu_f_w_u16m2(op0, op1) \
+__builtin_rvv_vfncvt_xu_f_w_u16m2((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfncvt_xu_f_w_u16m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_xu_f_w_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfncvt_xu_f_w_u16m4(op0, op1) \
+__builtin_rvv_vfncvt_xu_f_w_u16m4((vfloat32m8_t)(op0), (size_t)(op1))
+#define vfncvt_xu_f_w_u16m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfncvt_xu_f_w_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3))
+#define vfrec7_v_f32m1(op0, op1) \
+__builtin_rvv_vfrec7_v_f32m1((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfrec7_v_f32m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfrec7_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfrec7_v_f32m2(op0, op1) \
+__builtin_rvv_vfrec7_v_f32m2((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfrec7_v_f32m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfrec7_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfrec7_v_f32m4(op0, op1) \
+__builtin_rvv_vfrec7_v_f32m4((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfrec7_v_f32m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfrec7_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfrec7_v_f32m8(op0, op1) \
+__builtin_rvv_vfrec7_v_f32m8((vfloat32m8_t)(op0), (size_t)(op1))
+#define vfrec7_v_f32m8_m(op0, op1, op2, op3) \
+__builtin_rvv_vfrec7_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3))
+#define vfrec7_v_f32mf2(op0, op1) \
+__builtin_rvv_vfrec7_v_f32mf2((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfrec7_v_f32mf2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfrec7_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfrsqrt7_v_f32m1(op0, op1) \
+__builtin_rvv_vfrsqrt7_v_f32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f32m2(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f32m4(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f32m8(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f32mf2(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f32m1(op0, op1) \ +__builtin_rvv_vfsqrt_v_f32m1((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f32m2(op0, op1) \ +__builtin_rvv_vfsqrt_v_f32m2((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f32m4(op0, op1) \ +__builtin_rvv_vfsqrt_v_f32m4((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f32m8(op0, op1) \ +__builtin_rvv_vfsqrt_v_f32m8((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f32mf2(op0, op1) \ +__builtin_rvv_vfsqrt_v_f32mf2((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f32mf2((vint16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f32m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f32m1((vint16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f32m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f32m2((vint16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f32m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f32m4((vint16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f32m4_m(op0, op1, op2, 
op3) \ +__builtin_rvv_vfwcvt_f_x_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f32m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f32m8((vint16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f32mf2((vuint16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f32m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m1((vuint16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f32m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m2((vuint16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f32m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m4((vuint16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f32m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m8((vuint16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i64m1(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m1((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i64m2(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m2((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i64m4(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m4((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i64m8(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m8((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u64m1(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m1((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u64m2(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m2((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u64m4(op0, op1) \ 
+__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m4((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfwcvt_rtz_xu_f_v_u64m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfwcvt_rtz_xu_f_v_u64m8(op0, op1) \
+__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m8((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfwcvt_rtz_xu_f_v_u64m8_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfwcvt_x_f_v_i64m1(op0, op1) \
+__builtin_rvv_vfwcvt_x_f_v_i64m1((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfwcvt_x_f_v_i64m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_x_f_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfwcvt_x_f_v_i64m2(op0, op1) \
+__builtin_rvv_vfwcvt_x_f_v_i64m2((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfwcvt_x_f_v_i64m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_x_f_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfwcvt_x_f_v_i64m4(op0, op1) \
+__builtin_rvv_vfwcvt_x_f_v_i64m4((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfwcvt_x_f_v_i64m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_x_f_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfwcvt_x_f_v_i64m8(op0, op1) \
+__builtin_rvv_vfwcvt_x_f_v_i64m8((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfwcvt_x_f_v_i64m8_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_x_f_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vfwcvt_xu_f_v_u64m1(op0, op1) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m1((vfloat32mf2_t)(op0), (size_t)(op1))
+#define vfwcvt_xu_f_v_u64m1_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vfwcvt_xu_f_v_u64m2(op0, op1) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m2((vfloat32m1_t)(op0), (size_t)(op1))
+#define vfwcvt_xu_f_v_u64m2_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vfwcvt_xu_f_v_u64m4(op0, op1) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m4((vfloat32m2_t)(op0), (size_t)(op1))
+#define vfwcvt_xu_f_v_u64m4_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vfwcvt_xu_f_v_u64m8(op0, op1) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m8((vfloat32m4_t)(op0), (size_t)(op1))
+#define vfwcvt_xu_f_v_u64m8_m(op0, op1, op2, op3) \
+__builtin_rvv_vfwcvt_xu_f_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vlmul_ext_v_f32mf2_f32m1(op0) \
+__builtin_rvv_vlmul_ext_v_f32mf2_f32m1((vfloat32mf2_t)(op0))
+#define vlmul_ext_v_f32m1_f32m2(op0) \
+__builtin_rvv_vlmul_ext_v_f32m1_f32m2((vfloat32m1_t)(op0))
+#define vlmul_ext_v_f32mf2_f32m2(op0) \
+__builtin_rvv_vlmul_ext_v_f32mf2_f32m2((vfloat32mf2_t)(op0))
+#define vlmul_ext_v_f32m1_f32m4(op0) \
+__builtin_rvv_vlmul_ext_v_f32m1_f32m4((vfloat32m1_t)(op0))
+#define vlmul_ext_v_f32m2_f32m4(op0) \
+__builtin_rvv_vlmul_ext_v_f32m2_f32m4((vfloat32m2_t)(op0))
+#define vlmul_ext_v_f32mf2_f32m4(op0) \
+__builtin_rvv_vlmul_ext_v_f32mf2_f32m4((vfloat32mf2_t)(op0))
+#define vlmul_ext_v_f32m1_f32m8(op0) \
+__builtin_rvv_vlmul_ext_v_f32m1_f32m8((vfloat32m1_t)(op0))
+#define vlmul_ext_v_f32m2_f32m8(op0) \
+__builtin_rvv_vlmul_ext_v_f32m2_f32m8((vfloat32m2_t)(op0))
+#define vlmul_ext_v_f32m4_f32m8(op0) \
+__builtin_rvv_vlmul_ext_v_f32m4_f32m8((vfloat32m4_t)(op0))
+#define vlmul_ext_v_f32mf2_f32m8(op0) \
+__builtin_rvv_vlmul_ext_v_f32mf2_f32m8((vfloat32mf2_t)(op0))
+#define vlmul_trunc_v_f32m1_f32mf2(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m1_f32mf2((vfloat32m1_t)(op0))
+#define vlmul_trunc_v_f32m2_f32mf2(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m2_f32mf2((vfloat32m2_t)(op0))
+#define vlmul_trunc_v_f32m4_f32mf2(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m4_f32mf2((vfloat32m4_t)(op0))
+#define vlmul_trunc_v_f32m8_f32mf2(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m8_f32mf2((vfloat32m8_t)(op0))
+#define vlmul_trunc_v_f32m2_f32m1(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m2_f32m1((vfloat32m2_t)(op0))
+#define vlmul_trunc_v_f32m4_f32m1(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m4_f32m1((vfloat32m4_t)(op0))
+#define vlmul_trunc_v_f32m8_f32m1(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m8_f32m1((vfloat32m8_t)(op0))
+#define vlmul_trunc_v_f32m4_f32m2(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m4_f32m2((vfloat32m4_t)(op0))
+#define vlmul_trunc_v_f32m8_f32m2(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m8_f32m2((vfloat32m8_t)(op0))
+#define vlmul_trunc_v_f32m8_f32m4(op0) \
+__builtin_rvv_vlmul_trunc_v_f32m8_f32m4((vfloat32m8_t)(op0))
+#define vreinterpret_v_i32m1_f32m1(op0) \
+__builtin_rvv_vreinterpret_v_i32m1_f32m1((vint32m1_t)(op0))
+#define vreinterpret_v_i32m2_f32m2(op0) \
+__builtin_rvv_vreinterpret_v_i32m2_f32m2((vint32m2_t)(op0))
+#define vreinterpret_v_i32m4_f32m4(op0) \
+__builtin_rvv_vreinterpret_v_i32m4_f32m4((vint32m4_t)(op0))
+#define vreinterpret_v_i32m8_f32m8(op0) \
+__builtin_rvv_vreinterpret_v_i32m8_f32m8((vint32m8_t)(op0))
+#define vreinterpret_v_i32mf2_f32mf2(op0) \
+__builtin_rvv_vreinterpret_v_i32mf2_f32mf2((vint32mf2_t)(op0))
+#define vreinterpret_v_u32m1_f32m1(op0) \
+__builtin_rvv_vreinterpret_v_u32m1_f32m1((vuint32m1_t)(op0))
+#define vreinterpret_v_u32m2_f32m2(op0) \
+__builtin_rvv_vreinterpret_v_u32m2_f32m2((vuint32m2_t)(op0))
+#define vreinterpret_v_u32m4_f32m4(op0) \
+__builtin_rvv_vreinterpret_v_u32m4_f32m4((vuint32m4_t)(op0))
+#define vreinterpret_v_u32m8_f32m8(op0) \
+__builtin_rvv_vreinterpret_v_u32m8_f32m8((vuint32m8_t)(op0))
+#define vreinterpret_v_u32mf2_f32mf2(op0) \
+__builtin_rvv_vreinterpret_v_u32mf2_f32mf2((vuint32mf2_t)(op0))
+#define vreinterpret_v_f32m1_i32m1(op0) \
+__builtin_rvv_vreinterpret_v_f32m1_i32m1((vfloat32m1_t)(op0))
+#define vreinterpret_v_f32m2_i32m2(op0) \
+__builtin_rvv_vreinterpret_v_f32m2_i32m2((vfloat32m2_t)(op0))
+#define vreinterpret_v_f32m4_i32m4(op0) \
+__builtin_rvv_vreinterpret_v_f32m4_i32m4((vfloat32m4_t)(op0))
+#define vreinterpret_v_f32m8_i32m8(op0) \
+__builtin_rvv_vreinterpret_v_f32m8_i32m8((vfloat32m8_t)(op0))
+#define vreinterpret_v_f32mf2_i32mf2(op0) \
+__builtin_rvv_vreinterpret_v_f32mf2_i32mf2((vfloat32mf2_t)(op0))
+#define vreinterpret_v_f32m1_u32m1(op0) \
+__builtin_rvv_vreinterpret_v_f32m1_u32m1((vfloat32m1_t)(op0))
+#define vreinterpret_v_f32m2_u32m2(op0) \
+__builtin_rvv_vreinterpret_v_f32m2_u32m2((vfloat32m2_t)(op0))
+#define vreinterpret_v_f32m4_u32m4(op0) \
+__builtin_rvv_vreinterpret_v_f32m4_u32m4((vfloat32m4_t)(op0))
+#define vreinterpret_v_f32m8_u32m8(op0) \
+__builtin_rvv_vreinterpret_v_f32m8_u32m8((vfloat32m8_t)(op0))
+#define vreinterpret_v_f32mf2_u32mf2(op0) \
+__builtin_rvv_vreinterpret_v_f32mf2_u32mf2((vfloat32mf2_t)(op0))
+#define vundefined_f32m1() \
+__builtin_rvv_vundefined_f32m1()
+#define vundefined_f32m2() \
+__builtin_rvv_vundefined_f32m2()
+#define vundefined_f32m4() \
+__builtin_rvv_vundefined_f32m4()
+#define vundefined_f32m8() \
+__builtin_rvv_vundefined_f32m8()
+#define vundefined_f32mf2() \
+__builtin_rvv_vundefined_f32mf2()
+#endif
+
+#if defined(__riscv_d)
+#define vlse64_v_f64m1(op0, op1, op2) \
+__builtin_rvv_vlse64_v_f64m1((const double *)(op0), (ptrdiff_t)(op1), (size_t)(op2))
+#define vlse64_v_f64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlse64_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vlse64_v_f64m2(op0, op1, op2) \
+__builtin_rvv_vlse64_v_f64m2((const double *)(op0), (ptrdiff_t)(op1), (size_t)(op2))
+#define vlse64_v_f64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlse64_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vlse64_v_f64m4(op0, op1, op2) \
+__builtin_rvv_vlse64_v_f64m4((const double *)(op0), (ptrdiff_t)(op1), (size_t)(op2))
+#define vlse64_v_f64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlse64_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vlse64_v_f64m8(op0, op1, op2) \
+__builtin_rvv_vlse64_v_f64m8((const double *)(op0), (ptrdiff_t)(op1), (size_t)(op2))
+#define vlse64_v_f64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlse64_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vsse64_v_f64m1(op0, op1, op2, op3) \
+__builtin_rvv_vsse64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3))
+#define vsse64_v_f64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsse64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsse64_v_f64m2(op0, op1, op2, op3) \
+__builtin_rvv_vsse64_v_f64m2((double *)(op0), (ptrdiff_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3))
+#define vsse64_v_f64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsse64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsse64_v_f64m4(op0, op1, op2, op3) \
+__builtin_rvv_vsse64_v_f64m4((double *)(op0), (ptrdiff_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3))
+#define vsse64_v_f64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsse64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsse64_v_f64m8(op0, op1, op2, op3) \
+__builtin_rvv_vsse64_v_f64m8((double *)(op0), (ptrdiff_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3))
+#define vsse64_v_f64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsse64_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4))
+#define vluxei8_v_f64m1(op0, op1, op2) \
+__builtin_rvv_vluxei8_v_f64m1((const double *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
+#define vluxei8_v_f64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxei8_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vluxei8_v_f64m2(op0, op1, op2) \
+__builtin_rvv_vluxei8_v_f64m2((const double *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
+#define vluxei8_v_f64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxei8_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vluxei8_v_f64m4(op0, op1, op2) \
+__builtin_rvv_vluxei8_v_f64m4((const double *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
+#define vluxei8_v_f64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxei8_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f64m8((const double *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f64m1((const double *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei16_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f64m2((const double *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f64m4((const double *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f64m8((const double *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f64m1((const double *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei32_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f64m2((const double *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f64m4((const double *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f64m8((const double *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f64m1((const double *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxei64_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f64m2((const double *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_f64m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vluxei64_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f64m4((const double *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f64m8((const double *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei8_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f64m1((const double *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei8_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f64m2((const double *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f64m4((const double *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f64m8((const double *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f64m1((const double *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei16_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f64m2((const double *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f64m4((const double *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f64m8((const double *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f64m1((const double *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f64m1_m((vbool64_t)(op0), 
(vfloat64m1_t)(op1), (const double *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei32_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f64m2((const double *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f64m4((const double *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f64m8((const double *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f64m1((const double *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei64_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f64m2((const double *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f64m4((const double *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f64m8((const double *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vmv_v_v_f64m1(op0, op1) \ +__builtin_rvv_vmv_v_v_f64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vmv_v_v_f64m2(op0, op1) \ +__builtin_rvv_vmv_v_v_f64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vmv_v_v_f64m4(op0, op1) \ +__builtin_rvv_vmv_v_v_f64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vmv_v_v_f64m8(op0, op1) \ +__builtin_rvv_vmv_v_v_f64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfadd_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfadd_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfadd_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfadd_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfadd_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfadd_vv_f64m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfadd_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfadd_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfadd_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfadd_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfadd_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfadd_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfadd_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfadd_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfadd_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfadd_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfadd_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsub_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfsub_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfsub_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfsub_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfsub_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfsub_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfsub_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfsub_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfsub_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsub_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsub_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsub_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsub_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f64m4((vfloat64m4_t)(op0), 
(double)(op1), (size_t)(op2)) +#define vfsub_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsub_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsub_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrsub_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrsub_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrsub_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrsub_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrsub_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrsub_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrsub_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrsub_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmul_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfmul_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmul_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfmul_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmul_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfmul_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmul_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfmul_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmul_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmul_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmul_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmul_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define 
vfmul_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmul_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmul_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmul_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfdiv_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfdiv_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfdiv_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfdiv_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfdiv_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfdiv_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfdiv_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfdiv_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfdiv_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrdiv_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrdiv_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrdiv_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrdiv_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f64m2_m((vbool32_t)(op0), 
(vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrdiv_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrdiv_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfrdiv_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfrdiv_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmacc_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f64m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmacc_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f64m8(op0, op1, op2, op3) \ 
+__builtin_rvv_vfmsac_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3))
+#define vfmsac_vv_f64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfmsac_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4))
+#define vfmsac_vf_f64m1(op0, op1, op2, op3) \
+__builtin_rvv_vfmsac_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3))
+#define vfmsac_vf_f64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfmsac_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vfmsac_vf_f64m2(op0, op1, op2, op3) \
+__builtin_rvv_vfmsac_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3))
+#define vfmsac_vf_f64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfmsac_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vfmsac_vf_f64m4(op0, op1, op2, op3) \
+__builtin_rvv_vfmsac_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3))
+#define vfmsac_vf_f64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfmsac_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vfmsac_vf_f64m8(op0, op1, op2, op3) \
+__builtin_rvv_vfmsac_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3))
+#define vfmsac_vf_f64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfmsac_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4))
+#define vfnmsac_vv_f64m1(op0, op1, op2, op3) \
+__builtin_rvv_vfnmsac_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3))
+#define vfnmsac_vv_f64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfnmsac_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vfnmsac_vv_f64m2(op0, op1, op2, op3) \
+__builtin_rvv_vfnmsac_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3))
+#define vfnmsac_vv_f64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfnmsac_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vfnmsac_vv_f64m4(op0, op1, op2, op3) \
+__builtin_rvv_vfnmsac_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3))
+#define vfnmsac_vv_f64m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfnmsac_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vfnmsac_vv_f64m8(op0, op1, op2, op3) \
+__builtin_rvv_vfnmsac_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3))
+#define vfnmsac_vv_f64m8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfnmsac_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4))
+#define vfnmsac_vf_f64m1(op0, op1, op2, op3) \
+__builtin_rvv_vfnmsac_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3))
+#define vfnmsac_vf_f64m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfnmsac_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vfnmsac_vf_f64m2(op0, op1, op2, op3) \
+__builtin_rvv_vfnmsac_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3))
+#define vfnmsac_vf_f64m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vfnmsac_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f64m1((vfloat64m1_t)(op0), 
(vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), 
(vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (vfloat64m2_t)(op2), 
(size_t)(op3)) +#define vfnmsub_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmin_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfmin_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmin_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfmin_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmin_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfmin_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmin_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfmin_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmin_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmin_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmin_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmin_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmin_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmin_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmin_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmin_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmax_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfmax_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), 
(vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmax_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfmax_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfmax_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfmax_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfmax_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfmax_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfmax_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmax_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmax_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmax_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmax_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmax_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfmax_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmax_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnj_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfsgnj_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define 
vfsgnj_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnj_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnj_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnj_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnj_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnj_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnj_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), 
(size_t)(op4)) +#define vfsgnjx_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfabs_v_f64m1(op0, op1) \ +__builtin_rvv_vfabs_v_f64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfabs_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfabs_v_f64m2(op0, op1) \ +__builtin_rvv_vfabs_v_f64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfabs_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfabs_v_f64m4(op0, op1) \ +__builtin_rvv_vfabs_v_f64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfabs_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfabs_v_f64m8(op0, op1) \ +__builtin_rvv_vfabs_v_f64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfabs_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vmfeq_vv_f64m1_b64(op0, op1, op2) 
\ +__builtin_rvv_vmfeq_vv_f64m1_b64((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f64m2_b32((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f64m4_b16((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f64m8_b8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vmfeq_vf_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f64m1_b64((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfeq_vf_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfeq_vf_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f64m2_b32((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfeq_vf_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfeq_vf_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f64m4_b16((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfeq_vf_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfeq_vf_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f64m8_b8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfeq_vf_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfne_vv_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f64m1_b64((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vmfne_vv_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vmfne_vv_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f64m2_b32((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vmfne_vv_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vmfne_vv_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f64m4_b16((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vmfne_vv_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vmfne_vv_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f64m8_b8((vfloat64m8_t)(op0), 
(vfloat64m8_t)(op1), (size_t)(op2)) +#define vmfne_vv_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vmfne_vf_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f64m1_b64((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfne_vf_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfne_vf_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f64m2_b32((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfne_vf_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfne_vf_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f64m4_b16((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfne_vf_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfne_vf_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f64m8_b8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfne_vf_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vmflt_vv_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f64m1_b64((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vmflt_vv_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vmflt_vv_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f64m2_b32((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vmflt_vv_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vmflt_vv_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f64m4_b16((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vmflt_vv_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vmflt_vv_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f64m8_b8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vmflt_vv_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vmflt_vf_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f64m1_b64((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vmflt_vf_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vmflt_vf_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f64m2_b32((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vmflt_vf_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vmflt_vf_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f64m4_b16((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vmflt_vf_f64m4_b16_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmflt_vf_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vmflt_vf_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f64m8_b8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vmflt_vf_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfle_vv_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f64m1_b64((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vmfle_vv_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vmfle_vv_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f64m2_b32((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vmfle_vv_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vmfle_vv_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f64m4_b16((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vmfle_vv_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vmfle_vv_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f64m8_b8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vmfle_vv_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vmfle_vf_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f64m1_b64((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfle_vf_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfle_vf_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f64m2_b32((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfle_vf_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfle_vf_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f64m4_b16((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfle_vf_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfle_vf_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f64m8_b8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfle_vf_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfgt_vv_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f64m1_b64((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f64m2_b32((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), 
(vfloat64m2_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f64m4_b16((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f64m8_b8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vmfgt_vf_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f64m1_b64((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfgt_vf_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfgt_vf_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f64m2_b32((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfgt_vf_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfgt_vf_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f64m4_b16((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfgt_vf_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfgt_vf_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f64m8_b8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfgt_vf_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfge_vv_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f64m1_b64((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vmfge_vv_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vmfge_vv_f64m2_b32(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f64m2_b32((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vmfge_vv_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vmfge_vv_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f64m4_b16((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vmfge_vv_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vmfge_vv_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f64m8_b8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vmfge_vv_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vmfge_vf_f64m1_b64(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f64m1_b64((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfge_vf_f64m1_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f64m1_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfge_vf_f64m2_b32(op0, op1, op2) \ 
+__builtin_rvv_vmfge_vf_f64m2_b32((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfge_vf_f64m2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f64m2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfge_vf_f64m4_b16(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f64m4_b16((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfge_vf_f64m4_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f64m4_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vmfge_vf_f64m8_b8(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f64m8_b8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vmfge_vf_f64m8_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vf_f64m8_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vmerge_vvm_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f64m1((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f64m2((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f64m4((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vmerge_vvm_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vmerge_vvm_f64m8((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfmerge_vfm_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f64m1((vbool64_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (size_t)(op3)) +#define vfmerge_vfm_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f64m2((vbool32_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (size_t)(op3)) +#define vfmerge_vfm_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f64m4((vbool16_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (size_t)(op3)) +#define vfmerge_vfm_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmerge_vfm_f64m8((vbool8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (size_t)(op3)) +#define vfmv_v_f_f64m1(op0, op1) \ +__builtin_rvv_vfmv_v_f_f64m1((double)(op0), (size_t)(op1)) +#define vfmv_v_f_f64m2(op0, op1) \ +__builtin_rvv_vfmv_v_f_f64m2((double)(op0), (size_t)(op1)) +#define vfmv_v_f_f64m4(op0, op1) \ +__builtin_rvv_vfmv_v_f_f64m4((double)(op0), (size_t)(op1)) +#define vfmv_v_f_f64m8(op0, op1) \ +__builtin_rvv_vfmv_v_f_f64m8((double)(op0), (size_t)(op1)) +#define vfredmax_vs_f64m1_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f64m1_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f64m1_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f64m1_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f64m2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f64m2_f64m1((vfloat64m1_t)(op0), (vfloat64m2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f64m2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f64m2_f64m1_m((vbool32_t)(op0), (vfloat64m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f64m4_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f64m4_f64m1((vfloat64m1_t)(op0), (vfloat64m4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f64m4_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f64m4_f64m1_m((vbool16_t)(op0), (vfloat64m1_t)(op1), 
(vfloat64m4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f64m8_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f64m8_f64m1((vfloat64m1_t)(op0), (vfloat64m8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f64m8_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f64m8_f64m1_m((vbool8_t)(op0), (vfloat64m1_t)(op1), (vfloat64m8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f64m1_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f64m1_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f64m1_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f64m1_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f64m2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f64m2_f64m1((vfloat64m1_t)(op0), (vfloat64m2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f64m2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f64m2_f64m1_m((vbool32_t)(op0), (vfloat64m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f64m4_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f64m4_f64m1((vfloat64m1_t)(op0), (vfloat64m4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f64m4_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f64m4_f64m1_m((vbool16_t)(op0), (vfloat64m1_t)(op1), (vfloat64m4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f64m8_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f64m8_f64m1((vfloat64m1_t)(op0), (vfloat64m8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f64m8_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f64m8_f64m1_m((vbool8_t)(op0), (vfloat64m1_t)(op1), (vfloat64m8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f64m1_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f64m1_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f64m1_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f64m1_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f64m2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f64m2_f64m1((vfloat64m1_t)(op0), (vfloat64m2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f64m2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f64m2_f64m1_m((vbool32_t)(op0), (vfloat64m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f64m4_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f64m4_f64m1((vfloat64m1_t)(op0), (vfloat64m4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f64m4_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f64m4_f64m1_m((vbool16_t)(op0), (vfloat64m1_t)(op1), (vfloat64m4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f64m8_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f64m8_f64m1((vfloat64m1_t)(op0), (vfloat64m8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f64m8_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f64m8_f64m1_m((vbool8_t)(op0), (vfloat64m1_t)(op1), (vfloat64m8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f64m1_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f64m1_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), 
(size_t)(op3)) +#define vfredosum_vs_f64m1_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f64m1_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f64m2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f64m2_f64m1((vfloat64m1_t)(op0), (vfloat64m2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f64m2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f64m2_f64m1_m((vbool32_t)(op0), (vfloat64m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f64m4_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f64m4_f64m1((vfloat64m1_t)(op0), (vfloat64m4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f64m4_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f64m4_f64m1_m((vbool16_t)(op0), (vfloat64m1_t)(op1), (vfloat64m4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f64m8_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f64m8_f64m1((vfloat64m1_t)(op0), (vfloat64m8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f64m8_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f64m8_f64m1_m((vbool8_t)(op0), (vfloat64m1_t)(op1), (vfloat64m8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfmv_f_s_f64m1_f64(op0) \ +__builtin_rvv_vfmv_f_s_f64m1_f64((vfloat64m1_t)(op0)) +#define vfmv_f_s_f64m2_f64(op0) \ +__builtin_rvv_vfmv_f_s_f64m2_f64((vfloat64m2_t)(op0)) +#define vfmv_f_s_f64m4_f64(op0) \ +__builtin_rvv_vfmv_f_s_f64m4_f64((vfloat64m4_t)(op0)) +#define vfmv_f_s_f64m8_f64(op0) \ +__builtin_rvv_vfmv_f_s_f64m8_f64((vfloat64m8_t)(op0)) +#define vfmv_s_f_f64m1(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmv_s_f_f64m2(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmv_s_f_f64m4(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfmv_s_f_f64m8(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vslideup_vx_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define 
vslidedown_vx_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vfslide1up_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1up_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1up_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1up_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1up_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1up_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1up_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1up_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1down_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1down_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1down_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1down_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1down_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1down_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), 
(vfloat64m4_t)(op2), (double)(op3), (size_t)(op4)) +#define vfslide1down_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2)) +#define vfslide1down_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (double)(op3), (size_t)(op4)) +#define vrgather_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f64m1((vfloat64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f64m2((vfloat64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f64m4((vfloat64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f64m8((vfloat64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vrgather_vx_f64m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f64m1((vfloat64m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f64m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f64m2((vfloat64m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f64m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f64m4((vfloat64m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f64m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f64m8((vfloat64m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f64m1((vfloat64m1_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f64m2((vfloat64m2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vuint16mf2_t)(op3), 
(size_t)(op4)) +#define vrgatherei16_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f64m4((vfloat64m4_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f64m8((vfloat64m8_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vcompress_vm_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f64m1((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f64m2((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f64m4((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f64m8((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vget_v_f64m2_f64m1(op0, op1) \ +__builtin_rvv_vget_v_f64m2_f64m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vget_v_f64m4_f64m1(op0, op1) \ +__builtin_rvv_vget_v_f64m4_f64m1((vfloat64m4_t)(op0), (size_t)(op1)) +#define vget_v_f64m8_f64m1(op0, op1) \ +__builtin_rvv_vget_v_f64m8_f64m1((vfloat64m8_t)(op0), (size_t)(op1)) +#define vget_v_f64m4_f64m2(op0, op1) \ +__builtin_rvv_vget_v_f64m4_f64m2((vfloat64m4_t)(op0), (size_t)(op1)) +#define vget_v_f64m8_f64m2(op0, op1) \ +__builtin_rvv_vget_v_f64m8_f64m2((vfloat64m8_t)(op0), (size_t)(op1)) +#define vget_v_f64m8_f64m4(op0, op1) \ +__builtin_rvv_vget_v_f64m8_f64m4((vfloat64m8_t)(op0), (size_t)(op1)) +#define vset_v_f64m1_f64m2(op0, op1, op2) \ +__builtin_rvv_vset_v_f64m1_f64m2((vfloat64m2_t)(op0), (size_t)(op1), (vfloat64m1_t)(op2)) +#define vset_v_f64m1_f64m4(op0, op1, op2) \ +__builtin_rvv_vset_v_f64m1_f64m4((vfloat64m4_t)(op0), (size_t)(op1), (vfloat64m1_t)(op2)) +#define vset_v_f64m2_f64m4(op0, op1, op2) \ +__builtin_rvv_vset_v_f64m2_f64m4((vfloat64m4_t)(op0), (size_t)(op1), (vfloat64m2_t)(op2)) +#define vset_v_f64m1_f64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_f64m1_f64m8((vfloat64m8_t)(op0), (size_t)(op1), (vfloat64m1_t)(op2)) +#define vset_v_f64m2_f64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_f64m2_f64m8((vfloat64m8_t)(op0), (size_t)(op1), (vfloat64m2_t)(op2)) +#define vset_v_f64m4_f64m8(op0, op1, op2) \ +__builtin_rvv_vset_v_f64m4_f64m8((vfloat64m8_t)(op0), (size_t)(op1), (vfloat64m4_t)(op2)) +#define vsuxei8_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f64m4((double *)(op0), 
(vuint8mf2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint8mf2_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f64m8((double *)(op0), (vuint8m1_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint8m1_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f64m4((double *)(op0), (vuint16m1_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint16m1_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f64m8((double *)(op0), (vuint16m2_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint16m2_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f64m4((double *)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint32m2_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f64m8((double *)(op0), (vuint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint32m4_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), 
(size_t)(op4)) +#define vsuxei64_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f64m4((double *)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f64m8((double *)(op0), (vuint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f64m4((double *)(op0), (vuint8mf2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint8mf2_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f64m8((double *)(op0), (vuint8m1_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint8m1_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f64m4((double *)(op0), (vuint16m1_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint16m1_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f64m8((double *)(op0), (vuint16m2_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f64m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxei16_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint16m2_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f64m4((double *)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint32m2_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f64m8((double *)(op0), (vuint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint32m4_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f64m4((double *)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f64m8((double *)(op0), (vuint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vle64ff_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_f64m1((const double *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_f64m2((const double *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_f64m4((const double *)(op0), (size_t *)(op1), (size_t)(op2)) +#define 
vle64ff_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle64ff_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vle64ff_v_f64m8((const double *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle64ff_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle64ff_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vfneg_v_f64m1(op0, op1) \ +__builtin_rvv_vfneg_v_f64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfneg_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfneg_v_f64m2(op0, op1) \ +__builtin_rvv_vfneg_v_f64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfneg_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfneg_v_f64m4(op0, op1) \ +__builtin_rvv_vfneg_v_f64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfneg_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfneg_v_f64m8(op0, op1) \ +__builtin_rvv_vfneg_v_f64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfneg_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vle64_v_f64m1(op0, op1) \ +__builtin_rvv_vle64_v_f64m1((const double *)(op0), (size_t)(op1)) +#define vle64_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (const double *)(op2), (size_t)(op3)) +#define vle64_v_f64m2(op0, op1) \ +__builtin_rvv_vle64_v_f64m2((const double *)(op0), (size_t)(op1)) +#define vle64_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (const double *)(op2), (size_t)(op3)) +#define vle64_v_f64m4(op0, op1) \ +__builtin_rvv_vle64_v_f64m4((const double *)(op0), (size_t)(op1)) +#define vle64_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (const double *)(op2), (size_t)(op3)) +#define vle64_v_f64m8(op0, op1) \ +__builtin_rvv_vle64_v_f64m8((const double *)(op0), (size_t)(op1)) +#define vle64_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle64_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (const double *)(op2), (size_t)(op3)) +#define vse64_v_f64m1(op0, op1, op2) \ +__builtin_rvv_vse64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (size_t)(op2)) +#define vse64_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vse64_v_f64m2(op0, op1, op2) \ +__builtin_rvv_vse64_v_f64m2((double *)(op0), (vfloat64m2_t)(op1), (size_t)(op2)) +#define vse64_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vse64_v_f64m4(op0, op1, op2) \ +__builtin_rvv_vse64_v_f64m4((double *)(op0), (vfloat64m4_t)(op1), (size_t)(op2)) +#define vse64_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vse64_v_f64m8(op0, op1, op2) \ +__builtin_rvv_vse64_v_f64m8((double *)(op0), (vfloat64m8_t)(op1), (size_t)(op2)) +#define vse64_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse64_v_f64m8_m((vbool8_t)(op0), 
(double *)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfclass_v_u64m1(op0, op1) \ +__builtin_rvv_vfclass_v_u64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfclass_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfclass_v_u64m2(op0, op1) \ +__builtin_rvv_vfclass_v_u64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfclass_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfclass_v_u64m4(op0, op1) \ +__builtin_rvv_vfclass_v_u64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfclass_v_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfclass_v_u64m8(op0, op1) \ +__builtin_rvv_vfclass_v_u64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfclass_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f64m1(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f64m1((vint64m1_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f64m2(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f64m2((vint64m2_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f64m4(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f64m4((vint64m4_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f64m8(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f64m8((vint64m8_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f64m1(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f64m1((vuint64m1_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f64m2(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f64m2((vuint64m2_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f64m4(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f64m4((vuint64m4_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f64m8(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f64m8((vuint64m8_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i64m1(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i64m2(op0, 
op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i64m4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i64m8(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u64m1(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u64m2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u64m4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u64m8(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i64m1(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i64m1_m((vbool64_t)(op0), (vint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i64m2(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i64m2_m((vbool32_t)(op0), (vint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i64m4(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i64m4_m((vbool16_t)(op0), (vint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i64m8(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i64m8_m((vbool8_t)(op0), (vint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u64m1(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u64m1_m((vbool64_t)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u64m2(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u64m2_m((vbool32_t)(op0), (vuint64m2_t)(op1), 
(vfloat64m2_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u64m4(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u64m4_m((vbool16_t)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u64m8(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u64m8_m((vbool8_t)(op0), (vuint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i32mf2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32mf2((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i32m1(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i32m2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32m2((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i32m4(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32m4((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u32mf2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32mf2((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u32m1(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u32m2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32m2((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u32m4(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32m4((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i32mf2(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i32mf2((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i32m1(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i32m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i32m2(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i32m2((vfloat64m4_t)(op0), 
(size_t)(op1)) +#define vfncvt_x_f_w_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i32m4(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i32m4((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u32mf2(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u32mf2((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u32m1(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u32m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u32m2(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u32m2((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u32m4(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u32m4((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfrec7_v_f64m1(op0, op1) \ +__builtin_rvv_vfrec7_v_f64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfrec7_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfrec7_v_f64m2(op0, op1) \ +__builtin_rvv_vfrec7_v_f64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfrec7_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfrec7_v_f64m4(op0, op1) \ +__builtin_rvv_vfrec7_v_f64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfrec7_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfrec7_v_f64m8(op0, op1) \ +__builtin_rvv_vfrec7_v_f64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfrec7_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f64m1(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f64m2(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f64m4(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f64m8(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f64m8_m(op0, op1, op2, op3) \ 
+__builtin_rvv_vfrsqrt7_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f64m1(op0, op1) \ +__builtin_rvv_vfsqrt_v_f64m1((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f64m2(op0, op1) \ +__builtin_rvv_vfsqrt_v_f64m2((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f64m4(op0, op1) \ +__builtin_rvv_vfsqrt_v_f64m4((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f64m8(op0, op1) \ +__builtin_rvv_vfsqrt_v_f64m8((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f64m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f64m1((vint32mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f64m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f64m2((vint32m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f64m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f64m4((vint32m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f64m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f64m8((vint32m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f64m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m1((vuint32mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f64m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m2((vuint32m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f64m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m4((vuint32m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f64m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m8((vuint32m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vlmul_ext_v_f64m1_f64m2(op0) \ +__builtin_rvv_vlmul_ext_v_f64m1_f64m2((vfloat64m1_t)(op0)) +#define vlmul_ext_v_f64m1_f64m4(op0) \ +__builtin_rvv_vlmul_ext_v_f64m1_f64m4((vfloat64m1_t)(op0)) +#define vlmul_ext_v_f64m2_f64m4(op0) \ 
+__builtin_rvv_vlmul_ext_v_f64m2_f64m4((vfloat64m2_t)(op0)) +#define vlmul_ext_v_f64m1_f64m8(op0) \ +__builtin_rvv_vlmul_ext_v_f64m1_f64m8((vfloat64m1_t)(op0)) +#define vlmul_ext_v_f64m2_f64m8(op0) \ +__builtin_rvv_vlmul_ext_v_f64m2_f64m8((vfloat64m2_t)(op0)) +#define vlmul_ext_v_f64m4_f64m8(op0) \ +__builtin_rvv_vlmul_ext_v_f64m4_f64m8((vfloat64m4_t)(op0)) +#define vlmul_trunc_v_f64m2_f64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_f64m2_f64m1((vfloat64m2_t)(op0)) +#define vlmul_trunc_v_f64m4_f64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_f64m4_f64m1((vfloat64m4_t)(op0)) +#define vlmul_trunc_v_f64m8_f64m1(op0) \ +__builtin_rvv_vlmul_trunc_v_f64m8_f64m1((vfloat64m8_t)(op0)) +#define vlmul_trunc_v_f64m4_f64m2(op0) \ +__builtin_rvv_vlmul_trunc_v_f64m4_f64m2((vfloat64m4_t)(op0)) +#define vlmul_trunc_v_f64m8_f64m2(op0) \ +__builtin_rvv_vlmul_trunc_v_f64m8_f64m2((vfloat64m8_t)(op0)) +#define vlmul_trunc_v_f64m8_f64m4(op0) \ +__builtin_rvv_vlmul_trunc_v_f64m8_f64m4((vfloat64m8_t)(op0)) +#define vreinterpret_v_i64m1_f64m1(op0) \ +__builtin_rvv_vreinterpret_v_i64m1_f64m1((vint64m1_t)(op0)) +#define vreinterpret_v_i64m2_f64m2(op0) \ +__builtin_rvv_vreinterpret_v_i64m2_f64m2((vint64m2_t)(op0)) +#define vreinterpret_v_i64m4_f64m4(op0) \ +__builtin_rvv_vreinterpret_v_i64m4_f64m4((vint64m4_t)(op0)) +#define vreinterpret_v_i64m8_f64m8(op0) \ +__builtin_rvv_vreinterpret_v_i64m8_f64m8((vint64m8_t)(op0)) +#define vreinterpret_v_u64m1_f64m1(op0) \ +__builtin_rvv_vreinterpret_v_u64m1_f64m1((vuint64m1_t)(op0)) +#define vreinterpret_v_u64m2_f64m2(op0) \ +__builtin_rvv_vreinterpret_v_u64m2_f64m2((vuint64m2_t)(op0)) +#define vreinterpret_v_u64m4_f64m4(op0) \ +__builtin_rvv_vreinterpret_v_u64m4_f64m4((vuint64m4_t)(op0)) +#define vreinterpret_v_u64m8_f64m8(op0) \ +__builtin_rvv_vreinterpret_v_u64m8_f64m8((vuint64m8_t)(op0)) +#define vreinterpret_v_f64m1_i64m1(op0) \ +__builtin_rvv_vreinterpret_v_f64m1_i64m1((vfloat64m1_t)(op0)) +#define vreinterpret_v_f64m2_i64m2(op0) \ +__builtin_rvv_vreinterpret_v_f64m2_i64m2((vfloat64m2_t)(op0)) +#define vreinterpret_v_f64m4_i64m4(op0) \ +__builtin_rvv_vreinterpret_v_f64m4_i64m4((vfloat64m4_t)(op0)) +#define vreinterpret_v_f64m8_i64m8(op0) \ +__builtin_rvv_vreinterpret_v_f64m8_i64m8((vfloat64m8_t)(op0)) +#define vreinterpret_v_f64m1_u64m1(op0) \ +__builtin_rvv_vreinterpret_v_f64m1_u64m1((vfloat64m1_t)(op0)) +#define vreinterpret_v_f64m2_u64m2(op0) \ +__builtin_rvv_vreinterpret_v_f64m2_u64m2((vfloat64m2_t)(op0)) +#define vreinterpret_v_f64m4_u64m4(op0) \ +__builtin_rvv_vreinterpret_v_f64m4_u64m4((vfloat64m4_t)(op0)) +#define vreinterpret_v_f64m8_u64m8(op0) \ +__builtin_rvv_vreinterpret_v_f64m8_u64m8((vfloat64m8_t)(op0)) +#define vundefined_f64m1() \ +__builtin_rvv_vundefined_f64m1() +#define vundefined_f64m2() \ +__builtin_rvv_vundefined_f64m2() +#define vundefined_f64m4() \ +__builtin_rvv_vundefined_f64m4() +#define vundefined_f64m8() \ +__builtin_rvv_vundefined_f64m8() +#endif + +#if defined(__riscv_f) && defined(__riscv_d) +#define vfwadd_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f64m1((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f64m2((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), 
(vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f64m4((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f64m8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwadd_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f64m1((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f64m2((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f64m4((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f64m8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f64m1((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f64m2((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f64m4((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f64m8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwsub_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f64m1((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f64m2((vfloat32m1_t)(op0), (float)(op1), 
(size_t)(op2)) +#define vfwsub_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f64m4((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f64m8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_wv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f64m2((vfloat64m2_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f64m4((vfloat64m4_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f64m8((vfloat64m8_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwadd_wf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f64m1((vfloat64m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_wf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_wf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f64m2((vfloat64m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_wf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_wf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f64m4((vfloat64m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_wf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwadd_wf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f64m8((vfloat64m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwadd_wf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_wv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) 
+#define vfwsub_wv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f64m2((vfloat64m2_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwsub_wv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f64m4((vfloat64m4_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwsub_wv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f64m8((vfloat64m8_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwsub_wf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f64m1((vfloat64m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_wf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_wf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f64m2((vfloat64m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_wf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_wf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f64m4((vfloat64m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_wf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwsub_wf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f64m8((vfloat64m8_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwsub_wf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwmul_vv_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f64m1((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwmul_vv_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f64m2((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwmul_vv_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f64m4((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwmul_vv_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f64m8((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwmul_vf_f64m1(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f64m1((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwmul_vf_f64m1_m(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vfwmul_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwmul_vf_f64m2(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f64m2((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwmul_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwmul_vf_f64m4(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f64m4((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwmul_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwmul_vf_f64m8(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f64m8((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2)) +#define vfwmul_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (float)(op3), (size_t)(op4)) +#define vfwmacc_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f64m2((vfloat64m2_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f64m4((vfloat64m4_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f64m8((vfloat64m8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f64m1((vfloat64m1_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f64m2((vfloat64m2_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f64m4((vfloat64m4_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f64m8((vfloat64m8_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define 
vfwmacc_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f64m2((vfloat64m2_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f64m4((vfloat64m4_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f64m8((vfloat64m8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f64m1((vfloat64m1_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f64m2((vfloat64m2_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f64m4((vfloat64m4_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f64m8((vfloat64m8_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f64m2((vfloat64m2_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), 
(vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f64m4((vfloat64m4_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f64m8((vfloat64m8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f64m1((vfloat64m1_t)(op0), (float)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f64m2((vfloat64m2_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f64m4((vfloat64m4_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f64m8((vfloat64m8_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f64m2((vfloat64m2_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f64m4((vfloat64m4_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f64m8((vfloat64m8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f64m1((vfloat64m1_t)(op0), (float)(op1), 
(vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (float)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f64m2((vfloat64m2_t)(op0), (float)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (float)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f64m4((vfloat64m4_t)(op0), (float)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (float)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f64m8((vfloat64m8_t)(op0), (float)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (float)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f32m1_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f32m1_f64m1((vfloat64m1_t)(op0), (vfloat32m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f32m1_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f32m1_f64m1_m((vbool32_t)(op0), (vfloat64m1_t)(op1), (vfloat32m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f32m2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f32m2_f64m1((vfloat64m1_t)(op0), (vfloat32m2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f32m2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f32m2_f64m1_m((vbool16_t)(op0), (vfloat64m1_t)(op1), (vfloat32m2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f32m4_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f32m4_f64m1((vfloat64m1_t)(op0), (vfloat32m4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f32m4_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f32m4_f64m1_m((vbool8_t)(op0), (vfloat64m1_t)(op1), (vfloat32m4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f32m8_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f32m8_f64m1((vfloat64m1_t)(op0), (vfloat32m8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f32m8_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f32m8_f64m1_m((vbool4_t)(op0), (vfloat64m1_t)(op1), (vfloat32m8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f32mf2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f32mf2_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f32mf2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f32mf2_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f32m1_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f32m1_f64m1((vfloat64m1_t)(op0), (vfloat32m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f32m1_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f32m1_f64m1_m((vbool32_t)(op0), (vfloat64m1_t)(op1), (vfloat32m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f32m2_f64m1(op0, op1, op2, op3) \ 
+__builtin_rvv_vfwredosum_vs_f32m2_f64m1((vfloat64m1_t)(op0), (vfloat32m2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f32m2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f32m2_f64m1_m((vbool16_t)(op0), (vfloat64m1_t)(op1), (vfloat32m2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f32m4_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f32m4_f64m1((vfloat64m1_t)(op0), (vfloat32m4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f32m4_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f32m4_f64m1_m((vbool8_t)(op0), (vfloat64m1_t)(op1), (vfloat32m4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f32m8_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f32m8_f64m1((vfloat64m1_t)(op0), (vfloat32m8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f32m8_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f32m8_f64m1_m((vbool4_t)(op0), (vfloat64m1_t)(op1), (vfloat32m8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f32mf2_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f32mf2_f64m1((vfloat64m1_t)(op0), (vfloat32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f32mf2_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f32mf2_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vfncvt_f_f_w_f32mf2(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f32mf2((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfncvt_f_f_w_f32m1(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f32m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfncvt_f_f_w_f32m2(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f32m2((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfncvt_f_f_w_f32m4(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f32m4((vfloat64m8_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f32mf2(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32mf2((vfloat64m1_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f32m1(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32m1((vfloat64m2_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f32m2(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32m2((vfloat64m4_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f32m4(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32m4((vfloat64m8_t)(op0), (size_t)(op1)) 
+#define vfncvt_rod_f_f_w_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f64m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f64m1((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f64m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f64m1_m((vbool64_t)(op0), (vfloat64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f64m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f64m2((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f64m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f64m2_m((vbool32_t)(op0), (vfloat64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f64m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f64m4((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f64m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f64m4_m((vbool16_t)(op0), (vfloat64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f64m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f64m8((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f64m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f64m8_m((vbool8_t)(op0), (vfloat64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#endif + +#if defined(__riscv_zfh) +#define vsse16_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsse16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsse16_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_f16m2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsse16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsse16_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_f16m4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsse16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsse16_v_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_f16m8((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vsse16_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_f16m8_m((vbool2_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vsse16_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsse16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsse16_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsse16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsse16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsse16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f16m1((const _Float16 *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vluxei8_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), 
(vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxei8_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f16m2((const _Float16 *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vluxei8_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxei8_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f16m4((const _Float16 *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vluxei8_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxei8_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f16m8((const _Float16 *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vluxei8_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxei8_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f16mf2((const _Float16 *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vluxei8_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxei8_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei8_v_f16mf4((const _Float16 *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vluxei8_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei8_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxei16_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f16m1((const _Float16 *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vluxei16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxei16_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f16m2((const _Float16 *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vluxei16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxei16_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f16m4((const _Float16 *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vluxei16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxei16_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f16m8((const _Float16 *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vluxei16_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxei16_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f16mf2((const _Float16 *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vluxei16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxei16_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei16_v_f16mf4((const _Float16 *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vluxei16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei16_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), 
(vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxei32_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f16m1((const _Float16 *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vluxei32_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxei32_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f16m2((const _Float16 *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vluxei32_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxei32_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f16m4((const _Float16 *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vluxei32_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxei32_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f16mf2((const _Float16 *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vluxei32_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxei32_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei32_v_f16mf4((const _Float16 *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vluxei32_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei32_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxei64_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f16m1((const _Float16 *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vluxei64_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxei64_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f16m2((const _Float16 *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vluxei64_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxei64_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f16mf2((const _Float16 *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vluxei64_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxei64_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vluxei64_v_f16mf4((const _Float16 *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vluxei64_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxei64_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f16m1((const _Float16 *)(op0), (vuint8mf2_t)(op1), (size_t)(op2)) +#define vloxei8_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxei8_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f16m2((const _Float16 *)(op0), (vuint8m1_t)(op1), (size_t)(op2)) +#define vloxei8_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 
*)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxei8_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f16m4((const _Float16 *)(op0), (vuint8m2_t)(op1), (size_t)(op2)) +#define vloxei8_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxei8_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f16m8((const _Float16 *)(op0), (vuint8m4_t)(op1), (size_t)(op2)) +#define vloxei8_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vloxei8_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f16mf2((const _Float16 *)(op0), (vuint8mf4_t)(op1), (size_t)(op2)) +#define vloxei8_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxei8_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei8_v_f16mf4((const _Float16 *)(op0), (vuint8mf8_t)(op1), (size_t)(op2)) +#define vloxei8_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei8_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxei16_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f16m1((const _Float16 *)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vloxei16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxei16_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f16m2((const _Float16 *)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vloxei16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxei16_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f16m4((const _Float16 *)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vloxei16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxei16_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f16m8((const _Float16 *)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vloxei16_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxei16_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f16mf2((const _Float16 *)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vloxei16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxei16_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei16_v_f16mf4((const _Float16 *)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vloxei16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei16_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxei32_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f16m1((const _Float16 *)(op0), (vuint32m2_t)(op1), (size_t)(op2)) +#define vloxei32_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 
*)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxei32_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f16m2((const _Float16 *)(op0), (vuint32m4_t)(op1), (size_t)(op2)) +#define vloxei32_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxei32_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f16m4((const _Float16 *)(op0), (vuint32m8_t)(op1), (size_t)(op2)) +#define vloxei32_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxei32_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f16mf2((const _Float16 *)(op0), (vuint32m1_t)(op1), (size_t)(op2)) +#define vloxei32_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxei32_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei32_v_f16mf4((const _Float16 *)(op0), (vuint32mf2_t)(op1), (size_t)(op2)) +#define vloxei32_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei32_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxei64_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f16m1((const _Float16 *)(op0), (vuint64m4_t)(op1), (size_t)(op2)) +#define vloxei64_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxei64_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f16m2((const _Float16 *)(op0), (vuint64m8_t)(op1), (size_t)(op2)) +#define vloxei64_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxei64_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f16mf2((const _Float16 *)(op0), (vuint64m2_t)(op1), (size_t)(op2)) +#define vloxei64_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxei64_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vloxei64_v_f16mf4((const _Float16 *)(op0), (vuint64m1_t)(op1), (size_t)(op2)) +#define vloxei64_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxei64_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vmv_v_v_f16m1(op0, op1) \ +__builtin_rvv_vmv_v_v_f16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vmv_v_v_f16m2(op0, op1) \ +__builtin_rvv_vmv_v_v_f16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vmv_v_v_f16m4(op0, op1) \ +__builtin_rvv_vmv_v_v_f16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vmv_v_v_f16m8(op0, op1) \ +__builtin_rvv_vmv_v_v_f16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vmv_v_v_f16mf2(op0, op1) \ +__builtin_rvv_vmv_v_v_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vmv_v_v_f16mf4(op0, op1) \ +__builtin_rvv_vmv_v_v_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfadd_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfadd_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), 
(size_t)(op4)) +#define vfadd_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfadd_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfadd_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfadd_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfadd_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfadd_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfadd_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfadd_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfadd_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfadd_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfadd_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfadd_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfadd_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfadd_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfadd_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfadd_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfadd_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfadd_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfadd_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfadd_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfadd_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfadd_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfadd_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfadd_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfadd_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsub_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfsub_vv_f16m1_m(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfsub_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfsub_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfsub_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfsub_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfsub_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfsub_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfsub_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfsub_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfsub_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsub_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfsub_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfsub_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsub_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsub_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsub_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsub_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsub_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsub_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsub_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsub_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsub_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsub_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsub_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsub_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsub_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define 
vfrsub_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrsub_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrsub_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrsub_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrsub_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrsub_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrsub_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrsub_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrsub_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrsub_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrsub_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfrsub_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrsub_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrsub_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmul_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfmul_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmul_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfmul_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmul_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfmul_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmul_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfmul_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmul_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfmul_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmul_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmul_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfmul_vv_f16mf4_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vfmul_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmul_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmul_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmul_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmul_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmul_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmul_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmul_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmul_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmul_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmul_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmul_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmul_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmul_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmul_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfdiv_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfdiv_vv_f16mf4(op0, op1, 
op2) \ +__builtin_rvv_vfdiv_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfdiv_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfdiv_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfdiv_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfdiv_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfdiv_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfdiv_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfdiv_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfdiv_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfdiv_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfdiv_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfdiv_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfdiv_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfdiv_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfdiv_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfdiv_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrdiv_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrdiv_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrdiv_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrdiv_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrdiv_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrdiv_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrdiv_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrdiv_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrdiv_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrdiv_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f16mf2_m((vbool32_t)(op0), 
(vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfrdiv_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfrdiv_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfrdiv_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfrdiv_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmacc_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmacc_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmacc_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), 
(vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmacc_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmacc_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmacc_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmacc_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmacc_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmacc_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f16m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmacc_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmacc_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmacc_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmacc_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmacc_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmsac_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmsac_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define 
vfmsac_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmsac_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsac_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmsac_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsac_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define 
vfnmsac_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmsac_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmsac_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmsac_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsac_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmsac_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsac_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), 
(vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmadd_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmadd_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmadd_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmadd_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmadd_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmadd_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), 
(vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmadd_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmadd_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmadd_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmadd_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmadd_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmadd_vf_f16mf4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfnmadd_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmsub_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmsub_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f16mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vfmsub_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmsub_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfmsub_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfmsub_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmsub_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmsub_vv_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmsub_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define 
vfnmsub_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfnmsub_vf_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vfnmsub_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfnmsub_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfnmsub_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmin_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfmin_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmin_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfmin_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmin_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfmin_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmin_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfmin_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmin_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfmin_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmin_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmin_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfmin_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmin_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmin_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmin_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmin_vf_f16m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfmin_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmin_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmin_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmin_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmin_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmin_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmin_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmin_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmin_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmin_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmin_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmax_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfmax_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfmax_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfmax_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfmax_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfmax_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfmax_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfmax_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfmax_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfmax_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfmax_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmax_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfmax_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmax_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmax_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmax_vf_f16m2(op0, op1, op2) \ 
+__builtin_rvv_vfmax_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmax_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmax_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmax_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmax_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmax_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmax_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmax_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfmax_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmax_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmax_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfmax_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnj_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfsgnj_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfsgnj_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfsgnj_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnj_vf_f16m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfsgnj_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnj_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnj_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnj_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnj_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnj_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnj_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnj_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnj_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnj_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsgnj_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnj_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnj_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfsgnjn_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfsgnjn_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), 
(vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjn_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsgnjn_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjn_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjn_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vv_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfsgnjx_vv_f16mf4(op0, op1, op2) \ 
+__builtin_rvv_vfsgnjx_vv_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfsgnjx_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfsgnjx_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfsgnjx_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfsgnjx_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfsgnjx_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfabs_v_f16m1(op0, op1) \ +__builtin_rvv_vfabs_v_f16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfabs_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfabs_v_f16m2(op0, op1) \ +__builtin_rvv_vfabs_v_f16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfabs_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfabs_v_f16m4(op0, op1) \ +__builtin_rvv_vfabs_v_f16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfabs_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfabs_v_f16m8(op0, op1) \ +__builtin_rvv_vfabs_v_f16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfabs_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfabs_v_f16mf2(op0, op1) \ +__builtin_rvv_vfabs_v_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfabs_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfabs_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfabs_v_f16mf4(op0, op1) \ +__builtin_rvv_vfabs_v_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfabs_v_f16mf4_m(op0, op1, 
op2, op3) \ +__builtin_rvv_vfabs_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vmfeq_vv_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f16m1_b16((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f16m2_b8((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f16m4_b4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f16m8_b2((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f16mf2_b32((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vmfeq_vv_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfeq_vv_f16mf4_b64((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vmfeq_vv_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vv_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vmfeq_vf_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f16m1_b16((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfeq_vf_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfeq_vf_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f16m2_b8((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfeq_vf_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfeq_vf_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f16m4_b4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfeq_vf_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfeq_vf_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f16m8_b2((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfeq_vf_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfeq_vf_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f16mf2_b32((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfeq_vf_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), 
(vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfeq_vf_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfeq_vf_f16mf4_b64((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfeq_vf_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfeq_vf_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfne_vv_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f16m1_b16((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vmfne_vv_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vmfne_vv_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f16m2_b8((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vmfne_vv_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vmfne_vv_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f16m4_b4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vmfne_vv_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vmfne_vv_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f16m8_b2((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vmfne_vv_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vmfne_vv_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f16mf2_b32((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vmfne_vv_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vmfne_vv_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfne_vv_f16mf4_b64((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vmfne_vv_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vv_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vmfne_vf_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f16m1_b16((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfne_vf_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfne_vf_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f16m2_b8((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfne_vf_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfne_vf_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f16m4_b4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfne_vf_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfne_vf_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f16m8_b2((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfne_vf_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define 
vmfne_vf_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f16mf2_b32((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfne_vf_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfne_vf_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfne_vf_f16mf4_b64((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfne_vf_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfne_vf_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmflt_vv_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f16m1_b16((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vmflt_vv_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vmflt_vv_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f16m2_b8((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vmflt_vv_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vmflt_vv_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f16m4_b4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vmflt_vv_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vmflt_vv_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f16m8_b2((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vmflt_vv_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vmflt_vv_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f16mf2_b32((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vmflt_vv_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vmflt_vv_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmflt_vv_f16mf4_b64((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vmflt_vv_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vv_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vmflt_vf_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f16m1_b16((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmflt_vf_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmflt_vf_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f16m2_b8((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmflt_vf_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmflt_vf_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f16m4_b4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmflt_vf_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmflt_vf_f16m8_b2(op0, op1, op2) \ 
+__builtin_rvv_vmflt_vf_f16m8_b2((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmflt_vf_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmflt_vf_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f16mf2_b32((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmflt_vf_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmflt_vf_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmflt_vf_f16mf4_b64((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmflt_vf_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmflt_vf_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfle_vv_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f16m1_b16((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vmfle_vv_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vmfle_vv_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f16m2_b8((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vmfle_vv_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vmfle_vv_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f16m4_b4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vmfle_vv_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vmfle_vv_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f16m8_b2((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vmfle_vv_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vmfle_vv_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f16mf2_b32((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vmfle_vv_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vmfle_vv_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfle_vv_f16mf4_b64((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vmfle_vv_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vv_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vmfle_vf_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f16m1_b16((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfle_vf_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfle_vf_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f16m2_b8((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfle_vf_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfle_vf_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f16m4_b4((vfloat16m4_t)(op0), 
(_Float16)(op1), (size_t)(op2)) +#define vmfle_vf_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfle_vf_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f16m8_b2((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfle_vf_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfle_vf_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f16mf2_b32((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfle_vf_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfle_vf_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfle_vf_f16mf4_b64((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfle_vf_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfle_vf_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfgt_vv_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f16m1_b16((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f16m2_b8((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f16m4_b4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f16m8_b2((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f16mf2_b32((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vmfgt_vv_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfgt_vv_f16mf4_b64((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vmfgt_vv_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vv_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vmfgt_vf_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f16m1_b16((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfgt_vf_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfgt_vf_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f16m2_b8((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define 
vmfgt_vf_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfgt_vf_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f16m4_b4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfgt_vf_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfgt_vf_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f16m8_b2((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfgt_vf_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfgt_vf_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f16mf2_b32((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfgt_vf_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfgt_vf_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfgt_vf_f16mf4_b64((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfgt_vf_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfgt_vf_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vmfge_vv_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f16m1_b16((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vmfge_vv_f16m1_b16_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vmfge_vv_f16m2_b8(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f16m2_b8((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vmfge_vv_f16m2_b8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vmfge_vv_f16m4_b4(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f16m4_b4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vmfge_vv_f16m4_b4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vmfge_vv_f16m8_b2(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f16m8_b2((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vmfge_vv_f16m8_b2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vmfge_vv_f16mf2_b32(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f16mf2_b32((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vmfge_vv_f16mf2_b32_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vmfge_vv_f16mf4_b64(op0, op1, op2) \ +__builtin_rvv_vmfge_vv_f16mf4_b64((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vmfge_vv_f16mf4_b64_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vmfge_vv_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vmfge_vf_f16m1_b16(op0, op1, op2) \ +__builtin_rvv_vmfge_vf_f16m1_b16((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vmfge_vf_f16m1_b16_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vmfge_vf_f16m1_b16_m((vbool16_t)(op0), (vbool16_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4))
+#define vmfge_vf_f16m2_b8(op0, op1, op2) \
+__builtin_rvv_vmfge_vf_f16m2_b8((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2))
+#define vmfge_vf_f16m2_b8_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vmfge_vf_f16m2_b8_m((vbool8_t)(op0), (vbool8_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4))
+#define vmfge_vf_f16m4_b4(op0, op1, op2) \
+__builtin_rvv_vmfge_vf_f16m4_b4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2))
+#define vmfge_vf_f16m4_b4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vmfge_vf_f16m4_b4_m((vbool4_t)(op0), (vbool4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4))
+#define vmfge_vf_f16m8_b2(op0, op1, op2) \
+__builtin_rvv_vmfge_vf_f16m8_b2((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2))
+#define vmfge_vf_f16m8_b2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vmfge_vf_f16m8_b2_m((vbool2_t)(op0), (vbool2_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4))
+#define vmfge_vf_f16mf2_b32(op0, op1, op2) \
+__builtin_rvv_vmfge_vf_f16mf2_b32((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2))
+#define vmfge_vf_f16mf2_b32_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vmfge_vf_f16mf2_b32_m((vbool32_t)(op0), (vbool32_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4))
+#define vmfge_vf_f16mf4_b64(op0, op1, op2) \
+__builtin_rvv_vmfge_vf_f16mf4_b64((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2))
+#define vmfge_vf_f16mf4_b64_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vmfge_vf_f16mf4_b64_m((vbool64_t)(op0), (vbool64_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4))
+#define vmerge_vvm_f16m1(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vvm_f16m1((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3))
+#define vmerge_vvm_f16m2(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vvm_f16m2((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3))
+#define vmerge_vvm_f16m4(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vvm_f16m4((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3))
+#define vmerge_vvm_f16m8(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vvm_f16m8((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3))
+#define vmerge_vvm_f16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vvm_f16mf2((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3))
+#define vmerge_vvm_f16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vmerge_vvm_f16mf4((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3))
+#define vfmerge_vfm_f16m1(op0, op1, op2, op3) \
+__builtin_rvv_vfmerge_vfm_f16m1((vbool16_t)(op0), (vfloat16m1_t)(op1), (_Float16)(op2), (size_t)(op3))
+#define vfmerge_vfm_f16m2(op0, op1, op2, op3) \
+__builtin_rvv_vfmerge_vfm_f16m2((vbool8_t)(op0), (vfloat16m2_t)(op1), (_Float16)(op2), (size_t)(op3))
+#define vfmerge_vfm_f16m4(op0, op1, op2, op3) \
+__builtin_rvv_vfmerge_vfm_f16m4((vbool4_t)(op0), (vfloat16m4_t)(op1), (_Float16)(op2), (size_t)(op3))
+#define vfmerge_vfm_f16m8(op0, op1, op2, op3) \
+__builtin_rvv_vfmerge_vfm_f16m8((vbool2_t)(op0), (vfloat16m8_t)(op1), (_Float16)(op2), (size_t)(op3))
+#define vfmerge_vfm_f16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vfmerge_vfm_f16mf2((vbool32_t)(op0), (vfloat16mf2_t)(op1), (_Float16)(op2), (size_t)(op3))
+#define vfmerge_vfm_f16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vfmerge_vfm_f16mf4((vbool64_t)(op0), (vfloat16mf4_t)(op1), (_Float16)(op2), (size_t)(op3))
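+/* Note: the vmfeq/vmfne/vmflt/vmfle/vmfgt/vmfge macros above return mask
+ * values (vboolN_t) that vmerge_vvm_* and vfmerge_vfm_* consume; vfmerge
+ * takes the scalar operand in lanes where the mask bit is set. A minimal
+ * usage sketch, with the hypothetical helper name relu_f16m1 (illustrative
+ * only, not part of the generated header):
+ *
+ *   static inline vfloat16m1_t relu_f16m1(vfloat16m1_t v, size_t vl) {
+ *     vbool16_t neg = vmflt_vf_f16m1_b16(v, (_Float16)0.0f, vl); // v[i] < 0
+ *     return vfmerge_vfm_f16m1(neg, v, (_Float16)0.0f, vl); // 0.0 where set
+ *   }
+ */
+#define 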
vfmv_v_f_f16m1(op0, op1) \ +__builtin_rvv_vfmv_v_f_f16m1((_Float16)(op0), (size_t)(op1)) +#define vfmv_v_f_f16m2(op0, op1) \ +__builtin_rvv_vfmv_v_f_f16m2((_Float16)(op0), (size_t)(op1)) +#define vfmv_v_f_f16m4(op0, op1) \ +__builtin_rvv_vfmv_v_f_f16m4((_Float16)(op0), (size_t)(op1)) +#define vfmv_v_f_f16m8(op0, op1) \ +__builtin_rvv_vfmv_v_f_f16m8((_Float16)(op0), (size_t)(op1)) +#define vfmv_v_f_f16mf2(op0, op1) \ +__builtin_rvv_vfmv_v_f_f16mf2((_Float16)(op0), (size_t)(op1)) +#define vfmv_v_f_f16mf4(op0, op1) \ +__builtin_rvv_vfmv_v_f_f16mf4((_Float16)(op0), (size_t)(op1)) +#define vfredmax_vs_f16m1_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f16m1_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f16m1_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f16m1_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f16m2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f16m2_f16m1((vfloat16m1_t)(op0), (vfloat16m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f16m2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f16m2_f16m1_m((vbool8_t)(op0), (vfloat16m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f16m4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f16m4_f16m1((vfloat16m1_t)(op0), (vfloat16m4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f16m4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f16m4_f16m1_m((vbool4_t)(op0), (vfloat16m1_t)(op1), (vfloat16m4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f16m8_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f16m8_f16m1((vfloat16m1_t)(op0), (vfloat16m8_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f16m8_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f16m8_f16m1_m((vbool2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m8_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f16mf2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f16mf2_f16m1((vfloat16m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f16mf2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f16mf2_f16m1_m((vbool32_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmax_vs_f16mf4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmax_vs_f16mf4_f16m1((vfloat16m1_t)(op0), (vfloat16mf4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmax_vs_f16mf4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmax_vs_f16mf4_f16m1_m((vbool64_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f16m1_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f16m1_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f16m1_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f16m1_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f16m2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f16m2_f16m1((vfloat16m1_t)(op0), (vfloat16m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f16m2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f16m2_f16m1_m((vbool8_t)(op0), (vfloat16m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m1_t)(op3), 
(size_t)(op4)) +#define vfredmin_vs_f16m4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f16m4_f16m1((vfloat16m1_t)(op0), (vfloat16m4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f16m4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f16m4_f16m1_m((vbool4_t)(op0), (vfloat16m1_t)(op1), (vfloat16m4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f16m8_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f16m8_f16m1((vfloat16m1_t)(op0), (vfloat16m8_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f16m8_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f16m8_f16m1_m((vbool2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m8_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f16mf2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f16mf2_f16m1((vfloat16m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f16mf2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f16mf2_f16m1_m((vbool32_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredmin_vs_f16mf4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredmin_vs_f16mf4_f16m1((vfloat16m1_t)(op0), (vfloat16mf4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredmin_vs_f16mf4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredmin_vs_f16mf4_f16m1_m((vbool64_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f16m1_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f16m1_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f16m1_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f16m1_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f16m2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f16m2_f16m1((vfloat16m1_t)(op0), (vfloat16m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f16m2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f16m2_f16m1_m((vbool8_t)(op0), (vfloat16m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f16m4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f16m4_f16m1((vfloat16m1_t)(op0), (vfloat16m4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f16m4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f16m4_f16m1_m((vbool4_t)(op0), (vfloat16m1_t)(op1), (vfloat16m4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f16m8_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f16m8_f16m1((vfloat16m1_t)(op0), (vfloat16m8_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f16m8_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f16m8_f16m1_m((vbool2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m8_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f16mf2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f16mf2_f16m1((vfloat16m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredsum_vs_f16mf2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f16mf2_f16m1_m((vbool32_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredsum_vs_f16mf4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredsum_vs_f16mf4_f16m1((vfloat16m1_t)(op0), (vfloat16mf4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define 
vfredsum_vs_f16mf4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredsum_vs_f16mf4_f16m1_m((vbool64_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f16m1_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f16m1_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f16m1_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f16m1_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f16m2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f16m2_f16m1((vfloat16m1_t)(op0), (vfloat16m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f16m2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f16m2_f16m1_m((vbool8_t)(op0), (vfloat16m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f16m4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f16m4_f16m1((vfloat16m1_t)(op0), (vfloat16m4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f16m4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f16m4_f16m1_m((vbool4_t)(op0), (vfloat16m1_t)(op1), (vfloat16m4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f16m8_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f16m8_f16m1((vfloat16m1_t)(op0), (vfloat16m8_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f16m8_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f16m8_f16m1_m((vbool2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m8_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f16mf2_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f16mf2_f16m1((vfloat16m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f16mf2_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f16mf2_f16m1_m((vbool32_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfredosum_vs_f16mf4_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vfredosum_vs_f16mf4_f16m1((vfloat16m1_t)(op0), (vfloat16mf4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfredosum_vs_f16mf4_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfredosum_vs_f16mf4_f16m1_m((vbool64_t)(op0), (vfloat16m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f16m2((_Float16 *)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint8m1_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f16m4((_Float16 *)(op0), (vuint8m2_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint8m2_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f16m8((_Float16 *)(op0), 
(vuint8m4_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f16m8_m((vbool2_t)(op0), (_Float16 *)(op1), (vuint8m4_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsuxei8_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsuxei8_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f16m4((_Float16 *)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f16m8((_Float16 *)(op0), (vuint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f16m8_m((vbool2_t)(op0), (_Float16 *)(op1), (vuint16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsuxei16_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsuxei16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfmv_f_s_f16m1_f16(op0) \ +__builtin_rvv_vfmv_f_s_f16m1_f16((vfloat16m1_t)(op0)) +#define vfmv_f_s_f16m2_f16(op0) \ +__builtin_rvv_vfmv_f_s_f16m2_f16((vfloat16m2_t)(op0)) +#define vfmv_f_s_f16m4_f16(op0) \ +__builtin_rvv_vfmv_f_s_f16m4_f16((vfloat16m4_t)(op0)) +#define vfmv_f_s_f16m8_f16(op0) \ +__builtin_rvv_vfmv_f_s_f16m8_f16((vfloat16m8_t)(op0)) +#define vfmv_f_s_f16mf2_f16(op0) \ +__builtin_rvv_vfmv_f_s_f16mf2_f16((vfloat16mf2_t)(op0)) +#define vfmv_f_s_f16mf4_f16(op0) \ +__builtin_rvv_vfmv_f_s_f16mf4_f16((vfloat16mf4_t)(op0)) +#define vfmv_s_f_f16m1(op0, op1, op2) \ 
+__builtin_rvv_vfmv_s_f_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmv_s_f_f16m2(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmv_s_f_f16m4(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmv_s_f_f16m8(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmv_s_f_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfmv_s_f_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfmv_s_f_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vslideup_vx_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslideup_vx_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vslideup_vx_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslideup_vx_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslideup_vx_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f16m1((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f16m2((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f16m4((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), 
(size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f16m8((vfloat16m8_t)(op0), (vfloat16m8_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f16mf2((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vslidedown_vx_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vslidedown_vx_f16mf4((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2), (size_t)(op3)) +#define vslidedown_vx_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vslidedown_vx_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vfslide1up_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1up_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1up_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1up_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1up_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1up_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1up_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1up_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1up_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1up_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1up_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfslide1up_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1up_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1up_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vsuxei32_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define 
vsuxei32_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f16m4((_Float16 *)(op0), (vuint32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint32m8_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsuxei32_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsuxei32_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfslide1down_vf_f16m1(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f16m1((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1down_vf_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1down_vf_f16m2(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f16m2((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1down_vf_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1down_vf_f16m4(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f16m4((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1down_vf_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1down_vf_f16m8(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f16m8((vfloat16m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1down_vf_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1down_vf_f16mf2(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f16mf2((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1down_vf_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfslide1down_vf_f16mf4(op0, op1, op2) \ +__builtin_rvv_vfslide1down_vf_f16mf4((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfslide1down_vf_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfslide1down_vf_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vrgather_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f16m1((vfloat16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgather_vv_f16m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vrgather_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgather_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f16m2((vfloat16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgather_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgather_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f16m4((vfloat16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgather_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgather_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f16m8((vfloat16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgather_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgather_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f16mf2((vfloat16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgather_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgather_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vv_f16mf4((vfloat16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgather_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vrgather_vx_f16m1(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f16m1((vfloat16m1_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f16m2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f16m2((vfloat16m2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f16m4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f16m4((vfloat16m4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f16m8(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f16m8((vfloat16m8_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f16mf2(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgather_vx_f16mf4(op0, op1, op2) \ +__builtin_rvv_vrgather_vx_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1), (size_t)(op2)) +#define vrgather_vx_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgather_vx_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), 
(vfloat16mf4_t)(op2), (size_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f16m1(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f16m1((vfloat16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f16m2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f16m2((vfloat16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f16m4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f16m4((vfloat16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f16m8(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f16m8((vfloat16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f16mf2(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f16mf2((vfloat16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vrgatherei16_vv_f16mf4(op0, op1, op2) \ +__builtin_rvv_vrgatherei16_vv_f16mf4((vfloat16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2)) +#define vrgatherei16_vv_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vrgatherei16_vv_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vcompress_vm_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f16m1((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vcompress_vm_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f16m2((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vcompress_vm_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f16m4((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vcompress_vm_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f16m8((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vcompress_vm_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f16mf2((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vcompress_vm_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vcompress_vm_f16mf4((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define 
vsuxei64_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsuxei64_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsuxei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsuxei64_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f16m2((_Float16 *)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint8m1_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f16m4((_Float16 *)(op0), (vuint8m2_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint8m2_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f16m8((_Float16 *)(op0), (vuint8m4_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f16m8_m((vbool2_t)(op0), (_Float16 *)(op1), (vuint8m4_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxei8_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsoxei8_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define 
vsoxei16_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f16m4((_Float16 *)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f16m8(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f16m8((_Float16 *)(op0), (vuint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f16m8_m((vbool2_t)(op0), (_Float16 *)(op1), (vuint16m8_t)(op2), (vfloat16m8_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxei16_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsoxei16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f16m4((_Float16 *)(op0), (vuint32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint32m8_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxei32_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsoxei32_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define 
vsoxei64_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxei64_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsoxei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsoxei64_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vle16ff_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_f16m1((const _Float16 *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_f16m2((const _Float16 *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_f16m4((const _Float16 *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_f16m8((const _Float16 *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_f16mf2((const _Float16 *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vle16ff_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vle16ff_v_f16mf4((const _Float16 *)(op0), (size_t *)(op1), (size_t)(op2)) +#define vle16ff_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vle16ff_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vfneg_v_f16m1(op0, op1) \ +__builtin_rvv_vfneg_v_f16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfneg_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfneg_v_f16m2(op0, op1) \ +__builtin_rvv_vfneg_v_f16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfneg_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfneg_v_f16m4(op0, op1) \ +__builtin_rvv_vfneg_v_f16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfneg_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfneg_v_f16m8(op0, op1) \ 
+__builtin_rvv_vfneg_v_f16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfneg_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfneg_v_f16mf2(op0, op1) \ +__builtin_rvv_vfneg_v_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfneg_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfneg_v_f16mf4(op0, op1) \ +__builtin_rvv_vfneg_v_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfneg_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfneg_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vle16_v_f16m1(op0, op1) \ +__builtin_rvv_vle16_v_f16m1((const _Float16 *)(op0), (size_t)(op1)) +#define vle16_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vle16_v_f16m2(op0, op1) \ +__builtin_rvv_vle16_v_f16m2((const _Float16 *)(op0), (size_t)(op1)) +#define vle16_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vle16_v_f16m4(op0, op1) \ +__builtin_rvv_vle16_v_f16m4((const _Float16 *)(op0), (size_t)(op1)) +#define vle16_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vle16_v_f16m8(op0, op1) \ +__builtin_rvv_vle16_v_f16m8((const _Float16 *)(op0), (size_t)(op1)) +#define vle16_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vle16_v_f16mf2(op0, op1) \ +__builtin_rvv_vle16_v_f16mf2((const _Float16 *)(op0), (size_t)(op1)) +#define vle16_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vle16_v_f16mf4(op0, op1) \ +__builtin_rvv_vle16_v_f16mf4((const _Float16 *)(op0), (size_t)(op1)) +#define vle16_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vle16_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vse16_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vse16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vse16_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vse16_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vse16_v_f16m2((_Float16 *)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vse16_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vse16_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vse16_v_f16m4((_Float16 *)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vse16_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vse16_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vse16_v_f16m8((_Float16 *)(op0), (vfloat16m8_t)(op1), (size_t)(op2)) +#define vse16_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_f16m8_m((vbool2_t)(op0), (_Float16 *)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vse16_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vse16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define 
vse16_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vse16_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vse16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vse16_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vse16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vlse16_v_f16m1(op0, op1, op2) \ +__builtin_rvv_vlse16_v_f16m1((const _Float16 *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_f16m2(op0, op1, op2) \ +__builtin_rvv_vlse16_v_f16m2((const _Float16 *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_f16m4(op0, op1, op2) \ +__builtin_rvv_vlse16_v_f16m4((const _Float16 *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_f16m8(op0, op1, op2) \ +__builtin_rvv_vlse16_v_f16m8((const _Float16 *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_f16m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_f16mf2(op0, op1, op2) \ +__builtin_rvv_vlse16_v_f16mf2((const _Float16 *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlse16_v_f16mf4(op0, op1, op2) \ +__builtin_rvv_vlse16_v_f16mf4((const _Float16 *)(op0), (ptrdiff_t)(op1), (size_t)(op2)) +#define vlse16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlse16_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vfclass_v_u16m1(op0, op1) \ +__builtin_rvv_vfclass_v_u16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfclass_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfclass_v_u16m2(op0, op1) \ +__builtin_rvv_vfclass_v_u16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfclass_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfclass_v_u16m4(op0, op1) \ +__builtin_rvv_vfclass_v_u16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfclass_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfclass_v_u16m8(op0, op1) \ +__builtin_rvv_vfclass_v_u16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfclass_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfclass_v_u16mf2(op0, op1) \ +__builtin_rvv_vfclass_v_u16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfclass_v_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u16mf2_m((vbool32_t)(op0), 
(vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfclass_v_u16mf4(op0, op1) \ +__builtin_rvv_vfclass_v_u16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfclass_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfclass_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f16m1(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f16m1((vint16m1_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f16m2(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f16m2((vint16m2_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f16m4(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f16m4((vint16m4_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f16m8(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f16m8((vint16m8_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vint16m8_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f16mf2(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f16mf2((vint16mf2_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vfcvt_f_x_v_f16mf4(op0, op1) \ +__builtin_rvv_vfcvt_f_x_v_f16mf4((vint16mf4_t)(op0), (size_t)(op1)) +#define vfcvt_f_x_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_x_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f16m1(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f16m1((vuint16m1_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f16m2(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f16m2((vuint16m2_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f16m4(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f16m4((vuint16m4_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f16m8(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f16m8((vuint16m8_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vuint16m8_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f16mf2(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f16mf2((vuint16mf2_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vfcvt_f_xu_v_f16mf4(op0, op1) \ +__builtin_rvv_vfcvt_f_xu_v_f16mf4((vuint16mf4_t)(op0), (size_t)(op1)) +#define vfcvt_f_xu_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_f_xu_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vuint16mf4_t)(op2), 
(size_t)(op3)) +#define vfcvt_rtz_x_f_v_i16m1(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i16m2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i16m4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i16m8(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i16mf2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_x_f_v_i16mf4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_x_f_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_x_f_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u16m1(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u16m2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u16m4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u16m8(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u16mf2(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfcvt_rtz_xu_f_v_u16mf4(op0, op1) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfcvt_rtz_xu_f_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i16m1(op0, op1) \ 
+__builtin_rvv_vfcvt_x_f_v_i16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i16m1_m((vbool16_t)(op0), (vint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i16m2(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i16m2_m((vbool8_t)(op0), (vint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i16m4(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i16m4_m((vbool4_t)(op0), (vint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i16m8(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i16m8_m((vbool2_t)(op0), (vint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i16mf2(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i16mf2_m((vbool32_t)(op0), (vint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfcvt_x_f_v_i16mf4(op0, op1) \ +__builtin_rvv_vfcvt_x_f_v_i16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfcvt_x_f_v_i16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_x_f_v_i16mf4_m((vbool64_t)(op0), (vint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u16m1(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u16m1_m((vbool16_t)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u16m2(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u16m2_m((vbool8_t)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u16m4(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u16m4_m((vbool4_t)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u16m8(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u16m8_m((vbool2_t)(op0), (vuint16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u16mf2(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u16mf2_m((vbool32_t)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfcvt_xu_f_v_u16mf4(op0, op1) \ +__builtin_rvv_vfcvt_xu_f_v_u16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfcvt_xu_f_v_u16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfcvt_xu_f_v_u16mf4_m((vbool64_t)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f16mf4(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f16mf4((vint32mf2_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f16mf2(op0, op1) \ 
+__builtin_rvv_vfncvt_f_x_w_f16mf2((vint32m1_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f16m1(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f16m1((vint32m2_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f16m2(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f16m2((vint32m4_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vfncvt_f_x_w_f16m4(op0, op1) \ +__builtin_rvv_vfncvt_f_x_w_f16m4((vint32m8_t)(op0), (size_t)(op1)) +#define vfncvt_f_x_w_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_x_w_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f16mf4(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f16mf4((vuint32mf2_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f16mf2(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f16mf2((vuint32m1_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f16m1(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f16m1((vuint32m2_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f16m2(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f16m2((vuint32m4_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vfncvt_f_xu_w_f16m4(op0, op1) \ +__builtin_rvv_vfncvt_f_xu_w_f16m4((vuint32m8_t)(op0), (size_t)(op1)) +#define vfncvt_f_xu_w_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_xu_w_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i8mf8(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8mf8((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i8mf4(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8mf4((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i8mf2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8mf2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i8m1(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8m1((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vfloat16m2_t)(op2), 
(size_t)(op3)) +#define vfncvt_rtz_x_f_w_i8m2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8m2((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_x_f_w_i8m4(op0, op1) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8m4((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_x_f_w_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_x_f_w_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u8mf8(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf8((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u8mf4(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf4((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u8mf2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u8m1(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8m1((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u8m2(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8m2((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfncvt_rtz_xu_f_w_u8m4(op0, op1) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8m4((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfncvt_rtz_xu_f_w_u8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rtz_xu_f_w_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i8mf8(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i8mf8((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i8mf8_m((vbool64_t)(op0), (vint8mf8_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i8mf4(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i8mf4((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i8mf4_m((vbool32_t)(op0), (vint8mf4_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i8mf2(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i8mf2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i8mf2_m((vbool16_t)(op0), (vint8mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i8m1(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i8m1((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i8m1_m((vbool8_t)(op0), (vint8m1_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i8m2(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i8m2((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i8m2_m(op0, op1, op2, op3) 
\ +__builtin_rvv_vfncvt_x_f_w_i8m2_m((vbool4_t)(op0), (vint8m2_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfncvt_x_f_w_i8m4(op0, op1) \ +__builtin_rvv_vfncvt_x_f_w_i8m4((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfncvt_x_f_w_i8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_x_f_w_i8m4_m((vbool2_t)(op0), (vint8m4_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u8mf8(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u8mf8((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u8mf8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u8mf8_m((vbool64_t)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u8mf4(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u8mf4((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u8mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u8mf4_m((vbool32_t)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u8mf2(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u8mf2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u8mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u8mf2_m((vbool16_t)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u8m1(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u8m1((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u8m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u8m1_m((vbool8_t)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u8m2(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u8m2((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u8m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u8m2_m((vbool4_t)(op0), (vuint8m2_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfncvt_xu_f_w_u8m4(op0, op1) \ +__builtin_rvv_vfncvt_xu_f_w_u8m4((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfncvt_xu_f_w_u8m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_xu_f_w_u8m4_m((vbool2_t)(op0), (vuint8m4_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfrec7_v_f16m1(op0, op1) \ +__builtin_rvv_vfrec7_v_f16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfrec7_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfrec7_v_f16m2(op0, op1) \ +__builtin_rvv_vfrec7_v_f16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfrec7_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfrec7_v_f16m4(op0, op1) \ +__builtin_rvv_vfrec7_v_f16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfrec7_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfrec7_v_f16m8(op0, op1) \ +__builtin_rvv_vfrec7_v_f16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfrec7_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfrec7_v_f16mf2(op0, op1) \ +__builtin_rvv_vfrec7_v_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfrec7_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfrec7_v_f16mf4(op0, op1) \ +__builtin_rvv_vfrec7_v_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfrec7_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrec7_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), 
(size_t)(op3)) +#define vfrsqrt7_v_f16m1(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f16m2(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f16m4(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f16m8(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f16mf2(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfrsqrt7_v_f16mf4(op0, op1) \ +__builtin_rvv_vfrsqrt7_v_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfrsqrt7_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfrsqrt7_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f16m1(op0, op1) \ +__builtin_rvv_vfsqrt_v_f16m1((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f16m2(op0, op1) \ +__builtin_rvv_vfsqrt_v_f16m2((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f16m4(op0, op1) \ +__builtin_rvv_vfsqrt_v_f16m4((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f16m8(op0, op1) \ +__builtin_rvv_vfsqrt_v_f16m8((vfloat16m8_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vfloat16m8_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f16mf2(op0, op1) \ +__builtin_rvv_vfsqrt_v_f16mf2((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfsqrt_v_f16mf4(op0, op1) \ +__builtin_rvv_vfsqrt_v_f16mf4((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfsqrt_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfsqrt_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f16mf4(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f16mf4((vint8mf8_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f16mf2(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f16mf2((vint8mf4_t)(op0), (size_t)(op1)) 
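All of these macros follow one shape: the unmasked form takes the source operand(s) plus an explicit vl element count, and the _m form prepends a mask register and a maskedoff (inactive-element) operand before the sources. A minimal usage sketch for the unary forms, assuming a target with the V and Zfh extensions and the vsetvl/load/store macros (vsetvl_e16m1, vle16_v_f16m1, vse16_v_f16m1) that this header defines elsewhere:

#include <riscv_vector.h>

/* Illustrative sketch only: strip-mined elementwise sqrt over _Float16. */
void sqrt_f16(_Float16 *dst, const _Float16 *src, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e16m1(n);              /* elements handled this pass */
    vfloat16m1_t v = vle16_v_f16m1(src, vl);  /* load vl half-precision lanes */
    vse16_v_f16m1(dst, vfsqrt_v_f16m1(v, vl), vl);
    n -= vl; src += vl; dst += vl;
  }
}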
+#define vfwcvt_f_x_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f16m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f16m1((vint8mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f16m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f16m2((vint8m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f16m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f16m4((vint8m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_x_v_f16m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_x_v_f16m8((vint8m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_x_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_x_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f16mf4(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f16mf4((vuint8mf8_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f16mf2(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f16mf2((vuint8mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f16m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m1((vuint8mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f16m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m2((vuint8m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f16m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m4((vuint8m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_xu_v_f16m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m8((vuint8m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_xu_v_f16m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_xu_v_f16m8_m((vbool2_t)(op0), (vfloat16m8_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32mf2((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i32m1(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m1((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i32m2(op0, op1) \ 
+__builtin_rvv_vfwcvt_rtz_x_f_v_i32m2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i32m4(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m4((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_x_f_v_i32m8(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m8((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_x_f_v_i32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_x_f_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32mf2((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u32m1(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m1((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u32m2(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u32m4(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m4((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_rtz_xu_f_v_u32m8(op0, op1) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m8((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_rtz_xu_f_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwcvt_x_f_v_i32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_x_f_v_i32mf2((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_x_f_v_i32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_x_f_v_i32mf2_m((vbool64_t)(op0), (vint32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_x_f_v_i32m1(op0, op1) \ +__builtin_rvv_vfwcvt_x_f_v_i32m1((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_x_f_v_i32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_x_f_v_i32m1_m((vbool32_t)(op0), (vint32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_x_f_v_i32m2(op0, op1) \ +__builtin_rvv_vfwcvt_x_f_v_i32m2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_x_f_v_i32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_x_f_v_i32m2_m((vbool16_t)(op0), (vint32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_x_f_v_i32m4(op0, op1) \ +__builtin_rvv_vfwcvt_x_f_v_i32m4((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_x_f_v_i32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_x_f_v_i32m4_m((vbool8_t)(op0), (vint32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_x_f_v_i32m8(op0, op1) \ +__builtin_rvv_vfwcvt_x_f_v_i32m8((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_x_f_v_i32m8_m(op0, op1, op2, op3) \ 
+__builtin_rvv_vfwcvt_x_f_v_i32m8_m((vbool4_t)(op0), (vint32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwcvt_xu_f_v_u32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_xu_f_v_u32mf2((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_xu_f_v_u32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_xu_f_v_u32mf2_m((vbool64_t)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_xu_f_v_u32m1(op0, op1) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m1((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_xu_f_v_u32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m1_m((vbool32_t)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_xu_f_v_u32m2(op0, op1) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_xu_f_v_u32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m2_m((vbool16_t)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_xu_f_v_u32m4(op0, op1) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m4((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_xu_f_v_u32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m4_m((vbool8_t)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_xu_f_v_u32m8(op0, op1) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m8((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_xu_f_v_u32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_xu_f_v_u32m8_m((vbool4_t)(op0), (vuint32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vlmul_ext_v_f16mf2_f16m1(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf2_f16m1((vfloat16mf2_t)(op0)) +#define vlmul_ext_v_f16mf4_f16m1(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf4_f16m1((vfloat16mf4_t)(op0)) +#define vlmul_ext_v_f16mf4_f16mf2(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf4_f16mf2((vfloat16mf4_t)(op0)) +#define vlmul_ext_v_f16m1_f16m2(op0) \ +__builtin_rvv_vlmul_ext_v_f16m1_f16m2((vfloat16m1_t)(op0)) +#define vlmul_ext_v_f16mf2_f16m2(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf2_f16m2((vfloat16mf2_t)(op0)) +#define vlmul_ext_v_f16mf4_f16m2(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf4_f16m2((vfloat16mf4_t)(op0)) +#define vlmul_ext_v_f16m1_f16m4(op0) \ +__builtin_rvv_vlmul_ext_v_f16m1_f16m4((vfloat16m1_t)(op0)) +#define vlmul_ext_v_f16m2_f16m4(op0) \ +__builtin_rvv_vlmul_ext_v_f16m2_f16m4((vfloat16m2_t)(op0)) +#define vlmul_ext_v_f16mf2_f16m4(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf2_f16m4((vfloat16mf2_t)(op0)) +#define vlmul_ext_v_f16mf4_f16m4(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf4_f16m4((vfloat16mf4_t)(op0)) +#define vlmul_ext_v_f16m1_f16m8(op0) \ +__builtin_rvv_vlmul_ext_v_f16m1_f16m8((vfloat16m1_t)(op0)) +#define vlmul_ext_v_f16m2_f16m8(op0) \ +__builtin_rvv_vlmul_ext_v_f16m2_f16m8((vfloat16m2_t)(op0)) +#define vlmul_ext_v_f16m4_f16m8(op0) \ +__builtin_rvv_vlmul_ext_v_f16m4_f16m8((vfloat16m4_t)(op0)) +#define vlmul_ext_v_f16mf2_f16m8(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf2_f16m8((vfloat16mf2_t)(op0)) +#define vlmul_ext_v_f16mf4_f16m8(op0) \ +__builtin_rvv_vlmul_ext_v_f16mf4_f16m8((vfloat16mf4_t)(op0)) +#define vlmul_trunc_v_f16m1_f16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m1_f16mf2((vfloat16m1_t)(op0)) +#define vlmul_trunc_v_f16m2_f16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m2_f16mf2((vfloat16m2_t)(op0)) +#define vlmul_trunc_v_f16m4_f16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m4_f16mf2((vfloat16m4_t)(op0)) +#define vlmul_trunc_v_f16m8_f16mf2(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m8_f16mf2((vfloat16m8_t)(op0)) +#define vlmul_trunc_v_f16m1_f16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m1_f16mf4((vfloat16m1_t)(op0)) 
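Unlike the arithmetic macros, the vlmul_ext_v_* / vlmul_trunc_v_* forms take no vl argument: they only reinterpret a value as belonging to a wider or narrower LMUL register group, leaving the element bits untouched (extension places the data in the low part of the wider group; truncation keeps the low part). An illustrative sketch, with riscv_vector.h included:

/* Extending mf2 into an m1-typed group and truncating back is an identity. */
vfloat16mf2_t roundtrip(vfloat16mf2_t x) {
  vfloat16m1_t wide = vlmul_ext_v_f16mf2_f16m1(x);  /* data in low half of m1 */
  return vlmul_trunc_v_f16m1_f16mf2(wide);          /* back to mf2 unchanged */
}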
+#define vlmul_trunc_v_f16m2_f16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m2_f16mf4((vfloat16m2_t)(op0)) +#define vlmul_trunc_v_f16m4_f16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m4_f16mf4((vfloat16m4_t)(op0)) +#define vlmul_trunc_v_f16m8_f16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m8_f16mf4((vfloat16m8_t)(op0)) +#define vlmul_trunc_v_f16mf2_f16mf4(op0) \ +__builtin_rvv_vlmul_trunc_v_f16mf2_f16mf4((vfloat16mf2_t)(op0)) +#define vlmul_trunc_v_f16m2_f16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m2_f16m1((vfloat16m2_t)(op0)) +#define vlmul_trunc_v_f16m4_f16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m4_f16m1((vfloat16m4_t)(op0)) +#define vlmul_trunc_v_f16m8_f16m1(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m8_f16m1((vfloat16m8_t)(op0)) +#define vlmul_trunc_v_f16m4_f16m2(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m4_f16m2((vfloat16m4_t)(op0)) +#define vlmul_trunc_v_f16m8_f16m2(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m8_f16m2((vfloat16m8_t)(op0)) +#define vlmul_trunc_v_f16m8_f16m4(op0) \ +__builtin_rvv_vlmul_trunc_v_f16m8_f16m4((vfloat16m8_t)(op0)) +#define vreinterpret_v_i16m1_f16m1(op0) \ +__builtin_rvv_vreinterpret_v_i16m1_f16m1((vint16m1_t)(op0)) +#define vreinterpret_v_i16m2_f16m2(op0) \ +__builtin_rvv_vreinterpret_v_i16m2_f16m2((vint16m2_t)(op0)) +#define vreinterpret_v_i16m4_f16m4(op0) \ +__builtin_rvv_vreinterpret_v_i16m4_f16m4((vint16m4_t)(op0)) +#define vreinterpret_v_i16m8_f16m8(op0) \ +__builtin_rvv_vreinterpret_v_i16m8_f16m8((vint16m8_t)(op0)) +#define vreinterpret_v_i16mf2_f16mf2(op0) \ +__builtin_rvv_vreinterpret_v_i16mf2_f16mf2((vint16mf2_t)(op0)) +#define vreinterpret_v_i16mf4_f16mf4(op0) \ +__builtin_rvv_vreinterpret_v_i16mf4_f16mf4((vint16mf4_t)(op0)) +#define vreinterpret_v_u16m1_f16m1(op0) \ +__builtin_rvv_vreinterpret_v_u16m1_f16m1((vuint16m1_t)(op0)) +#define vreinterpret_v_u16m2_f16m2(op0) \ +__builtin_rvv_vreinterpret_v_u16m2_f16m2((vuint16m2_t)(op0)) +#define vreinterpret_v_u16m4_f16m4(op0) \ +__builtin_rvv_vreinterpret_v_u16m4_f16m4((vuint16m4_t)(op0)) +#define vreinterpret_v_u16m8_f16m8(op0) \ +__builtin_rvv_vreinterpret_v_u16m8_f16m8((vuint16m8_t)(op0)) +#define vreinterpret_v_u16mf2_f16mf2(op0) \ +__builtin_rvv_vreinterpret_v_u16mf2_f16mf2((vuint16mf2_t)(op0)) +#define vreinterpret_v_u16mf4_f16mf4(op0) \ +__builtin_rvv_vreinterpret_v_u16mf4_f16mf4((vuint16mf4_t)(op0)) +#define vreinterpret_v_f16m1_i16m1(op0) \ +__builtin_rvv_vreinterpret_v_f16m1_i16m1((vfloat16m1_t)(op0)) +#define vreinterpret_v_f16m2_i16m2(op0) \ +__builtin_rvv_vreinterpret_v_f16m2_i16m2((vfloat16m2_t)(op0)) +#define vreinterpret_v_f16m4_i16m4(op0) \ +__builtin_rvv_vreinterpret_v_f16m4_i16m4((vfloat16m4_t)(op0)) +#define vreinterpret_v_f16m8_i16m8(op0) \ +__builtin_rvv_vreinterpret_v_f16m8_i16m8((vfloat16m8_t)(op0)) +#define vreinterpret_v_f16mf2_i16mf2(op0) \ +__builtin_rvv_vreinterpret_v_f16mf2_i16mf2((vfloat16mf2_t)(op0)) +#define vreinterpret_v_f16mf4_i16mf4(op0) \ +__builtin_rvv_vreinterpret_v_f16mf4_i16mf4((vfloat16mf4_t)(op0)) +#define vreinterpret_v_f16m1_u16m1(op0) \ +__builtin_rvv_vreinterpret_v_f16m1_u16m1((vfloat16m1_t)(op0)) +#define vreinterpret_v_f16m2_u16m2(op0) \ +__builtin_rvv_vreinterpret_v_f16m2_u16m2((vfloat16m2_t)(op0)) +#define vreinterpret_v_f16m4_u16m4(op0) \ +__builtin_rvv_vreinterpret_v_f16m4_u16m4((vfloat16m4_t)(op0)) +#define vreinterpret_v_f16m8_u16m8(op0) \ +__builtin_rvv_vreinterpret_v_f16m8_u16m8((vfloat16m8_t)(op0)) +#define vreinterpret_v_f16mf2_u16mf2(op0) \ +__builtin_rvv_vreinterpret_v_f16mf2_u16mf2((vfloat16mf2_t)(op0)) +#define vreinterpret_v_f16mf4_u16mf4(op0) \ 
+__builtin_rvv_vreinterpret_v_f16mf4_u16mf4((vfloat16mf4_t)(op0)) +#define vundefined_f16m1() \ +__builtin_rvv_vundefined_f16m1() +#define vundefined_f16m2() \ +__builtin_rvv_vundefined_f16m2() +#define vundefined_f16m4() \ +__builtin_rvv_vundefined_f16m4() +#define vundefined_f16m8() \ +__builtin_rvv_vundefined_f16m8() +#define vundefined_f16mf2() \ +__builtin_rvv_vundefined_f16mf2() +#define vundefined_f16mf4() \ +__builtin_rvv_vundefined_f16mf4() +#endif + +#if defined(__riscv_f) && defined(__riscv_zfh) +#define vfwadd_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f32mf2((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f32m1((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f32m2((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f32m4((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwadd_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_vv_f32m8((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfwadd_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwadd_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f32mf2((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f32m1((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f32m2((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f32m4((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_vf_f32m8((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_vf_f32m8_m((vbool4_t)(op0), 
(vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f32mf2((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f32m1((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f32m2((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f32m4((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwsub_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_vv_f32m8((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfwsub_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwsub_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f32mf2((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f32m1((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f32m2((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f32m4((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_vf_f32m8((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_wv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f32mf2((vfloat32mf2_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f32m1(op0, op1, op2) \ 
+__builtin_rvv_vfwadd_wv_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f32m2((vfloat32m2_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f32m4((vfloat32m4_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwadd_wv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_wv_f32m8((vfloat32m8_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfwadd_wv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwadd_wf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f32mf2((vfloat32mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_wf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_wf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f32m1((vfloat32m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_wf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_wf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f32m2((vfloat32m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_wf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_wf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f32m4((vfloat32m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_wf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwadd_wf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwadd_wf_f32m8((vfloat32m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwadd_wf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwadd_wf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_wv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f32mf2((vfloat32mf2_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwsub_wv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwsub_wv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f32m2((vfloat32m2_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f32m2_m(op0, op1, op2, op3, 
op4) \ +__builtin_rvv_vfwsub_wv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwsub_wv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f32m4((vfloat32m4_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwsub_wv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_wv_f32m8((vfloat32m8_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfwsub_wv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwsub_wf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f32mf2((vfloat32mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_wf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_wf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f32m1((vfloat32m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_wf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_wf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f32m2((vfloat32m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_wf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_wf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f32m4((vfloat32m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_wf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwsub_wf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwsub_wf_f32m8((vfloat32m8_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwsub_wf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwsub_wf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwmul_vv_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f32mf2((vfloat16mf4_t)(op0), (vfloat16mf4_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwmul_vv_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f32m1((vfloat16mf2_t)(op0), (vfloat16mf2_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwmul_vv_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f32m2((vfloat16m1_t)(op0), (vfloat16m1_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwmul_vv_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f32m4((vfloat16m2_t)(op0), (vfloat16m2_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) 
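The widening arithmetic in this block (vfwadd/vfwsub/vfwmul, and the fused vfwmacc/vfwnmacc/vfwmsac/vfwnmsac families that follow) reads f16 sources and produces f32 results directly, so no intermediate f16 rounding occurs; the _wv/_wf variants instead take an already-widened f32 first operand. An illustrative sketch of a single widening product, with riscv_vector.h included:

/* f16 x f16 -> f32 per lane, under the caller's vl. */
vfloat32m2_t widen_mul(vfloat16m1_t a, vfloat16m1_t b, size_t vl) {
  return vfwmul_vv_f32m2(a, b, vl);  /* each lane: (float)a[i] * (float)b[i] */
}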
+#define vfwmul_vv_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwmul_vv_f32m8((vfloat16m4_t)(op0), (vfloat16m4_t)(op1), (size_t)(op2)) +#define vfwmul_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwmul_vf_f32mf2(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f32mf2((vfloat16mf4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwmul_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwmul_vf_f32m1(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f32m1((vfloat16mf2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwmul_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwmul_vf_f32m2(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f32m2((vfloat16m1_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwmul_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwmul_vf_f32m4(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f32m4((vfloat16m2_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwmul_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwmul_vf_f32m8(op0, op1, op2) \ +__builtin_rvv_vfwmul_vf_f32m8((vfloat16m4_t)(op0), (_Float16)(op1), (size_t)(op2)) +#define vfwmul_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmul_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (_Float16)(op3), (size_t)(op4)) +#define vfwmacc_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f32m2((vfloat32m2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f32m4((vfloat32m4_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwmacc_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vv_f32m8((vfloat32m8_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwmacc_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), 
(vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f32mf2((vfloat32mf2_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f32m1((vfloat32m1_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f32m2((vfloat32m2_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f32m4((vfloat32m4_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwmacc_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmacc_vf_f32m8((vfloat32m8_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwmacc_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmacc_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f32m2((vfloat32m2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f32m4((vfloat32m4_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vv_f32m8((vfloat32m8_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwnmacc_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f32mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vfwnmacc_vf_f32mf2((vfloat32mf2_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f32m1((vfloat32m1_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f32m2((vfloat32m2_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f32m4((vfloat32m4_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwnmacc_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmacc_vf_f32m8((vfloat32m8_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwnmacc_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmacc_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f32m2((vfloat32m2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f32m4((vfloat32m4_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwmsac_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vv_f32m8((vfloat32m8_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwmsac_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f32mf2((vfloat32mf2_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define 
vfwmsac_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f32m1((vfloat32m1_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f32m2((vfloat32m2_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f32m4((vfloat32m4_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwmsac_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwmsac_vf_f32m8((vfloat32m8_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwmsac_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwmsac_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f32m2((vfloat32m2_t)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f32m4((vfloat32m4_t)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vv_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vv_f32m8((vfloat32m8_t)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwnmsac_vv_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vv_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f32mf2((vfloat32mf2_t)(op0), (_Float16)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f32mf2_m((vbool64_t)(op0), 
(vfloat32mf2_t)(op1), (_Float16)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f32m1((vfloat32m1_t)(op0), (_Float16)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (_Float16)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f32m2((vfloat32m2_t)(op0), (_Float16)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (_Float16)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f32m4((vfloat32m4_t)(op0), (_Float16)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (_Float16)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vfwnmsac_vf_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vfwnmsac_vf_f32m8((vfloat32m8_t)(op0), (_Float16)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vfwnmsac_vf_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwnmsac_vf_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (_Float16)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f16m1_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f16m1_f32m1((vfloat32m1_t)(op0), (vfloat16m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f16m1_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f16m1_f32m1_m((vbool16_t)(op0), (vfloat32m1_t)(op1), (vfloat16m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f16m2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f16m2_f32m1((vfloat32m1_t)(op0), (vfloat16m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f16m2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f16m2_f32m1_m((vbool8_t)(op0), (vfloat32m1_t)(op1), (vfloat16m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f16m4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f16m4_f32m1((vfloat32m1_t)(op0), (vfloat16m4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f16m4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f16m4_f32m1_m((vbool4_t)(op0), (vfloat32m1_t)(op1), (vfloat16m4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f16m8_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f16m8_f32m1((vfloat32m1_t)(op0), (vfloat16m8_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f16m8_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f16m8_f32m1_m((vbool2_t)(op0), (vfloat32m1_t)(op1), (vfloat16m8_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f16mf2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f16mf2_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f16mf2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredsum_vs_f16mf2_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredsum_vs_f16mf4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredsum_vs_f16mf4_f32m1((vfloat32m1_t)(op0), (vfloat16mf4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredsum_vs_f16mf4_f32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vfwredsum_vs_f16mf4_f32m1_m((vbool64_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f16m1_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f16m1_f32m1((vfloat32m1_t)(op0), (vfloat16m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f16m1_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f16m1_f32m1_m((vbool16_t)(op0), (vfloat32m1_t)(op1), (vfloat16m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f16m2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f16m2_f32m1((vfloat32m1_t)(op0), (vfloat16m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f16m2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f16m2_f32m1_m((vbool8_t)(op0), (vfloat32m1_t)(op1), (vfloat16m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f16m4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f16m4_f32m1((vfloat32m1_t)(op0), (vfloat16m4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f16m4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f16m4_f32m1_m((vbool4_t)(op0), (vfloat32m1_t)(op1), (vfloat16m4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f16m8_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f16m8_f32m1((vfloat32m1_t)(op0), (vfloat16m8_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f16m8_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f16m8_f32m1_m((vbool2_t)(op0), (vfloat32m1_t)(op1), (vfloat16m8_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f16mf2_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f16mf2_f32m1((vfloat32m1_t)(op0), (vfloat16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f16mf2_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f16mf2_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfwredosum_vs_f16mf4_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vfwredosum_vs_f16mf4_f32m1((vfloat32m1_t)(op0), (vfloat16mf4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfwredosum_vs_f16mf4_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vfwredosum_vs_f16mf4_f32m1_m((vbool64_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vfncvt_f_f_w_f16mf4(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f16mf4((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfncvt_f_f_w_f16mf2(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f16mf2((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfncvt_f_f_w_f16m1(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f16m1((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfncvt_f_f_w_f16m2(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f16m2((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define 
vfncvt_f_f_w_f16m4(op0, op1) \ +__builtin_rvv_vfncvt_f_f_w_f16m4((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfncvt_f_f_w_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_f_f_w_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f16mf4(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16mf4((vfloat32mf2_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f16mf4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16mf4_m((vbool64_t)(op0), (vfloat16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f16mf2(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16mf2((vfloat32m1_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f16mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16mf2_m((vbool32_t)(op0), (vfloat16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f16m1(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16m1((vfloat32m2_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f16m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16m1_m((vbool16_t)(op0), (vfloat16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f16m2(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16m2((vfloat32m4_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f16m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16m2_m((vbool8_t)(op0), (vfloat16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vfncvt_rod_f_f_w_f16m4(op0, op1) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16m4((vfloat32m8_t)(op0), (size_t)(op1)) +#define vfncvt_rod_f_f_w_f16m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfncvt_rod_f_f_w_f16m4_m((vbool4_t)(op0), (vfloat16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f32mf2(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f32mf2((vfloat16mf4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f32mf2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f32mf2_m((vbool64_t)(op0), (vfloat32mf2_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f32m1(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f32m1((vfloat16mf2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f32m1_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f32m1_m((vbool32_t)(op0), (vfloat32m1_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f32m2(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f32m2((vfloat16m1_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f32m2_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f32m2_m((vbool16_t)(op0), (vfloat32m2_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f32m4(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f32m4((vfloat16m2_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f32m4_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f32m4_m((vbool8_t)(op0), (vfloat32m4_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vfwcvt_f_f_v_f32m8(op0, op1) \ +__builtin_rvv_vfwcvt_f_f_v_f32m8((vfloat16m4_t)(op0), (size_t)(op1)) +#define vfwcvt_f_f_v_f32m8_m(op0, op1, op2, op3) \ +__builtin_rvv_vfwcvt_f_f_v_f32m8_m((vbool4_t)(op0), (vfloat32m8_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#endif + +#if defined(__riscv_zvamo) +#define vamoswapei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i32m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vamoswapei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i32m2((int32_t 
*)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), 
(size_t)(op3)) +#define vamoswapei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i64m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoswapei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), 
(vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) 
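+/* The vamo<op>ei<eew> macros map the draft Zvamo indexed atomic-memory
+ * intrinsics onto builtins: ei<eew> is the element width of the unsigned
+ * index vector, the _v_<type><lmul> suffix names the data type and LMUL,
+ * and the _m forms take a leading mask. Each call returns the prior
+ * memory contents (the AMO's old values). A minimal usage sketch,
+ * assuming a toolchain with the experimental Zvamo extension enabled and
+ * the vsetvl/vle intrinsics defined earlier in this header:
+ *
+ *   size_t vl = vsetvl_e32m1(n);
+ *   vuint32m1_t idx = vle32_v_u32m1(offsets, vl);  // byte offsets, as
+ *                                                  // with indexed stores
+ *   vint32m1_t newv = vle32_v_i32m1(src, vl);
+ *   vint32m1_t oldv = vamoswapei32_v_i32m1(table, idx, newv, vl);
+ */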
+#define vamoswapei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i32m4(op0, op1, op2, op3) \ 
+__builtin_rvv_vamoaddei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define 
vamoaddei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i32m4_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoaddei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), 
(vint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u64m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vamoaddei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), 
(size_t)(op3)) +#define vamoaddei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i64m2_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoaddei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoaddei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoaddei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoaddei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoaddei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) 
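+/* The vamoxor macros (and the vamoadd macros above) share the same
+ * calling convention: (base, index, value, vl) unmasked and
+ * (mask, base, index, value, vl) for the _m forms, with the old memory
+ * values returned. Illustrative fetch-and-add under the same Zvamo
+ * assumptions as the sketch above; counters, idx, and incr are
+ * placeholder variables:
+ *
+ *   vint32m1_t prev = vamoaddei32_v_i32m1(counters, idx, incr, vl);
+ */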
+#define vamoxorei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i32mf2((int32_t *)(op0), 
(vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define 
vamoxorei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u32m1_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoxorei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), 
(size_t)(op4)) +#define vamoxorei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i64m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vamoxorei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) 
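+// Illustrative note (a minimal usage sketch, not part of the generated
+// header; the names base, bindex, value, and vl below are hypothetical).
+// Each wrapper above only casts its operands and forwards to the matching
+// RVV builtin, e.g.
+//
+//   vint64m8_t old = vamoxorei64_v_i64m8(base, bindex, value, vl);
+//
+// expands to
+//
+//   __builtin_rvv_vamoxorei64_v_i64m8((int64_t *)(base), (vuint64m8_t)(bindex),
+//                                     (vint64m8_t)(value), (size_t)(vl));
+//
+// i.e. an indexed vector atomic XOR: for each of the first vl elements, the
+// memory element selected by bindex[i] is atomically XOR-ed with value[i],
+// and the previous memory values are returned.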
+#define vamoxorei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoxorei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoxorei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoxorei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoxorei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), 
(vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u32m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vamoandei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), 
(vuint32m2_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u32m4_m(op0, op1, op2, op3, 
op4) \ +__builtin_rvv_vamoandei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoandei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoandei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoandei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoandei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) 
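+// Illustrative note on the masked (_m) forms (a hedged sketch; mask, base,
+// bindex, value, and vl are hypothetical names). The extra first operand is
+// a predicate whose vboolN_t type encodes the SEW/LMUL ratio, as the casts
+// above show: i64m1 pairs with vbool64_t (64/1), i32m8 with vbool4_t (32/8),
+// and so on. For example,
+//
+//   vbool64_t mask = ...;  // one predicate bit per element
+//   vint64m1_t old = vamoandei16_v_i64m1_m(mask, base, bindex, value, vl);
+//
+// performs the atomic AND only for elements whose mask bit is set; inactive
+// elements leave memory untouched.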
+#define vamoandei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoandei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoandei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoandei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoandei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoandei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_i64m8((int64_t 
*)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoandei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoandei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoandei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoandei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoandei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u64m2_m(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoandei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoandei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoandei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoandei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define 
vamoorei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), 
(size_t)(op3)) +#define vamoorei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u32m8_m((vbool4_t)(op0), 
(uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i64m2(op0, op1, op2, op3) \ 
+__builtin_rvv_vamoorei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoorei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoorei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoorei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoorei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoorei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoorei16_v_i64m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoorei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoorei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoorei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoorei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoorei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define 
vamoorei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoorei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoorei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamoorei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamoorei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamoorei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoorei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamoorei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoorei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamominei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), 
(size_t)(op3)) +#define vamominei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamominei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamominei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamominei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamominei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamominei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamominei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamominei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamominei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamominei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamominei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamominei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamominei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamominei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamominei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamominei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamominei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamominei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamominei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamominei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamominei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i32m1_m((vbool32_t)(op0), 
(int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamominei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamominei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamominei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamominei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamominei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamominei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamominei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamominei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamominei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamominei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamominei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamominei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamominei64_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamominei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamominei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamominei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamominei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamominei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamominei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamominei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamominei8_v_i64m4(op0, op1, op2, op3) \ 
+__builtin_rvv_vamominei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamominei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamominei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamominei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamominei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamominei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamominei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamominei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamominei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamominei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamominei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamominei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamominei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamominei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamominei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamominei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamominei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamominei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamominei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamominei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamominei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define 
vamominei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamominei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamominei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamominei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamominei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamominei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamominei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), 
(vint32m2_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i32m8((int32_t *)(op0), (vuint16m4_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint16m4_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i32m8((int32_t *)(op0), (vuint32m8_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint32m8_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i32m4(op0, op1, op2, op3) \ 
+__builtin_rvv_vamomaxei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamomaxei8_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei8_v_i64m8((int64_t *)(op0), (vuint8m1_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamomaxei8_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei8_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint8m1_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamomaxei16_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei16_v_i64m8((int64_t *)(op0), (vuint16m2_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamomaxei16_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei16_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint16m2_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define 
vamomaxei32_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamomaxei32_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei32_v_i64m8((int64_t *)(op0), (vuint32m4_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamomaxei32_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei32_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint32m4_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vamomaxei64_v_i64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxei64_v_i64m8((int64_t *)(op0), (vuint64m8_t)(op1), (vint64m8_t)(op2), (size_t)(op3)) +#define vamomaxei64_v_i64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxei64_v_i64m8_m((vbool8_t)(op0), (int64_t *)(op1), (vuint64m8_t)(op2), (vint64m8_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u32m4_m((vbool8_t)(op0), (uint32_t 
*)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u32m8((uint32_t *)(op0), (vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), 
(vuint32m4_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamominuei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamominuei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define 
vamominuei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamominuei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamominuei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamominuei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamominuei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u64m4(op0, op1, op2, op3) 
\ +__builtin_rvv_vamominuei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamominuei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamominuei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamominuei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamominuei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u32m8((uint32_t *)(op0), 
(vuint16m4_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint16m4_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u32m8((uint32_t *)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint32m8_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), 
(vuint32mf2_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamomaxuei8_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei8_v_u64m8((uint64_t *)(op0), (vuint8m1_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamomaxuei8_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei8_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint8m1_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamomaxuei16_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei16_v_u64m8((uint64_t *)(op0), (vuint16m2_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamomaxuei16_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei16_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint16m2_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define 
vamomaxuei32_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamomaxuei32_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei32_v_u64m8((uint64_t *)(op0), (vuint32m4_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamomaxuei32_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei32_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint32m4_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vamomaxuei64_v_u64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamomaxuei64_v_u64m8((uint64_t *)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (size_t)(op3)) +#define vamomaxuei64_v_u64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamomaxuei64_v_u64m8_m((vbool8_t)(op0), (uint64_t *)(op1), (vuint64m8_t)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i32m8((int32_t *)(op0), (vuint8m2_t)(op1), (vint32m8_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoswapei8_v_i32m8_m((vbool4_t)(op0), (int32_t *)(op1), (vuint8m2_t)(op2), (vint32m8_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u32m8((uint32_t *)(op0), (vuint8m2_t)(op1), (vuint32m8_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u32m8_m((vbool4_t)(op0), (uint32_t *)(op1), (vuint8m2_t)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#endif + +#if defined(__riscv_f) && defined(__riscv_zvamo) +#define vamoswapei8_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f32m4((float *)(op0), (vuint8m1_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint8m1_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f32m8((float *)(op0), (vuint8m2_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f32m8_m(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vamoswapei8_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint8m2_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f32m4((float *)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint16m2_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f32m8((float *)(op0), (vuint16m4_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f32m8_m((vbool4_t)(op0), (float *)(op1), (vuint16m4_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f32m4((float *)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f32m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f32m8((float *)(op0), (vuint32m8_t)(op1), (vfloat32m8_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f32m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f32m8_m((vbool4_t)(op0), (float *)(op1), 
(vuint32m8_t)(op2), (vfloat32m8_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f32m4((float *)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint64m8_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#endif + +#if defined(__riscv_d) && defined(__riscv_zvamo) +#define vamoswapei8_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f64m4((double *)(op0), (vuint8mf2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint8mf2_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vamoswapei8_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei8_v_f64m8((double *)(op0), (vuint8m1_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vamoswapei8_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei8_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint8m1_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), 
(vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f64m4((double *)(op0), (vuint16m1_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint16m1_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vamoswapei16_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei16_v_f64m8((double *)(op0), (vuint16m2_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vamoswapei16_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei16_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint16m2_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f64m4((double *)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint32m2_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vamoswapei32_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei32_v_f64m8((double *)(op0), (vuint32m4_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vamoswapei32_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei32_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint32m4_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vamoswapei64_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f64m4((double *)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) 
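+/* Usage sketch for the vamo*ei* macros above: each wraps a Zvamo vector
+ * atomic builtin that takes a base pointer, an index vector whose element
+ * width matches the ei<N> suffix, a value vector, and the vector length,
+ * and returns the values previously held in memory.  A minimal example,
+ * assuming a Zvamo-enabled target and caller-provided idx, vals and vl:
+ *
+ *   double buf[64];
+ *   vuint64m4_t idx = ...;   // unsigned offsets selecting elements of buf
+ *   vfloat64m4_t vals = ...; // values to swap in
+ *   vfloat64m4_t old = vamoswapei64_v_f64m4(buf, idx, vals, vl);
+ *
+ * The _m forms take a vbool mask as the first operand and update only the
+ * active elements.
+ */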
+#define vamoswapei64_v_f64m8(op0, op1, op2, op3) \ +__builtin_rvv_vamoswapei64_v_f64m8((double *)(op0), (vuint64m8_t)(op1), (vfloat64m8_t)(op2), (size_t)(op3)) +#define vamoswapei64_v_f64m8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vamoswapei64_v_f64m8_m((vbool8_t)(op0), (double *)(op1), (vuint64m8_t)(op2), (vfloat64m8_t)(op3), (size_t)(op4)) +#endif + +#if defined(__riscv_zvlsseg) +#define vloxseg2ei8_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg3ei8_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), 
(vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg4ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), 
(vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg5ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg6ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), 
(vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg7ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg8ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), 
(vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg2ei16_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg3ei16_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t 
*)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg4ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint16mf4_t)(op10), 
(size_t)(op11)) +#define vloxseg4ei16_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg5ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg6ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), 
(vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg7ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg8ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), 
(vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg2ei32_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg3ei32_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint32mf2_t)(op8), 
(size_t)(op9)) +#define vloxseg3ei32_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg4ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define 
vloxseg4ei32_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg5ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg6ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), 
(vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg7ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg8ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), 
(vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg2ei64_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg3ei64_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define 
vloxseg3ei64_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg4ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_u64m2(op0, op1, op2, op3, 
op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vloxseg5ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg6ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), 
(vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg7ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg8ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), 
(vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vsseg3e8_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_i8m2((int8_t *)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsseg3e8_v_u8mf8(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vsseg3e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsseg3e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_i8m2((int8_t *)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vsseg4e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsseg4e8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsseg4e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), 
(vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsseg5e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsseg5e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), 
(vuint8m1_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsseg6e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsseg6e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), 
(vint8mf4_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsseg7e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsseg7e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_i8mf2(op0, op1, op2, 
op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), 
(vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsseg8e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsseg8e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsseg2e16_v_i16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_i16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_i16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_i16m2((int16_t *)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_i16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_i16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_i16m4((int16_t *)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_i16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_i16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_i16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_i16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_i16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_u16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_u16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_u16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_u16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_u16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_u16m4((uint16_t *)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_u16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_u16mf2(op0, op1, op2, op3) \ 
+__builtin_rvv_vsseg2e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_u16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_u16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_u16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_i16m2((int16_t *)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) 
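The vsseg3e16_v_* forms above are the three-field segment stores: each macro writes its vector operands to memory interleaved, array-of-structures style, and the masked _m variants take a vbool mask ahead of the base pointer. A minimal caller sketch follows, assuming vsetvl_e16m1 and vle16_v_i16m1 from this same generated header and a target built with the V extension enabled (plus the 0.10-era Zvlsseg extension for the segment forms); interleave3_i16 is a hypothetical helper for illustration, not something defined by the header.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Interleave three int16 planes into dst as {a[i], b[i], c[i]} triples.
 * Assumes vsetvl_e16m1 and vle16_v_i16m1 from this same generated header. */
void interleave3_i16(int16_t *dst, const int16_t *a, const int16_t *b,
                     const int16_t *c, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e16m1(n);            /* elements handled this pass */
    vint16m1_t va = vle16_v_i16m1(a, vl);   /* unit-stride load per plane */
    vint16m1_t vb = vle16_v_i16m1(b, vl);
    vint16m1_t vc = vle16_v_i16m1(c, vl);
    vsseg3e16_v_i16m1(dst, va, vb, vc, vl); /* 3-field segment store */
    a += vl; b += vl; c += vl;
    dst += 3 * vl;
    n -= vl;
  }
}

The unmasked store writes vl triples {a[i], b[i], c[i]} starting at dst; with the masked _m form, only the triples whose mask bit is set would be written.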
+#define vsseg3e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_i16m2((int16_t *)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, 
op6) \ +__builtin_rvv_vsseg4e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), 
(vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_i16mf2((int16_t *)(op0), 
(vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_i16m1((int16_t *)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9) \ +__builtin_rvv_vsseg8e16_v_i16mf2((int16_t *)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_i16mf4((int16_t *)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsseg2e32_v_i32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_i32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_i32m2(op0, op1, op2, 
op3) \ +__builtin_rvv_vsseg2e32_v_i32m2((int32_t *)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_i32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_i32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_i32m4((int32_t *)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_i32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_i32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_i32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_u32m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_u32m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_u32m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_u32m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_u32m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_u32m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsseg2e32_v_u32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (size_t)(op3)) +#define vsseg2e32_v_u32mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_i32m2((int32_t *)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_u32m1(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsseg3e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_i32m2((int32_t *)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_u32mf2_m(op0, op1, 
op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsseg5e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsseg5e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsseg5e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsseg6e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsseg6e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) 
\ +__builtin_rvv_vsseg6e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsseg6e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsseg7e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsseg7e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsseg7e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e32_v_i32m1((int32_t *)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vsseg8e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsseg8e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e32_v_i32mf2((int32_t *)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsseg8e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsseg8e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsseg2e64_v_i64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_i64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_i64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_i64m2((int64_t *)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_i64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_i64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_i64m4((int64_t *)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_i64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_u64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_u64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_u64m2(op0, 
op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_u64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_u64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_u64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsseg3e64_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e64_v_i64m2((int64_t *)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsseg3e64_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsseg3e64_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsseg4e64_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e64_v_i64m2((int64_t *)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsseg4e64_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsseg4e64_v_u64m2(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsseg4e64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsseg5e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsseg5e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsseg5e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsseg5e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsseg6e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsseg6e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsseg6e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsseg6e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsseg7e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsseg7e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsseg7e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsseg7e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), 
(vuint64m1_t)(op8), (size_t)(op9)) +#define vsseg8e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e64_v_i64m1((int64_t *)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsseg8e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsseg8e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsseg8e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vssseg3e8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_i8m2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), 
(vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_u8m2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vssseg3e8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vssseg3e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_i8m2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), 
(vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_u8m2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vssseg4e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vssseg4e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_i8m1_m((vbool8_t)(op0), 
(int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vssseg5e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vssseg5e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define 
vssseg5e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), 
(vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vssseg6e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vssseg6e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vssseg7e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vssseg7e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vssseg7e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_i8mf2((int8_t *)(op0), 
(ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), 
(size_t)(op10)) +#define vssseg8e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vssseg8e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vssseg8e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vssseg2e16_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_i16m2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_i16m4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_i16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_u16m2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), 
(size_t)(op4)) +#define vssseg2e16_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_u16m4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_u16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_i16m2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), 
(vuint16m1_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_u16m2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_i16m2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define 
vssseg4e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_u16m2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vssseg4e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vssseg5e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vssseg5e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vssseg5e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), 
(vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vssseg5e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vssseg5e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vssseg5e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vssseg6e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vssseg6e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vssseg6e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, 
op8, op9) \ +__builtin_rvv_vssseg6e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vssseg6e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vssseg6e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vssseg6e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vssseg7e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vssseg7e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vssseg7e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), 
(vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vssseg7e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vssseg7e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vssseg7e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e16_v_i16m1((int16_t *)(op0), (ptrdiff_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11)) +#define vssseg8e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e16_v_i16mf2((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11)) +#define vssseg8e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e16_v_i16mf4((int16_t *)(op0), (ptrdiff_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9, op10, op11) \ +__builtin_rvv_vssseg8e16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (ptrdiff_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11)) +#define vssseg8e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e16_v_u16m1((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vssseg8e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e16_v_u16mf2((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vssseg8e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e16_v_u16mf4((uint16_t *)(op0), (ptrdiff_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vssseg8e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (ptrdiff_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vssseg2e32_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_i32m2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_i32m4((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), 
(size_t)(op5)) +#define vssseg2e32_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_u32m2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_u32m4((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_i32m2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vssseg3e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_u32m2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_i32m2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_u32m2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vssseg4e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vssseg5e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vssseg5e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vssseg5e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vssseg6e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vssseg6e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vssseg6e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vssseg6e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vssseg7e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vssseg7e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vssseg7e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), 
(vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e32_v_i32m1((int32_t *)(op0), (ptrdiff_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11)) +#define vssseg8e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e32_v_i32mf2((int32_t *)(op0), (ptrdiff_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (ptrdiff_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11)) +#define vssseg8e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e32_v_u32m1((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vssseg8e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e32_v_u32mf2((uint32_t *)(op0), (ptrdiff_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (ptrdiff_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vssseg2e64_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), 
(ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_i64m2((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_i64m4((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_u64m2((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_u64m4((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vssseg3e64_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e64_v_i64m2((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vssseg3e64_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vssseg3e64_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e64_v_u64m2((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_u64m2_m(op0, op1, 
op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vssseg4e64_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e64_v_i64m2((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vssseg4e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vssseg4e64_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e64_v_u64m2((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vssseg5e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vssseg5e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vssseg5e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vssseg5e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vssseg6e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vssseg6e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vssseg6e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vssseg6e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vssseg6e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vssseg7e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vssseg7e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vssseg7e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vssseg7e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vssseg8e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e64_v_i64m1((int64_t *)(op0), (ptrdiff_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vssseg8e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (ptrdiff_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vssseg8e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e64_v_u64m1((uint64_t *)(op0), (ptrdiff_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vssseg8e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (ptrdiff_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsuxseg3ei8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i8m1((int8_t 
*)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), 
(vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), 
(vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), 
(vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), 
(vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) 
+#define vsuxseg6ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsuxseg7ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define 
vsuxseg8ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vsuxseg8ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg2ei16_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i8m4((int8_t *)(op0), (vuint16m8_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint16m8_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u8m4((uint8_t *)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), 
(size_t)(op4)) +#define vsuxseg2ei16_v_u8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), 
(vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) 
\ +__builtin_rvv_vsuxseg4ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), 
(vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), 
(vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, 
op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), 
(vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), 
(vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, 
op10) \ +__builtin_rvv_vsuxseg8ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg2ei32_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), 
(vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i8mf2_m(op0, op1, op2, op3, op4, 
op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i8m1_m(op0, op1, 
op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), 
(vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vsuxseg5ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), 
(vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), 
(vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), 
(vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_i8mf8_m((vbool64_t)(op0), 
(int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg2ei64_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), 
(vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i8mf4(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsuxseg3ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), 
(size_t)(op7)) +#define vsuxseg4ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_i8mf2(op0, op1, op2, op3, op4, 
op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vsuxseg5ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t 
*)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) 
+#define vsuxseg7ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), 
(vint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, 
op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg2ei8_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i16m4((int16_t *)(op0), (vuint8m2_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint8m2_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsuxseg2ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u16m4((uint16_t *)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint8m2_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u16m1(op0, 
op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vsuxseg4ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define 
vsuxseg5ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsuxseg6ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg6ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg2ei16_v_i16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_i16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i16m4((int16_t *)(op0), (vuint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint16m4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u16m4((uint16_t *)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_i16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_i16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_i16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_u16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_u16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_u16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_u16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg2ei32_v_i16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i16m4((int16_t *)(op0), (vuint32m8_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint32m8_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u16m4((uint16_t *)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_i16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_i16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_i16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg2ei64_v_i16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), 
(size_t)(op6)) +#define vsuxseg4ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define 
vsuxseg4ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), 
(vuint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), 
(vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), 
(vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), 
(vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vsuxseg2ei8_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), 
(vuint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i32m1(op0, op1, 
op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vsuxseg5ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), 
(vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), 
(vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vsuxseg2ei16_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_i32mf2(op0, op1, op2, 
op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define 
vsuxseg3ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), 
(vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vsuxseg2ei32_v_i32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i32m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_i32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u32m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_u32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_i32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_i32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_u32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vsuxseg2ei64_v_i32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_i32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_i32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i32m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_i32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u32m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_u32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_i32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_i32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_i32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_u32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_u32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_u32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vsuxseg2ei8_v_i64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_i64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_i64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_i64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_u64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_u64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_u64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_u64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_i64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei8_v_i64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei8_v_u64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei8_v_u64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei8_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei8_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vsuxseg2ei16_v_i64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_i64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_i64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_i64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_u64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_u64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_i64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_u64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_u64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), 
(vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsuxseg2ei32_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) 
+#define vsuxseg3ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, 
op8) \ +__builtin_rvv_vsuxseg5ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_u64m1((uint64_t *)(op0), 
(vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsuxseg2ei64_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), 
(vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vsuxseg5ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), 
(size_t)(op10)) +#define vsuxseg8ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg2ei8_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i8m4((int8_t *)(op0), (vuint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint8m4_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), 
(size_t)(op4)) +#define vsoxseg2ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u8m4((uint8_t *)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg3ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vsoxseg4ei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) 
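[Editor's note] The vsoxseg* macros in this part of the header are the ordered counterparts of the vsuxseg* forms above: the operand shape is identical, but element stores are performed in element order, which matters when byte offsets can overlap. A sketch using the 4-field, 8-bit-indexed form vsoxseg4ei8_v_u8m1 defined above; note that 8-bit byte offsets only reach 255 bytes past the base, so the example stays inside a 64-pixel tile. Same toolchain assumptions and illustrative names as the earlier sketches.

    /* Sketch: interleave four byte planes into RGBA pixels with an
     * ordered 4-field indexed segment store. Editorial example. */
    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    void interleave_rgba_tile(uint8_t *pixels /* 64 * 4 bytes */,
                              const uint8_t *r, const uint8_t *g,
                              const uint8_t *b, const uint8_t *a) {
      uint8_t offsets[64];
      for (int i = 0; i < 64; ++i)
        offsets[i] = (uint8_t)(4 * i);        /* byte offset of pixel i */
      size_t n = 64;
      for (size_t i = 0, vl; i < n; i += vl) {
        vl = vsetvl_e8m1(n - i);
        vuint8m1_t off = vle8_v_u8m1(offsets + i, vl);
        vuint8m1_t vr = vle8_v_u8m1(r + i, vl);
        vuint8m1_t vg = vle8_v_u8m1(g + i, vl);
        vuint8m1_t vb = vle8_v_u8m1(b + i, vl);
        vuint8m1_t va = vle8_v_u8m1(a + i, vl);
        /* pixels[off[i]..off[i]+3] = {r, g, b, a}, in element order */
        vsoxseg4ei8_v_u8m1(pixels, off, vr, vg, vb, va, vl);
      }
    }

With disjoint offsets like these the ordered and unordered forms behave the same; the ordered form is shown simply because this region of the header defines it.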
+#define vsoxseg4ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), 
(vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, 
op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i8mf2_m(op0, 
op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) 
\ +__builtin_rvv_vsoxseg7ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), 
(vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg2ei16_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i8m4((int8_t *)(op0), (vuint16m8_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i8m4_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg2ei16_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint16m8_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u8m4((uint8_t *)(op0), (vuint16m8_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint16m8_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg2ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u8mf2_m((vbool16_t)(op0), 
(uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i8m2((int8_t *)(op0), (vuint16m4_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint16m4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u8m1((uint8_t 
*)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u8m2((uint8_t *)(op0), (vuint16m4_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint16m4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i8mf4((int8_t *)(op0), 
(vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i8m1_m((vbool8_t)(op0), (int8_t 
*)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), 
(vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), 
(vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i8m1((int8_t *)(op0), (vuint16m2_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint16m2_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i8mf2((int8_t *)(op0), (vuint16m1_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint16m1_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), 
(vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i8mf4((int8_t *)(op0), (vuint16mf2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint16mf2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i8mf8((int8_t *)(op0), (vuint16mf4_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint16mf4_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u8m1((uint8_t *)(op0), (vuint16m2_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint16m2_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u8mf2((uint8_t *)(op0), (vuint16m1_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint16m1_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u8mf4((uint8_t *)(op0), (vuint16mf2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint16mf2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), 
(vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u8mf8((uint8_t *)(op0), (vuint16mf4_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint16mf4_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg2ei32_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u8mf2(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxseg2ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define 
vsoxseg3ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i8m2((int8_t *)(op0), (vuint32m8_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint32m8_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vsoxseg4ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u8m2((uint8_t *)(op0), (vuint32m8_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint32m8_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), 
(vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), 
(vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u8mf2(op0, op1, 
op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), 
(vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i8m1((int8_t *)(op0), (vuint32m4_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), 
(vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint32m4_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i8mf2((int8_t *)(op0), (vuint32m2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint32m2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i8mf4((int8_t *)(op0), (vuint32m1_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint32m1_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i8mf8((int8_t *)(op0), (vuint32mf2_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint32mf2_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u8m1((uint8_t *)(op0), (vuint32m4_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint32m4_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u8mf2((uint8_t *)(op0), (vuint32m2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define 
vsoxseg8ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint32m2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u8mf4((uint8_t *)(op0), (vuint32m1_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint32m1_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u8mf8((uint8_t *)(op0), (vuint32mf2_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint32mf2_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg2ei64_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u8m1_m(op0, 
op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), 
(size_t)(op6)) +#define vsoxseg3ei64_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define 
vsoxseg4ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), 
(vint8mf8_t)(op5), (vint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), 
(vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), 
(vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), 
(vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i8m1((int8_t *)(op0), (vuint64m8_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint64m8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i8mf2((int8_t *)(op0), (vuint64m4_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint64m4_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i8mf4((int8_t *)(op0), (vuint64m2_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), 
(vuint64m2_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i8mf8((int8_t *)(op0), (vuint64m1_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint64m1_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u8m1((uint8_t *)(op0), (vuint64m8_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint64m8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u8mf2((uint8_t *)(op0), (vuint64m4_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint64m4_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u8mf4((uint8_t *)(op0), (vuint64m2_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint64m2_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u8mf8((uint8_t *)(op0), (vuint64m1_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint64m1_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), 
(vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsoxseg2ei8_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i16m4((int16_t *)(op0), (vuint8m2_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint8m2_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u16m4((uint16_t *)(op0), (vuint8m2_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint8m2_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg2ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vsoxseg3ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i16m2((int16_t *)(op0), (vuint8m1_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint8m1_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u16m2((uint16_t *)(op0), (vuint8m1_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint8m1_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), 
(vuint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u16mf2_m(op0, op1, op2, op3, 
op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9) \ +__builtin_rvv_vsoxseg6ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsoxseg7ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i16m1((int16_t *)(op0), (vuint8mf2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint8mf2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i16mf2((int16_t *)(op0), (vuint8mf4_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint8mf4_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i16mf4((int16_t *)(op0), (vuint8mf8_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint8mf8_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u16m1((uint16_t *)(op0), (vuint8mf2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), 
(vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint8mf2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u16mf2((uint16_t *)(op0), (vuint8mf4_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint8mf4_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u16mf4((uint16_t *)(op0), (vuint8mf8_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint8mf8_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei16_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i16m4((int16_t *)(op0), (vuint16m4_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint16m4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define 
vsoxseg2ei16_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u16m4((uint16_t *)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define 
vsoxseg3ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i16m2((int16_t *)(op0), (vuint16m2_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i16mf2(op0, op1, op2, op3, op4, 
op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u16m2((uint16_t *)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define 
vsoxseg5ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), 
(vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_i16mf2(op0, op1, 
op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i16m1((int16_t *)(op0), (vuint16m1_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), 
(vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i16mf2((int16_t *)(op0), (vuint16mf2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i16mf4((int16_t *)(op0), (vuint16mf4_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u16m1((uint16_t *)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u16mf2((uint16_t *)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u16mf4((uint16_t *)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), 
(vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei32_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i16m4((int16_t *)(op0), (vuint32m8_t)(op1), (vint16m4_t)(op2), (vint16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i16m4_m((vbool4_t)(op0), (int16_t *)(op1), (vuint32m8_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u16m4((uint16_t *)(op0), (vuint32m8_t)(op1), (vuint16m4_t)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u16m4_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg2ei32_v_u16m4_m((vbool4_t)(op0), (uint16_t *)(op1), (vuint32m8_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u16m2_m((vbool8_t)(op0), 
(uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i16m2((int16_t *)(op0), (vuint32m4_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint32m4_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define 
vsoxseg4ei32_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u16m2((uint16_t *)(op0), (vuint32m4_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint32m4_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vsoxseg5ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsoxseg6ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u16m1((uint16_t *)(op0), 
(vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i16m1((int16_t *)(op0), (vuint32m2_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint32m2_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i16mf2((int16_t *)(op0), (vuint32m1_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint32m1_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i16mf4((int16_t *)(op0), (vuint32mf2_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), 
(vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint32mf2_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u16m1((uint16_t *)(op0), (vuint32m2_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint32m2_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u16mf2((uint16_t *)(op0), (vuint32m1_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint32m1_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u16mf4((uint16_t *)(op0), (vuint32mf2_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint32mf2_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei64_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), 
(size_t)(op4)) +#define vsoxseg2ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), 
(vint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i16m2((int16_t *)(op0), (vuint64m8_t)(op1), (vint16m2_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i16m2_m((vbool8_t)(op0), (int16_t *)(op1), (vuint64m8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (size_t)(op6)) +#define 
vsoxseg4ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u16m2((uint16_t *)(op0), (vuint64m8_t)(op1), (vuint16m2_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u16m2_m((vbool8_t)(op0), (uint16_t *)(op1), (vuint64m8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), 
(vint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsoxseg6ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i16m1_m((vbool16_t)(op0), (int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), 
(vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i16m1((int16_t *)(op0), (vuint64m4_t)(op1), (vint16m1_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i16m1_m((vbool16_t)(op0), 
(int16_t *)(op1), (vuint64m4_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i16mf2((int16_t *)(op0), (vuint64m2_t)(op1), (vint16mf2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i16mf2_m((vbool32_t)(op0), (int16_t *)(op1), (vuint64m2_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i16mf4((int16_t *)(op0), (vuint64m1_t)(op1), (vint16mf4_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i16mf4_m((vbool64_t)(op0), (int16_t *)(op1), (vuint64m1_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u16m1((uint16_t *)(op0), (vuint64m4_t)(op1), (vuint16m1_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u16m1_m((vbool16_t)(op0), (uint16_t *)(op1), (vuint64m4_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u16mf2((uint16_t *)(op0), (vuint64m2_t)(op1), (vuint16mf2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u16mf2_m((vbool32_t)(op0), (uint16_t *)(op1), (vuint64m2_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u16mf4((uint16_t *)(op0), (vuint64m1_t)(op1), (vuint16mf4_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vsoxseg8ei64_v_u16mf4_m((vbool64_t)(op0), (uint16_t *)(op1), (vuint64m1_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei8_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i32m4((int32_t *)(op0), (vuint8m1_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint8m1_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u32m4((uint32_t *)(op0), (vuint8m1_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint8m1_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i32m1((int32_t 
*)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i32m2((int32_t *)(op0), (vuint8mf2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint8mf2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) 
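+// NOTE (illustrative comment only; an addition for readability, not part of
+// the upstream generated header): the vsoxseg<N>ei<EEW> macros in this block
+// wrap the RVV indexed-ordered segment-store builtins. op0 is the base
+// pointer, op1 the vector of unsigned byte offsets (EEW-wide), followed by
+// the N segment-field vectors and the vector length; the _m variants prepend
+// a vbool mask and store only the active elements. A minimal usage sketch,
+// assuming the clang 14 era intrinsic names vsetvl_e32m1 and vle8_v_u8mf4
+// declared elsewhere in this same header:
+//
+//   size_t vl = vsetvl_e32m1(n);                  // elements per iteration
+//   vuint8mf4_t idx = vle8_v_u8mf4(offsets, vl);  // per-element byte offsets
+//   vsoxseg2ei8_v_i32m1(base, idx, field0, field1, vl);
+//
+// This stores two interleaved int32 fields per element at base + offsets[i].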
+#define vsoxseg4ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u32m2((uint32_t *)(op0), (vuint8mf2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint8mf2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), 
(vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), 
(vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i32m1((int32_t *)(op0), (vuint8mf4_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint8mf4_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i32mf2((int32_t *)(op0), (vuint8mf8_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint8mf8_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, 
op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u32m1((uint32_t *)(op0), (vuint8mf4_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint8mf4_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u32mf2((uint32_t *)(op0), (vuint8mf8_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint8mf8_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei16_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i32m4((int32_t *)(op0), (vuint16m2_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint16m2_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define 
vsoxseg2ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u32m4((uint32_t *)(op0), (vuint16m2_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint16m2_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vsoxseg3ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i32m2((int32_t *)(op0), (vuint16m1_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint16m1_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u32m2((uint32_t *)(op0), (vuint16m1_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint16m1_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vsoxseg5ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), 
(vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i32m1((int32_t *)(op0), (vuint16mf2_t)(op1), 
(vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint16mf2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i32mf2((int32_t *)(op0), (vuint16mf4_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint16mf4_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u32m1((uint32_t *)(op0), (vuint16mf2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint16mf2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u32mf2((uint32_t *)(op0), (vuint16mf4_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint16mf4_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei32_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i32m4(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsoxseg2ei32_v_i32m4((int32_t *)(op0), (vuint32m4_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint32m4_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u32m4((uint32_t *)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vsoxseg3ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i32m2((int32_t *)(op0), (vuint32m2_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define 
vsoxseg4ei32_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u32m2((uint32_t *)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i32m1_m(op0, op1, op2, op3, 
op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), 
(vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i32m1((int32_t *)(op0), (vuint32m1_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i32mf2((int32_t *)(op0), (vuint32mf2_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u32m1((uint32_t *)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u32mf2((uint32_t *)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), 
(vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei64_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i32m4((int32_t *)(op0), (vuint64m8_t)(op1), (vint32m4_t)(op2), (vint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i32m4_m((vbool8_t)(op0), (int32_t *)(op1), (vuint64m8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u32m4((uint32_t *)(op0), (vuint64m8_t)(op1), (vuint32m4_t)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u32m4_m((vbool8_t)(op0), (uint32_t *)(op1), (vuint64m8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t 
*)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i32m2((int32_t *)(op0), (vuint64m4_t)(op1), (vint32m2_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i32m2_m(op0, op1, 
op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i32m2_m((vbool16_t)(op0), (int32_t *)(op1), (vuint64m4_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u32m2((uint32_t *)(op0), (vuint64m4_t)(op1), (vuint32m2_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u32m2_m((vbool16_t)(op0), (uint32_t *)(op1), (vuint64m4_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), 
(vuint32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), 
(vint32m1_t)(op7), (vint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i32mf2_m((vbool64_t)(op0), (int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i32m1((int32_t *)(op0), (vuint64m2_t)(op1), (vint32m1_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i32m1_m((vbool32_t)(op0), (int32_t *)(op1), (vuint64m2_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i32mf2((int32_t *)(op0), (vuint64m1_t)(op1), (vint32mf2_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i32mf2_m((vbool64_t)(op0), 
(int32_t *)(op1), (vuint64m1_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u32m1((uint32_t *)(op0), (vuint64m2_t)(op1), (vuint32m1_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u32m1_m((vbool32_t)(op0), (uint32_t *)(op1), (vuint64m2_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u32mf2((uint32_t *)(op0), (vuint64m1_t)(op1), (vuint32mf2_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u32mf2_m((vbool64_t)(op0), (uint32_t *)(op1), (vuint64m1_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei8_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_i64m4((int64_t *)(op0), (vuint8mf2_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint8mf2_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), 
(vuint8mf4_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_u64m4((uint64_t *)(op0), (vuint8mf2_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint8mf2_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_i64m2((int64_t *)(op0), (vuint8mf4_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint8mf4_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vsoxseg4ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_u64m2((uint64_t *)(op0), (vuint8mf4_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint8mf4_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), 
(vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_i64m1((int64_t *)(op0), (vuint8mf8_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint8mf8_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_u64m1((uint64_t *)(op0), (vuint8mf8_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint8mf8_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg2ei16_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_i64m4((int64_t *)(op0), (vuint16m1_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint16m1_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg2ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_u64m4((uint64_t *)(op0), (vuint16m1_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint16m1_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_i64m2((int64_t *)(op0), (vuint16mf2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vsoxseg4ei16_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint16mf2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_u64m2((uint64_t *)(op0), (vuint16mf2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint16mf2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9) \ +__builtin_rvv_vsoxseg7ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_i64m1((int64_t *)(op0), (vuint16mf4_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint16mf4_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_u64m1((uint64_t *)(op0), (vuint16mf4_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint16mf4_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg2ei32_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_i64m4((int64_t *)(op0), (vuint32m2_t)(op1), 
(vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint32m2_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_u64m4((uint64_t *)(op0), (vuint32m2_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint32m2_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i64m1_m(op0, op1, op2, op3, op4, 
op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_i64m2((int64_t *)(op0), (vuint32m1_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint32m1_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_u64m2((uint64_t *)(op0), (vuint32m1_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint32m1_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), 
(vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_i64m1((int64_t *)(op0), (vuint32mf2_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint32mf2_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_u64m1((uint64_t *)(op0), (vuint32mf2_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint32mf2_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg2ei64_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i64m2(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_i64m4((int64_t *)(op0), (vuint64m4_t)(op1), (vint64m4_t)(op2), (vint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_i64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_i64m4_m((vbool16_t)(op0), (int64_t *)(op1), (vuint64m4_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_u64m4((uint64_t *)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_u64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_u64m4_m((vbool16_t)(op0), (uint64_t *)(op1), (vuint64m4_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_u64m2_m(op0, op1, op2, 
op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_i64m2((int64_t *)(op0), (vuint64m2_t)(op1), (vint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_i64m2_m((vbool32_t)(op0), (int64_t *)(op1), (vuint64m2_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_u64m2((uint64_t *)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_u64m2_m((vbool32_t)(op0), (uint64_t *)(op1), (vuint64m2_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (size_t)(op8)) +#define 
vsoxseg6ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_i64m1((int64_t *)(op0), (vuint64m1_t)(op1), (vint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_i64m1_m((vbool64_t)(op0), (int64_t *)(op1), (vuint64m1_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_u64m1((uint64_t *)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_u64m1_m((vbool64_t)(op0), (uint64_t *)(op1), (vuint64m1_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), 
(vuint64m1_t)(op10), (size_t)(op11)) +#define vlseg2e8_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), (size_t)(op6)) 
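+/* Illustrative usage sketch (editor's note, not part of the generated
+ * header): the vlseg<N>e8 macros above wrap unit-stride segment loads,
+ * which deinterleave N-tuples of bytes into N vector registers in a
+ * single operation. A minimal sketch for the two-field case, assuming
+ * the vsetvl_e8m1 helper defined earlier in this header:
+ *
+ *   // split an interleaved {a,b,a,b,...} byte stream into two vectors
+ *   vuint8m1_t a, b;
+ *   size_t vl = vsetvl_e8m1(n);        // elements handled this pass
+ *   vlseg2e8_v_u8m1(&a, &b, src, vl);  // a[i] = src[2*i], b[i] = src[2*i+1]
+ */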
+#define vlseg2e8_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg2e8_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (size_t)(op3)) +#define vlseg2e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg3e8_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define 
vlseg3e8_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg3e8_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (size_t)(op4)) +#define vlseg3e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg4e8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_i8mf2((vint8mf2_t *)(op0), 
(vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_u8mf4_m((vuint8mf4_t *)(op0), 
(vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (size_t)(op10)) +#define vlseg4e8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (size_t)(op5)) +#define vlseg4e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (size_t)(op10)) +#define vlseg5e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_u8m1_m(op0, op1, op2, op3, op4, 
op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (size_t)(op12)) +#define vlseg5e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (size_t)(op6)) +#define vlseg5e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (size_t)(op12)) +#define vlseg6e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), 
(vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), 
(vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (size_t)(op14)) +#define vlseg6e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (size_t)(op7)) +#define vlseg6e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (size_t)(op14)) +#define vlseg7e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (size_t)(op8)) +#define 
vlseg7e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (size_t)(op16)) +#define vlseg7e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (size_t)(op8)) +#define vlseg7e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), 
(vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (size_t)(op16))
+#define vlseg8e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (size_t)(op18))
+#define vlseg8e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (size_t)(op9))
+#define vlseg8e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (size_t)(op18))
+#define vlseg2e16_v_i16m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_i16m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_i16m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_i16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_i16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_u16m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_u16m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_u16m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_u16mf2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg2e16_v_u16mf4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (size_t)(op3))
+#define vlseg2e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg3e16_v_i16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg3e16_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (size_t)(op4))
+#define vlseg3e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg4e16_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_i16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_i16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_i16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_u16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_u16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_u16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (size_t)(op10))
+#define vlseg4e16_v_u16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (size_t)(op5))
+#define vlseg4e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (size_t)(op10))
+#define vlseg5e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg5e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (size_t)(op12))
+#define vlseg5e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg5e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (size_t)(op12))
+#define vlseg5e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (size_t)(op6))
+#define vlseg5e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (size_t)(op12))
+#define vlseg5e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg5e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (size_t)(op12))
+#define vlseg5e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg5e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (size_t)(op12))
+#define vlseg5e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (size_t)(op6))
+#define vlseg5e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (size_t)(op12))
+#define vlseg6e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (size_t)(op7))
+#define vlseg6e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (size_t)(op14))
+#define vlseg6e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (size_t)(op7))
+#define vlseg6e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (size_t)(op14))
+#define vlseg6e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (size_t)(op7))
+#define vlseg6e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (size_t)(op14))
+#define vlseg6e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (size_t)(op7))
+#define vlseg6e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (size_t)(op14))
+#define vlseg6e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (size_t)(op7))
+#define vlseg6e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (size_t)(op14))
+#define vlseg6e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (size_t)(op7))
+#define vlseg6e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (size_t)(op14))
+#define vlseg7e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg7e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (size_t)(op16))
+#define vlseg7e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg7e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (size_t)(op16))
+#define vlseg7e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (size_t)(op8))
+#define vlseg7e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (size_t)(op16))
+#define vlseg7e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg7e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (size_t)(op16))
+#define vlseg7e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg7e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (size_t)(op16))
+#define vlseg7e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (size_t)(op8))
+#define vlseg7e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (size_t)(op16))
+#define vlseg8e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (size_t)(op9))
+#define vlseg8e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (size_t)(op18))
+#define vlseg8e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (size_t)(op9))
+#define vlseg8e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (size_t)(op18))
+#define vlseg8e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (size_t)(op9))
+#define vlseg8e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (size_t)(op18))
+#define vlseg8e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (size_t)(op9))
+#define vlseg8e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (size_t)(op18))
+#define vlseg8e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (size_t)(op9))
+#define vlseg8e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (size_t)(op18))
+#define vlseg8e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (size_t)(op9))
+#define vlseg8e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (size_t)(op18))
+#define vlseg2e32_v_i32m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (size_t)(op6))
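(Editorial sketch, not part of this patch: the generated vlsegNeW_v_* wrappers above are unit-stride segment loads that read N*vl contiguous W-bit elements and de-interleave them into N destination vectors, with the masked _m forms prepending a vboolN_t mask and per-field maskedoff operands, as the casts show. A minimal C usage sketch, assuming a toolchain built from these clang 14 headers with the segment-load extension enabled and the v0.10-style vsetvl_e32m1/vse32_v_i32m1 intrinsics from the same header:

  #include <riscv_vector.h>

  /* De-interleave an array of {re, im} int32 pairs into two planar arrays. */
  void split_complex(const int32_t *pairs, int32_t *re, int32_t *im, size_t n) {
    size_t i = 0;
    while (i < n) {
      size_t vl = vsetvl_e32m1(n - i);   /* elements handled this pass */
      vint32m1_t v_re, v_im;
      /* Reads 2*vl int32 values from pairs + 2*i; v_re receives the even
         (field 0) elements and v_im the odd (field 1) elements. */
      vlseg2e32_v_i32m1(&v_re, &v_im, pairs + 2 * i, vl);
      vse32_v_i32m1(re + i, v_re, vl);
      vse32_v_i32m1(im + i, v_im, vl);
      i += vl;
    }
  }
)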
+#define vlseg2e32_v_i32m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (size_t)(op6))
+#define vlseg2e32_v_i32m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (size_t)(op6))
+#define vlseg2e32_v_i32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (size_t)(op6))
+#define vlseg2e32_v_u32m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (size_t)(op6))
+#define vlseg2e32_v_u32m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (size_t)(op6))
+#define vlseg2e32_v_u32m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (size_t)(op6))
+#define vlseg2e32_v_u32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (size_t)(op3))
+#define vlseg2e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (size_t)(op6))
+#define vlseg3e32_v_i32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (size_t)(op4))
+#define vlseg3e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (size_t)(op8))
+#define vlseg3e32_v_i32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (size_t)(op4))
+#define vlseg3e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (size_t)(op8))
+#define vlseg3e32_v_i32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (size_t)(op4))
+#define vlseg3e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (size_t)(op8))
+#define vlseg3e32_v_u32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (size_t)(op4))
+#define vlseg3e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (size_t)(op8))
+#define vlseg3e32_v_u32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (size_t)(op4))
+#define vlseg3e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (size_t)(op8))
+#define vlseg3e32_v_u32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (size_t)(op4))
+#define vlseg3e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (size_t)(op8))
+#define vlseg4e32_v_i32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (size_t)(op5))
+#define vlseg4e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (size_t)(op10))
+#define vlseg4e32_v_i32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (size_t)(op5))
+#define vlseg4e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (size_t)(op10))
+#define vlseg4e32_v_i32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (size_t)(op5))
+#define vlseg4e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (size_t)(op10))
+#define vlseg4e32_v_u32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (size_t)(op5))
+#define vlseg4e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (size_t)(op10))
+#define vlseg4e32_v_u32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (size_t)(op5))
+#define vlseg4e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (size_t)(op10))
+#define vlseg4e32_v_u32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (size_t)(op5))
+#define vlseg4e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (size_t)(op10))
+#define vlseg5e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (size_t)(op6))
+#define vlseg5e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (size_t)(op12))
+#define vlseg5e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (size_t)(op6))
+#define vlseg5e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (size_t)(op12))
+#define vlseg5e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (size_t)(op6))
+#define vlseg5e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (size_t)(op12))
+#define vlseg5e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (size_t)(op6))
+#define vlseg5e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (size_t)(op12))
+#define vlseg6e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (size_t)(op7))
+#define vlseg6e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (size_t)(op14))
+#define vlseg6e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (size_t)(op7))
+#define vlseg6e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (size_t)(op14))
+#define vlseg6e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (size_t)(op7))
+#define vlseg6e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (size_t)(op14))
+#define vlseg6e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (size_t)(op7))
+#define vlseg6e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (size_t)(op14))
+#define vlseg7e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (size_t)(op8))
+#define vlseg7e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (size_t)(op16))
+#define vlseg7e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (size_t)(op8))
+#define vlseg7e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (size_t)(op16))
+#define vlseg7e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (size_t)(op8))
+#define vlseg7e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (size_t)(op16))
+#define vlseg7e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (size_t)(op8))
+#define vlseg7e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (size_t)(op16))
+#define vlseg8e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (size_t)(op9))
+#define vlseg8e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (size_t)(op18))
+#define vlseg8e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (size_t)(op9))
+#define vlseg8e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (size_t)(op18))
+#define vlseg8e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (size_t)(op9))
+#define vlseg8e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (size_t)(op18))
+#define vlseg8e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (size_t)(op9))
+#define vlseg8e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (size_t)(op18))
+#define vlseg2e64_v_i64m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (size_t)(op3))
+#define vlseg2e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (size_t)(op6))
+#define vlseg2e64_v_i64m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (size_t)(op3))
+#define vlseg2e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (size_t)(op6))
+#define vlseg2e64_v_i64m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (size_t)(op3))
+#define vlseg2e64_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (size_t)(op6))
+#define vlseg2e64_v_u64m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (size_t)(op3))
+#define vlseg2e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (size_t)(op6))
+#define vlseg2e64_v_u64m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (size_t)(op3))
+#define vlseg2e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (size_t)(op6))
+#define vlseg2e64_v_u64m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (size_t)(op3))
+#define vlseg2e64_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (size_t)(op6))
+#define vlseg3e64_v_i64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (size_t)(op4))
+#define vlseg3e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (size_t)(op8))
+#define vlseg3e64_v_i64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (size_t)(op4))
+#define vlseg3e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (size_t)(op8))
+#define vlseg3e64_v_u64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (size_t)(op4))
+#define vlseg3e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (size_t)(op8))
+#define vlseg3e64_v_u64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (size_t)(op4))
+#define vlseg3e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (size_t)(op8))
+#define vlseg4e64_v_i64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (size_t)(op5))
+#define vlseg4e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (size_t)(op10))
+#define vlseg4e64_v_i64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (size_t)(op5))
+#define vlseg4e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (size_t)(op10))
+#define vlseg4e64_v_u64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (size_t)(op5))
+#define vlseg4e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (size_t)(op10))
+#define vlseg4e64_v_u64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (size_t)(op5))
+#define vlseg4e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (size_t)(op10))
+#define vlseg5e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (size_t)(op6))
+#define vlseg5e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (size_t)(op12))
+#define vlseg5e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (size_t)(op6))
+#define vlseg5e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (size_t)(op12))
+#define vlseg6e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (size_t)(op7))
+#define vlseg6e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (size_t)(op14))
+#define vlseg6e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (size_t)(op7))
+#define vlseg6e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (size_t)(op14))
+#define vlseg7e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (size_t)(op8))
+#define vlseg7e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (size_t)(op16))
+#define vlseg7e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (size_t)(op8))
+#define vlseg7e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (size_t)(op16))
+#define vlseg8e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (size_t)(op9))
+#define vlseg8e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (size_t)(op18))
+#define vlseg8e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (size_t)(op9))
+#define vlseg8e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (size_t)(op18))
uint64_t *)(op7), (size_t)(op8)) +#define vlseg7e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (size_t)(op16)) +#define vlseg8e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (size_t)(op9)) +#define vlseg8e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (size_t)(op18)) +#define vlseg8e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (size_t)(op9)) +#define vlseg8e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (size_t)(op18)) +#define vlseg3e8ff_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (size_t *)(op4), 
(size_t)(op5)) +#define vlseg3e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e8ff_v_u8mf8(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vlseg3e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e8ff_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg4e8ff_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vlseg4e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e8ff_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e8ff_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg5e8ff_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), 
(vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e8ff_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e8ff_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e8ff_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e8ff_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e8ff_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define 
vlseg5e8ff_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e8ff_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e8ff_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg6e8ff_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), 
(vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e8ff_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e8ff_v_u8mf8_m(op0, op1, op2, op3, 
op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg7e8ff_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), 
(vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e8ff_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e8ff_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), 
(vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg8e8ff_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t 
*)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e8ff_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e8ff_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, 
op19) \ +__builtin_rvv_vlseg8e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg2e16ff_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), 
(vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg3e16ff_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), 
(vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg4e16ff_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_i16mf2(op0, 
op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), 
(vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg5e16ff_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), 
(vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg6e16ff_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define 
vlseg6e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg7e16ff_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t 
*)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t 
*)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg8e16ff_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, 
op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg2e32ff_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (size_t 
*)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t 
*)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg3e32ff_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define 
vlseg3e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg4e32ff_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, 
op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg5e32ff_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e32ff_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e32ff_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), 
(vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e32ff_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg6e32ff_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e32ff_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e32ff_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e32ff_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define 
vlseg6e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg7e32ff_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e32ff_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg2e8ff_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) 
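[Editorial aside, not part of the generated header or of this patch: the vlseg<N>e<EEW>ff ("fault-only-first") macros above expand to __builtin_rvv_* calls that deinterleave N fields per segment into N output vectors and report, through a size_t out-parameter, how many elements were actually loaded before a fault would have occurred. A minimal usage sketch follows, assuming a RISC-V target with the V extension and that this generated header is reached via <riscv_vector.h>; the function and variable names are hypothetical.]

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Load up to `vl` interleaved {x,y} pairs from `src`, splitting them
     * into two vint32m1_t registers. Per the macro expansion above,
     * vlseg2e32ff_v_i32m1 takes two output-vector pointers, the source
     * pointer, a size_t* that receives the count of elements actually
     * loaded, and the requested vector length. On a fault past the first
     * segment the load stops early instead of trapping. */
    static size_t load_xy_pairs(const int32_t *src, size_t vl,
                                vint32m1_t *x, vint32m1_t *y) {
      size_t new_vl;
      vlseg2e32ff_v_i32m1(x, y, src, &new_vl, vl);
      return new_vl; /* effective length for subsequent ops on x and y */
    }

[The masked _m variants follow the same shape but, as the casts in their expansions show, additionally take a vbool mask and one maskedoff vector per field ahead of the source pointer.]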
+#define vlseg2e8ff_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e8ff_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e8ff_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e8ff_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e8ff_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg7e32ff_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e32ff_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg8e32ff_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e32ff_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e32ff_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, 
op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e32ff_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e32ff_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e32ff_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e32ff_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e32ff_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e32ff_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e32ff_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e32ff_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e32ff_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e32ff_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e32ff_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e32ff_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e32ff_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg2e64ff_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const 
int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e64ff_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e64ff_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e64ff_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e64ff_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e64ff_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e64ff_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e64ff_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e64ff_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e64ff_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e64ff_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e64ff_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e64ff_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e64ff_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e64ff_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e64ff_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e64ff_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e64ff_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e64ff_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg3e64ff_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e64ff_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e64ff_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e64ff_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e64ff_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), 
(vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e64ff_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e64ff_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e64ff_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e64ff_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e64ff_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e64ff_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg4e64ff_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e64ff_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e64ff_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e64ff_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e64ff_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e64ff_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e64ff_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e64ff_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e64ff_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e64ff_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e64ff_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), 
(vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg5e64ff_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlsseg2e8_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlseg5e64ff_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vlseg5e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e64ff_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg6e64ff_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e64ff_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e64ff_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg7e64ff_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e64ff_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e64ff_v_u64m1_m((vuint64m1_t 
*)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg8e64ff_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e64ff_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e64ff_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e64ff_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e64ff_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e64ff_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e64ff_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e64ff_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlsseg2e8_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), 
(ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e8_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (vuint8m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), 
(vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vlsseg3e8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vluxseg2ei8_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), 
(vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), (vuint8m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vlsseg3e8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), 
(ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg4e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vlsseg4e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define 
vlsseg4e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg5e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), 
(vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg6e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), 
(vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t 
*)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg7e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), 
(vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), 
(vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg8e8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), 
(vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vsseg2e8_v_i8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_i8m1((int8_t *)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_i8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_i8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_i8m2((int8_t *)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_i8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_i8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_i8m4((int8_t *)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_i8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_i8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_i8mf2((int8_t *)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_i8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_i8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_i8mf4((int8_t *)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_i8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_i8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_i8mf8((int8_t *)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_i8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vlsseg8e8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ 
+__builtin_rvv_vlsseg8e8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg2e16_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vlsseg2e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), 
(ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg3e16_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_u16m2((vuint16m2_t 
*)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg4e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t 
*)(op3), (const int16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg5e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), 
(vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), 
(vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg6e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vsseg2e8_v_u8m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_u8m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_u8m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_u8m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_u8m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_u8m4((uint8_t *)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_u8m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_u8mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_u8mf2((uint8_t *)(op0), 
(vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_u8mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_u8mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_u8mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsseg2e8_v_u8mf8(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (size_t)(op3)) +#define vsseg2e8_v_u8mf8_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vlsseg6e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg7e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), 
(vint16m1_t *)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), 
(size_t)(op9)) +#define vlsseg7e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg8e16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), 
(vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), 
(vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg2e32_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define 
vlsseg2e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg3e32_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg4e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vlsseg4e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), 
(vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg5e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg6e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), 
(vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg7e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), 
(vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg8e32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), 
(vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg2e64_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e64_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), 
(vint64m2_t)(op4), (const int64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e64_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e64_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e64_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e64_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e64_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e64_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e64_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e64_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e64_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e64_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg3e64_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e64_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e64_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e64_v_u64m2(op0, op1, op2, op3, op4, op5) 
\ +__builtin_rvv_vlsseg3e64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg4e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e64_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e64_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg5e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), 
(vint64m1_t)(op10), (const int64_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg6e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg7e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e64_v_u64m1_m(op0, op1, 
op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg8e64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vluxseg3ei8_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (vuint8m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t 
*)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (vuint8m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), 
(const uint8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg4ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (vuint8m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), 
(vint8mf8_t)(op8), (const int8_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (vuint8m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (vuint8m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vluxseg5ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ 
+__builtin_rvv_vluxseg5ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint8m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint8m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t 
*)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg6ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint8m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, 
op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint8m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define 
vluxseg6ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg7ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint8m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t 
*)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint8m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) 
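+// --- Editor's note (illustrative only, not part of the upstream header): the
+// vluxseg<N>ei<W>_v_* macros above wrap indexed-unordered segment loads. Each
+// call reads an N-field segment starting at base + idx[i] and de-interleaves
+// field k into the k-th destination vector. A minimal usage sketch, assuming
+// the RVV v0.10 intrinsics shipped with clang 14 (vsetvl_e8m1 etc.) and
+// caller-supplied base/idx/n, none of which appear in this header:
+//
+//   vuint8m1_t f0, f1, f2;            // one destination register per field
+//   size_t vl = vsetvl_e8m1(n);       // elements to process in this strip
+//   vluxseg3ei8_v_u8m1(&f0, &f1, &f2, base, idx, vl);
+//   // f0[i] = base[idx[i]], f1[i] = base[idx[i]+1], f2[i] = base[idx[i]+2]
+//
+// The _m variants additionally take a vbool mask and one maskedoff vector per
+// field before the base pointer, matching the operand order cast above. ---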
+#define vluxseg7ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg8ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint8m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), 
(vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint8m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint8mf4_t)(op18), 
(size_t)(op19)) +#define vluxseg8ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg2ei16_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (vuint16m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vluxseg2ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), (vuint16m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg3ei16_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_i8m2(op0, op1, op2, op3, 
op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (vuint16m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (vuint16m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), 
(vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg4ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (vuint16m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (vuint16m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), 
(vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (vuint16m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (vuint16m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u8mf8_m(op0, 
op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg5ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint16m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_u8m1_m((vuint8m1_t *)(op0), 
(vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint16m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg6ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint16m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint16m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, 
op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint16m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint16m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_u8mf4(op0, op1, 
op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg7ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint16m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint16mf2_t)(op8), 
(size_t)(op9)) +#define vluxseg7ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint16m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg8ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint16m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint16m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint16mf2_t)(op9), 
(size_t)(op10)) +#define vluxseg8ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint16m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint16m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vluxseg8ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg2ei32_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), 
(vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg3ei32_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i8m2((vint8m2_t *)(op0), (vint8m2_t 
*)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (vuint32m8_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (vuint32m8_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (vuint32m8_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (vuint32m8_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), 
(vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg4ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (vuint32m8_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (vuint32m8_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), 
(const int8_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (vuint32m8_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (vuint32m8_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vluxseg4ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg5ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint32m4_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), 
(vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint32m4_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg6ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint32m4_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint32m4_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ 
+__builtin_rvv_vluxseg6ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint32m4_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint32m4_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, 
op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg7ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint32m4_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define 
vluxseg7ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint32m4_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, 
op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg8ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint32m4_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint32m4_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define 
vluxseg8ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint32m4_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint32m4_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vluxseg8ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg2ei64_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t 
*)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg3ei64_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), 
(vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg4ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t 
*)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define 
vluxseg4ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg5ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint64m8_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t 
*)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint64m8_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg6ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint64m8_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint64m8_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_i8mf2((vint8mf2_t 
*)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint64m8_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint64m8_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), 
(vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg7ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint64m8_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_i8mf4(op0, op1, op2, 
op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint64m8_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_u8mf4((vuint8mf4_t *)(op0), 
(vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vluxseg8ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint64m8_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint64m8_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) 
\ +__builtin_rvv_vluxseg8ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint64m8_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint64m8_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), 
(vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vluxseg2ei8_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i16mf2((vint16mf2_t 
*)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg3ei8_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t 
*)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u16mf4((vuint16mf4_t 
*)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vluxseg3ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vluxseg4ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11))
+#define vluxseg5ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13))
+#define vluxseg5ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13))
+#define vluxseg5ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13))
+#define vluxseg5ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13))
+#define vluxseg5ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13))
+#define vluxseg5ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13))
+#define vluxseg6ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15))
+#define vluxseg6ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15))
+#define vluxseg6ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15))
+#define vluxseg6ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15))
+#define vluxseg6ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15))
+#define vluxseg6ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15))
+#define vluxseg7ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17))
+#define vluxseg7ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17))
+#define vluxseg7ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17))
+#define vluxseg7ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17))
+#define vluxseg7ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17))
+#define vluxseg7ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17))
+#define vluxseg8ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19))
+#define vluxseg8ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19))
+#define vluxseg8ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19))
+#define vluxseg8ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19))
+#define vluxseg8ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19))
+#define vluxseg8ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19))
+#define vluxseg2ei16_v_i16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_i16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
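(Editorial note, not part of the patch: the vluxseg<N>ei<W> macros above wrap clang's RVV v0.10 builtins for unordered indexed segment loads; each of the N destination pointers receives one field of an N-field segment fetched at the base address plus a per-element offset (byte offsets, per the RVV spec). A minimal usage sketch in C, assuming a clang 14 RVV target and the vluxseg2ei16_v_i16m1 and vse16_v_i16m1 macros defined by this header; function and variable names are illustrative only:)

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch: de-interleave vl two-field segments {x, y} of
       int16_t, gathered from 'pairs' at the given per-element byte offsets. */
    void split_pairs(int16_t *xs, int16_t *ys, const int16_t *pairs,
                     vuint16m1_t byte_offsets, size_t vl) {
      vint16m1_t x, y;
      /* Field 0 of each segment lands in x, field 1 in y. */
      vluxseg2ei16_v_i16m1(&x, &y, pairs, byte_offsets, vl);
      vse16_v_i16m1(xs, x, vl); /* store field 0 contiguously */
      vse16_v_i16m1(ys, y, vl); /* store field 1 contiguously */
    }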
+#define vluxseg2ei16_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg3ei16_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_i16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_i16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_i16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_u16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_u16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_u16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_u16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg4ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vluxseg5ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13))
+#define vluxseg5ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13))
+#define vluxseg5ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13))
+#define vluxseg5ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13))
+#define vluxseg5ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13))
+#define vluxseg5ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13))
+#define vluxseg6ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15))
+#define vluxseg6ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15))
+#define vluxseg6ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15))
+#define vluxseg6ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15))
+#define vluxseg6ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15))
+#define vluxseg6ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15))
+#define vluxseg7ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17))
+#define vluxseg7ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17))
+#define vluxseg7ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vluxseg7ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17))
+#define vluxseg7ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17))
+#define vluxseg7ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vluxseg8ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19))
+#define vluxseg8ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19))
+#define vluxseg8ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
+#define vluxseg8ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19))
+#define vluxseg8ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19))
+#define vluxseg8ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
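(Editorial note, not part of the patch: the _m variants take a mask and one "maskedoff" merge value per field between the destination pointers and the base pointer, as the casts above show. A hedged sketch of the masked form, assuming the vmsne_vx_u16m1_b16 and vmv_v_x_i16m1 macros defined elsewhere in this header; function and variable names are illustrative only:)

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative sketch: gather only where the offset is non-zero;
       inactive lanes keep 'fill' instead of reading memory. */
    void split_pairs_masked(int16_t *xs, int16_t *ys, const int16_t *pairs,
                            vuint16m1_t byte_offsets, int16_t fill, size_t vl) {
      vbool16_t active = vmsne_vx_u16m1_b16(byte_offsets, 0, vl);
      vint16m1_t x = vmv_v_x_i16m1(fill, vl); /* maskedoff for field 0 */
      vint16m1_t y = x;                       /* maskedoff for field 1 */
      vluxseg2ei16_v_i16m1_m(&x, &y, active, x, y, pairs, byte_offsets, vl);
      vse16_v_i16m1(xs, x, vl);
      vse16_v_i16m1(ys, y, vl);
    }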
+__builtin_rvv_vluxseg2ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_i16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg3ei32_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_i16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_i16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_i16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_u16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_u16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_u16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_u16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg4ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vluxseg5ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13))
+#define vluxseg5ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13))
+#define vluxseg5ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vluxseg5ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13))
+#define vluxseg5ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13))
+#define vluxseg5ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vluxseg6ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15))
+#define vluxseg6ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15))
+#define vluxseg6ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vluxseg6ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15))
+#define vluxseg6ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15))
+#define vluxseg6ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vluxseg7ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17))
+#define vluxseg7ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17))
+#define vluxseg7ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vluxseg7ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17))
+#define vluxseg7ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17))
+#define vluxseg7ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vluxseg8ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19))
+#define vluxseg8ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19))
+#define vluxseg8ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vssseg2e8_v_i8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_i8m1((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_i8m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5))
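As an aside on how these generated wrappers are used: each macro only casts its arguments and forwards them to the matching __builtin_rvv_* intrinsic, so callers invoke them like ordinary functions. A minimal sketch follows, assuming a RISC-V toolchain with the V extension enabled; the function and variable names are hypothetical, while the vssseg2e8_v_i8m1 signature is exactly the one defined above.

/* Illustrative sketch, not part of the generated header: interleave two
 * int8_t field vectors x and y into dst as x0,y0,x1,y1,... using the
 * strided segment store wrapper. Arguments are: base pointer, byte stride
 * between consecutive segments (2 fields x 1 byte = 2), the two field
 * vectors, and the active vector length vl. */
static void interleave2_i8(int8_t *dst, vint8m1_t x, vint8m1_t y, size_t vl) {
  vssseg2e8_v_i8m1(dst, (ptrdiff_t)2, x, y, vl);
}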
+#define vssseg2e8_v_i8m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_i8m2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_i8m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_i8m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_i8m4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_i8m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_i8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_i8mf2((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_i8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_i8mf4((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_i8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_i8mf8((int8_t *)(op0), (ptrdiff_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (ptrdiff_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5))
+#define vluxseg8ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19))
+#define vluxseg8ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19))
+#define vluxseg8ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vluxseg2ei64_v_i16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_i16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_i16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_i16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg3ei64_v_i16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_i16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_i16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_i16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_u16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_u16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_u16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_u16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg4ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vluxseg5ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13))
+#define vluxseg5ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13))
+#define vluxseg5ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vluxseg5ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13))
+#define vluxseg5ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
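A similar orientation note for the indexed segment loads that dominate this stretch of the header: the wrappers return the segment fields through output pointers, and the index vector holds the byte offset of each segment. A minimal sketch, again assuming the V extension is available; the names below are hypothetical, while the vluxseg2ei32_v_i16m1 signature is the one defined earlier in this file.

/* Illustrative sketch, not part of the generated header: gather interleaved
 * {re, im} int16_t pairs from scattered positions. 'offsets' holds the byte
 * offset of each pair relative to 'src' (element index * 4 bytes, since one
 * segment is 2 fields x 2 bytes). */
static void gather_complex_i16(const int16_t *src, vuint32m2_t offsets,
                               vint16m1_t *re, vint16m1_t *im, size_t vl) {
  vluxseg2ei32_v_i16m1(re, im, src, offsets, vl);
}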
+#define vluxseg5ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13))
+#define vluxseg5ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vluxseg6ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15))
+#define vluxseg6ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15))
+#define vluxseg6ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vssseg2e8_v_u8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_u8m1((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_u8m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_u8m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_u8m2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_u8m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_u8m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_u8m4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_u8m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_u8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_u8mf2((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_u8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_u8mf4((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5))
+#define vssseg2e8_v_u8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vssseg2e8_v_u8mf8((uint8_t *)(op0), (ptrdiff_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vssseg2e8_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vssseg2e8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (ptrdiff_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vluxseg6ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15))
+#define vluxseg6ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15))
+#define vluxseg6ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vluxseg7ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17))
+#define vluxseg7ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17))
+#define vluxseg7ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vluxseg7ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17))
+#define vluxseg7ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17))
+#define vluxseg7ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vluxseg8ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#define vluxseg2ei8_v_i32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vluxseg2ei8_v_i32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei8_v_i32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei8_v_i32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vluxseg2ei8_v_u32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t
*)(op1), (const uint32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg3ei8_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), 
(vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg4ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vsuxseg2ei8_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i8m1((int8_t *)(op0), (vuint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i8m1_m(op0, op1, op2, op3, 
op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i8m1_m((vbool8_t)(op0), (int8_t *)(op1), (vuint8m1_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i8m2((int8_t *)(op0), (vuint8m2_t)(op1), (vint8m2_t)(op2), (vint8m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i8m2_m((vbool4_t)(op0), (int8_t *)(op1), (vuint8m2_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i8m4((int8_t *)(op0), (vuint8m4_t)(op1), (vint8m4_t)(op2), (vint8m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i8m4_m((vbool2_t)(op0), (int8_t *)(op1), (vuint8m4_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i8mf2((int8_t *)(op0), (vuint8mf2_t)(op1), (vint8mf2_t)(op2), (vint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i8mf2_m((vbool16_t)(op0), (int8_t *)(op1), (vuint8mf2_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i8mf4((int8_t *)(op0), (vuint8mf4_t)(op1), (vint8mf4_t)(op2), (vint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i8mf4_m((vbool32_t)(op0), (int8_t *)(op1), (vuint8mf4_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_i8mf8((int8_t *)(op0), (vuint8mf8_t)(op1), (vint8mf8_t)(op2), (vint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_i8mf8_m((vbool64_t)(op0), (int8_t *)(op1), (vuint8mf8_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg4ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), 
(vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vluxseg5ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg6ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), 
(vuint8mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg7ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t 
*)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg8ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), 
(vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg2ei16_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), 
(vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vsuxseg2ei8_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u8m1((uint8_t *)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u8m1_m((vbool8_t)(op0), (uint8_t *)(op1), (vuint8m1_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u8m2((uint8_t *)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u8m2_m((vbool4_t)(op0), (uint8_t *)(op1), (vuint8m2_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u8m4((uint8_t *)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u8m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u8m4_m((vbool2_t)(op0), (uint8_t *)(op1), (vuint8m4_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u8mf2((uint8_t *)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u8mf2_m((vbool16_t)(op0), (uint8_t *)(op1), (vuint8mf2_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_u8mf4((uint8_t *)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u8mf4_m((vbool32_t)(op0), (uint8_t *)(op1), (vuint8mf4_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_u8mf8(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vsuxseg2ei8_v_u8mf8((uint8_t *)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_u8mf8_m((vbool64_t)(op0), (uint8_t *)(op1), (vuint8mf8_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg2ei16_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg3ei16_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define 
vluxseg3ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg4ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, 
op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg5ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), 
(vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg6ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, 
op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg7ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), 
(vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg8ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg2ei32_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), 
(vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg3ei32_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), 
(vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg4ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const 
uint32_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg5ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ 
+__builtin_rvv_vluxseg5ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg6ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vluxseg6ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg7ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t 
*)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg8ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const 
uint32_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg2ei64_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), 
(vuint64m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg3ei64_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u32m2((vuint32m2_t 
*)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg4ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_u32m2(op0, op1, op2, 
op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg5ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), 
(vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg6ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), 
(vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg7ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), 
(vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vluxseg8ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, 
op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vluxseg2ei8_v_i64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_i64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_u64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg3ei8_v_i64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i64m1_m(op0, op1, op2, 
op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_i64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_u64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg4ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), 
(vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg5ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg6ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t 
*)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg7ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg8ei8_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), 
+#define vluxseg2ei16_v_i64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_i64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_i64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_u64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vluxseg3ei16_v_i64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_i64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_u64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_u64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg4ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vluxseg4ei16_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei16_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei16_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vluxseg5ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13))
+#define vluxseg5ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg5ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13))
+#define vluxseg6ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15))
+#define vluxseg6ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vluxseg6ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15))
+#define vluxseg7ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vluxseg7ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg7ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vluxseg8ei16_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
+#define vluxseg8ei16_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei16_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vluxseg8ei16_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei16_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
+#define vluxseg2ei32_v_i64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_i64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_i64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei32_v_u64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei32_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei32_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei32_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vluxseg3ei32_v_i64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_i64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_u64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg3ei32_v_u64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei32_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei32_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vluxseg4ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vluxseg4ei32_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei32_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei32_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei32_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vluxseg5ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vluxseg5ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vluxseg5ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vluxseg6ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vluxseg6ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vluxseg6ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vluxseg7ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vluxseg7ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vluxseg7ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vluxseg8ei32_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vluxseg8ei32_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei32_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vluxseg8ei32_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei32_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vluxseg2ei64_v_i64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (const int64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vbool64_t)(op2), (vint64m1_t)(op3), (vint64m1_t)(op4), (const int64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_i64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (const int64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vbool32_t)(op2), (vint64m2_t)(op3), (vint64m2_t)(op4), (const int64_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_i64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_i64m4((vint64m4_t *)(op0), (vint64m4_t *)(op1), (const int64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_i64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_i64m4_m((vint64m4_t *)(op0), (vint64m4_t *)(op1), (vbool16_t)(op2), (vint64m4_t)(op3), (vint64m4_t)(op4), (const int64_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (const uint64_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vbool64_t)(op2), (vuint64m1_t)(op3), (vuint64m1_t)(op4), (const uint64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (const uint64_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vbool32_t)(op2), (vuint64m2_t)(op3), (vuint64m2_t)(op4), (const uint64_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vluxseg2ei64_v_u64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei64_v_u64m4((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (const uint64_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vluxseg2ei64_v_u64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei64_v_u64m4_m((vuint64m4_t *)(op0), (vuint64m4_t *)(op1), (vbool16_t)(op2), (vuint64m4_t)(op3), (vuint64m4_t)(op4), (const uint64_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vluxseg3ei64_v_i64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (const int64_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vbool64_t)(op3), (vint64m1_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (const int64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_i64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (const int64_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vbool32_t)(op3), (vint64m2_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (const int64_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_u64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (const uint64_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vbool64_t)(op3), (vuint64m1_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (const uint64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg3ei64_v_u64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (const uint64_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vluxseg3ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vbool32_t)(op3), (vuint64m2_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (const uint64_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vluxseg4ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (const int64_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vbool64_t)(op4), (vint64m1_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (const int64_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_i64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_i64m2((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (const int64_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_i64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_i64m2_m((vint64m2_t *)(op0), (vint64m2_t *)(op1), (vint64m2_t *)(op2), (vint64m2_t *)(op3), (vbool32_t)(op4), (vint64m2_t)(op5), (vint64m2_t)(op6), (vint64m2_t)(op7), (vint64m2_t)(op8), (const int64_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (const uint64_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vbool64_t)(op4), (vuint64m1_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (const uint64_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vluxseg4ei64_v_u64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei64_v_u64m2((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (const uint64_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vluxseg4ei64_v_u64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei64_v_u64m2_m((vuint64m2_t *)(op0), (vuint64m2_t *)(op1), (vuint64m2_t *)(op2), (vuint64m2_t *)(op3), (vbool32_t)(op4), (vuint64m2_t)(op5), (vuint64m2_t)(op6), (vuint64m2_t)(op7), (vuint64m2_t)(op8), (const uint64_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vluxseg5ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (const int64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vbool64_t)(op5), (vint64m1_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (const int64_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vluxseg5ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (const uint64_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vluxseg5ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vbool64_t)(op5), (vuint64m1_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (const uint64_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vluxseg6ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (const int64_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vbool64_t)(op6), (vint64m1_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (const int64_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vluxseg6ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (const uint64_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vluxseg6ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vbool64_t)(op6), (vuint64m1_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (const uint64_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vluxseg7ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (const int64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vbool64_t)(op7), (vint64m1_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (const int64_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vluxseg7ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (const uint64_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vluxseg7ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vbool64_t)(op7), (vuint64m1_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (const uint64_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vluxseg8ei64_v_i64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_i64m1((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (const int64_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_i64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_i64m1_m((vint64m1_t *)(op0), (vint64m1_t *)(op1), (vint64m1_t *)(op2), (vint64m1_t *)(op3), (vint64m1_t *)(op4), (vint64m1_t *)(op5), (vint64m1_t *)(op6), (vint64m1_t *)(op7), (vbool64_t)(op8), (vint64m1_t)(op9), (vint64m1_t)(op10), (vint64m1_t)(op11), (vint64m1_t)(op12), (vint64m1_t)(op13), (vint64m1_t)(op14), (vint64m1_t)(op15), (vint64m1_t)(op16), (const int64_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_u64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_u64m1((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (const uint64_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_u64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_u64m1_m((vuint64m1_t *)(op0), (vuint64m1_t *)(op1), (vuint64m1_t *)(op2), (vuint64m1_t *)(op3), (vuint64m1_t *)(op4), (vuint64m1_t *)(op5), (vuint64m1_t *)(op6), (vuint64m1_t *)(op7), (vbool64_t)(op8), (vuint64m1_t)(op9), (vuint64m1_t)(op10), (vuint64m1_t)(op11), (vuint64m1_t)(op12), (vuint64m1_t)(op13), (vuint64m1_t)(op14), (vuint64m1_t)(op15), (vuint64m1_t)(op16), (const uint64_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#define vloxseg2ei8_v_i8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_i8m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_i8m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (vuint8m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_i8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_i8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_i8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_u8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_u8m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_u8m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (vuint8m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), (vuint8m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_u8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_u8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_u8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vloxseg3ei8_v_i8m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_i8m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (vuint8m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (vuint8m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_i8mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_i8mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_i8mf8(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_u8m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_u8m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (vuint8m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (vuint8m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_u8mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_u8mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_u8mf8(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vloxseg4ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (vuint8m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (vuint8m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (vuint8m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (vuint8m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11))
+#define vloxseg5ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint8m1_t)(op12), (size_t)(op13))
(size_t)(op13)) +#define vloxseg5ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint8m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, 
op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg6ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint8m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), 
(vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint8m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint8m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define 
vloxseg6ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg7ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint8m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), 
(vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint8m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), 
(vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg8ei8_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint8m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ 
+__builtin_rvv_vloxseg8ei8_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint8m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint8m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), 
(vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg2ei16_v_i8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i8m4((vint8m4_t *)(op0), (vint8m4_t *)(op1), (const int8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i8m4_m((vint8m4_t *)(op0), (vint8m4_t *)(op1), (vbool2_t)(op2), (vint8m4_t)(op3), (vint8m4_t)(op4), (const int8_t *)(op5), (vuint16m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u8m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u8m1((vuint8m1_t 
*)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u8m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u8m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u8m4((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (const uint8_t *)(op2), (vuint16m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u8m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u8m4_m((vuint8m4_t *)(op0), (vuint8m4_t *)(op1), (vbool2_t)(op2), (vuint8m4_t)(op3), (vuint8m4_t)(op4), (const uint8_t *)(op5), (vuint16m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u8mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u8mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u8mf8(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg3ei16_v_i8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i8m2_m((vint8m2_t 
*)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (vuint16m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u8m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u8m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (vuint16m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (vuint16m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u8mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u8mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), 
(vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u8mf8(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg4ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (vuint16m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (vuint16m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t 
*)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (vuint16m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (vuint16m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), 
(vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg5ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint16m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint16m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u8mf2(op0, op1, op2, op3, 
op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg6ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint16m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint16m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), 
(vint8mf2_t)(op12), (const int8_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint16m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint16m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u8mf4_m(op0, 
op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg7ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint16m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t 
*)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17))
+#define vloxseg7ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vloxseg7ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint16m2_t)(op16), (size_t)(op17))
+#define vloxseg7ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17))
+#define vloxseg7ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17))
+#define vloxseg7ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vloxseg8ei16_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint16m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint16m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint16m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint16m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
+#define vloxseg2ei32_v_i8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_i8m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (const int8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vbool4_t)(op2), (vint8m2_t)(op3), (vint8m2_t)(op4), (const int8_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_i8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_i8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_i8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_u8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_u8m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (const uint8_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vbool4_t)(op2), (vuint8m2_t)(op3), (vuint8m2_t)(op4), (const uint8_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_u8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_u8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_u8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vloxseg3ei32_v_i8m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_i8m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (const int8_t *)(op3), (vuint32m8_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vbool4_t)(op3), (vint8m2_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (const int8_t *)(op7), (vuint32m8_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_i8mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_i8mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_i8mf8(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_u8m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_u8m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (const uint8_t *)(op3), (vuint32m8_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vbool4_t)(op3), (vuint8m2_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (const uint8_t *)(op7), (vuint32m8_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_u8mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_u8mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_u8mf8(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vloxseg4ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_i8m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_i8m2((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (const int8_t *)(op4), (vuint32m8_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_i8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_i8m2_m((vint8m2_t *)(op0), (vint8m2_t *)(op1), (vint8m2_t *)(op2), (vint8m2_t *)(op3), (vbool4_t)(op4), (vint8m2_t)(op5), (vint8m2_t)(op6), (vint8m2_t)(op7), (vint8m2_t)(op8), (const int8_t *)(op9), (vuint32m8_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_u8m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_u8m2((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (const uint8_t *)(op4), (vuint32m8_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_u8m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_u8m2_m((vuint8m2_t *)(op0), (vuint8m2_t *)(op1), (vuint8m2_t *)(op2), (vuint8m2_t *)(op3), (vbool4_t)(op4), (vuint8m2_t)(op5), (vuint8m2_t)(op6), (vuint8m2_t)(op7), (vuint8m2_t)(op8), (const uint8_t *)(op9), (vuint32m8_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vloxseg5ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint32m4_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint32m4_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vloxseg6ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint32m4_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint32m4_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint32m4_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint32m4_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vloxseg7ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint32m4_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint32m4_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vloxseg8ei32_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint32m4_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint32m4_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint32m4_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint32m4_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vloxseg2ei64_v_i8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (const int8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vbool8_t)(op2), (vint8m1_t)(op3), (vint8m1_t)(op4), (const int8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_i8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (const int8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vbool16_t)(op2), (vint8mf2_t)(op3), (vint8mf2_t)(op4), (const int8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_i8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (const int8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vbool32_t)(op2), (vint8mf4_t)(op3), (vint8mf4_t)(op4), (const int8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_i8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (const int8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vbool64_t)(op2), (vint8mf8_t)(op3), (vint8mf8_t)(op4), (const int8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u8m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (const uint8_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vbool8_t)(op2), (vuint8m1_t)(op3), (vuint8m1_t)(op4), (const uint8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u8mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (const uint8_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vbool16_t)(op2), (vuint8mf2_t)(op3), (vuint8mf2_t)(op4), (const uint8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u8mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (const uint8_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vbool32_t)(op2), (vuint8mf4_t)(op3), (vuint8mf4_t)(op4), (const uint8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u8mf8(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (const uint8_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vbool64_t)(op2), (vuint8mf8_t)(op3), (vuint8mf8_t)(op4), (const uint8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg3ei64_v_i8m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (const int8_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vbool8_t)(op3), (vint8m1_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (const int8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_i8mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (const int8_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vbool16_t)(op3), (vint8mf2_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (const int8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_i8mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (const int8_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vbool32_t)(op3), (vint8mf4_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (const int8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_i8mf8(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (const int8_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vbool64_t)(op3), (vint8mf8_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (const int8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u8m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (const uint8_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vbool8_t)(op3), (vuint8m1_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (const uint8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u8mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (const uint8_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vbool16_t)(op3), (vuint8mf2_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (const uint8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u8mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (const uint8_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vbool32_t)(op3), (vuint8mf4_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (const uint8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u8mf8(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (const uint8_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vbool64_t)(op3), (vuint8mf8_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (const uint8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg4ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (const int8_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vbool8_t)(op4), (vint8m1_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (const int8_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (const int8_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vbool16_t)(op4), (vint8mf2_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (const int8_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (const int8_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vbool32_t)(op4), (vint8mf4_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (const int8_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (const int8_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vbool64_t)(op4), (vint8mf8_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (const int8_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (const uint8_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vbool8_t)(op4), (vuint8m1_t)(op5), (vuint8m1_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (const uint8_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (const uint8_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vbool16_t)(op4), (vuint8mf2_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (const uint8_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (const uint8_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vbool32_t)(op4), (vuint8mf4_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (const uint8_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (const uint8_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vbool64_t)(op4), (vuint8mf8_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (const uint8_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vloxseg5ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (const int8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vbool8_t)(op5), (vint8m1_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (const int8_t *)(op11), (vuint64m8_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (const int8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vbool16_t)(op5), (vint8mf2_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (const int8_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (const int8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vbool32_t)(op5), (vint8mf4_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (const int8_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (const int8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vbool64_t)(op5), (vint8mf8_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (const int8_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (const uint8_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vbool8_t)(op5), (vuint8m1_t)(op6),
(vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (const uint8_t *)(op11), (vuint64m8_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (const uint8_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vbool16_t)(op5), (vuint8mf2_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (const uint8_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (const uint8_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vbool32_t)(op5), (vuint8mf4_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (const uint8_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (const uint8_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vbool64_t)(op5), (vuint8mf8_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (const uint8_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg6ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (const int8_t *)(op6), (vuint64m8_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vbool8_t)(op6), (vint8m1_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (const int8_t *)(op13), (vuint64m8_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (const int8_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), 
(vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vbool16_t)(op6), (vint8mf2_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (const int8_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (const int8_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vbool32_t)(op6), (vint8mf4_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (const int8_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (const int8_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vbool64_t)(op6), (vint8mf8_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (const int8_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (const uint8_t *)(op6), (vuint64m8_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vbool8_t)(op6), (vuint8m1_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (const uint8_t *)(op13), (vuint64m8_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (const uint8_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vbool16_t)(op6), (vuint8mf2_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (const uint8_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t 
*)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (const uint8_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vbool32_t)(op6), (vuint8mf4_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (const uint8_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (const uint8_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vbool64_t)(op6), (vuint8mf8_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (const uint8_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg7ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (const int8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vbool8_t)(op7), (vint8m1_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (const int8_t *)(op15), (vuint64m8_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (const int8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vbool16_t)(op7), (vint8mf2_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (const int8_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (const int8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, 
op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vbool32_t)(op7), (vint8mf4_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (const int8_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (const int8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vbool64_t)(op7), (vint8mf8_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (const int8_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (const uint8_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vbool8_t)(op7), (vuint8m1_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (const uint8_t *)(op15), (vuint64m8_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (const uint8_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vbool16_t)(op7), (vuint8mf2_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (const uint8_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (const uint8_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), 
(vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vbool32_t)(op7), (vuint8mf4_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (const uint8_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (const uint8_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vbool64_t)(op7), (vuint8mf8_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (const uint8_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg8ei64_v_i8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i8m1((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (const int8_t *)(op8), (vuint64m8_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i8m1_m((vint8m1_t *)(op0), (vint8m1_t *)(op1), (vint8m1_t *)(op2), (vint8m1_t *)(op3), (vint8m1_t *)(op4), (vint8m1_t *)(op5), (vint8m1_t *)(op6), (vint8m1_t *)(op7), (vbool8_t)(op8), (vint8m1_t)(op9), (vint8m1_t)(op10), (vint8m1_t)(op11), (vint8m1_t)(op12), (vint8m1_t)(op13), (vint8m1_t)(op14), (vint8m1_t)(op15), (vint8m1_t)(op16), (const int8_t *)(op17), (vuint64m8_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_i8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i8mf2((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (const int8_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i8mf2_m((vint8mf2_t *)(op0), (vint8mf2_t *)(op1), (vint8mf2_t *)(op2), (vint8mf2_t *)(op3), (vint8mf2_t *)(op4), (vint8mf2_t *)(op5), (vint8mf2_t *)(op6), (vint8mf2_t *)(op7), (vbool16_t)(op8), (vint8mf2_t)(op9), (vint8mf2_t)(op10), (vint8mf2_t)(op11), (vint8mf2_t)(op12), (vint8mf2_t)(op13), (vint8mf2_t)(op14), (vint8mf2_t)(op15), (vint8mf2_t)(op16), (const int8_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_i8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i8mf4((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (const int8_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, 
op19) \ +__builtin_rvv_vloxseg8ei64_v_i8mf4_m((vint8mf4_t *)(op0), (vint8mf4_t *)(op1), (vint8mf4_t *)(op2), (vint8mf4_t *)(op3), (vint8mf4_t *)(op4), (vint8mf4_t *)(op5), (vint8mf4_t *)(op6), (vint8mf4_t *)(op7), (vbool32_t)(op8), (vint8mf4_t)(op9), (vint8mf4_t)(op10), (vint8mf4_t)(op11), (vint8mf4_t)(op12), (vint8mf4_t)(op13), (vint8mf4_t)(op14), (vint8mf4_t)(op15), (vint8mf4_t)(op16), (const int8_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_i8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i8mf8((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (const int8_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i8mf8_m((vint8mf8_t *)(op0), (vint8mf8_t *)(op1), (vint8mf8_t *)(op2), (vint8mf8_t *)(op3), (vint8mf8_t *)(op4), (vint8mf8_t *)(op5), (vint8mf8_t *)(op6), (vint8mf8_t *)(op7), (vbool64_t)(op8), (vint8mf8_t)(op9), (vint8mf8_t)(op10), (vint8mf8_t)(op11), (vint8mf8_t)(op12), (vint8mf8_t)(op13), (vint8mf8_t)(op14), (vint8mf8_t)(op15), (vint8mf8_t)(op16), (const int8_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u8m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u8m1((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (const uint8_t *)(op8), (vuint64m8_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u8m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u8m1_m((vuint8m1_t *)(op0), (vuint8m1_t *)(op1), (vuint8m1_t *)(op2), (vuint8m1_t *)(op3), (vuint8m1_t *)(op4), (vuint8m1_t *)(op5), (vuint8m1_t *)(op6), (vuint8m1_t *)(op7), (vbool8_t)(op8), (vuint8m1_t)(op9), (vuint8m1_t)(op10), (vuint8m1_t)(op11), (vuint8m1_t)(op12), (vuint8m1_t)(op13), (vuint8m1_t)(op14), (vuint8m1_t)(op15), (vuint8m1_t)(op16), (const uint8_t *)(op17), (vuint64m8_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u8mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u8mf2((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (const uint8_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u8mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u8mf2_m((vuint8mf2_t *)(op0), (vuint8mf2_t *)(op1), (vuint8mf2_t *)(op2), (vuint8mf2_t *)(op3), (vuint8mf2_t *)(op4), (vuint8mf2_t *)(op5), (vuint8mf2_t *)(op6), (vuint8mf2_t *)(op7), (vbool16_t)(op8), (vuint8mf2_t)(op9), (vuint8mf2_t)(op10), (vuint8mf2_t)(op11), (vuint8mf2_t)(op12), (vuint8mf2_t)(op13), (vuint8mf2_t)(op14), (vuint8mf2_t)(op15), (vuint8mf2_t)(op16), (const uint8_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u8mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u8mf4((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), 
(vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (const uint8_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u8mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u8mf4_m((vuint8mf4_t *)(op0), (vuint8mf4_t *)(op1), (vuint8mf4_t *)(op2), (vuint8mf4_t *)(op3), (vuint8mf4_t *)(op4), (vuint8mf4_t *)(op5), (vuint8mf4_t *)(op6), (vuint8mf4_t *)(op7), (vbool32_t)(op8), (vuint8mf4_t)(op9), (vuint8mf4_t)(op10), (vuint8mf4_t)(op11), (vuint8mf4_t)(op12), (vuint8mf4_t)(op13), (vuint8mf4_t)(op14), (vuint8mf4_t)(op15), (vuint8mf4_t)(op16), (const uint8_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u8mf8(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u8mf8((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (const uint8_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u8mf8_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u8mf8_m((vuint8mf8_t *)(op0), (vuint8mf8_t *)(op1), (vuint8mf8_t *)(op2), (vuint8mf8_t *)(op3), (vuint8mf8_t *)(op4), (vuint8mf8_t *)(op5), (vuint8mf8_t *)(op6), (vuint8mf8_t *)(op7), (vbool64_t)(op8), (vuint8mf8_t)(op9), (vuint8mf8_t)(op10), (vuint8mf8_t)(op11), (vuint8mf8_t)(op12), (vuint8mf8_t)(op13), (vuint8mf8_t)(op14), (vuint8mf8_t)(op15), (vuint8mf8_t)(op16), (const uint8_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vloxseg2ei8_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i16mf4(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg3ei8_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define 
vloxseg3ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), 
(const uint16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg4ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vloxseg4ei8_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg5ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), 
(vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg6ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ 
+__builtin_rvv_vloxseg6ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t 
*)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg7ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define 
vloxseg7ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg8ei8_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), 
(vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, 
op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg2ei16_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u16m2_m(op0, op1, 
op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg3ei16_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define 
vloxseg3ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg4ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vloxseg4ei16_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vloxseg4ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg5ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vloxseg5ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg6ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t 
*)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg7ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define 
vloxseg7ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) 
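For reference, the unmasked vloxseg* wrappers above return each segment field through an output pointer, followed by the scalar base address, the index vector, and the element count. A minimal usage sketch, assuming an RVV-enabled clang and hypothetical names (base, index, vl):

#include <riscv_vector.h>

/* Loads 2-field segments of int16 elements addressed by 16-bit indices.
   Mirrors the macro's (op0..op4) layout: two output pointers, then the
   base pointer, the index vector, and the element count vl. */
static void load_two_fields(const int16_t *base, vuint16m1_t index,
                            size_t vl, vint16m1_t *a, vint16m1_t *b) {
  vloxseg2ei16_v_i16m1(a, b, base, index, vl);
}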
+#define vloxseg7ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg8ei16_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), 
(vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint16m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg2ei32_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t 
*)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i16m4((vint16m4_t *)(op0), (vint16m4_t *)(op1), (const int16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i16m4_m((vint16m4_t *)(op0), (vint16m4_t *)(op1), (vbool4_t)(op2), (vint16m4_t)(op3), (vint16m4_t)(op4), (const int16_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), (vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u16m4((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (const uint16_t *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u16m4_m((vuint16m4_t *)(op0), (vuint16m4_t *)(op1), (vbool4_t)(op2), (vuint16m4_t)(op3), (vuint16m4_t)(op4), (const uint16_t *)(op5), (vuint32m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const 
uint16_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg3ei32_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) 
+#define vloxseg3ei32_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg4ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) 
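The masked (_m) variants interleave extra operands: after the output pointers come a mask and one "maskedoff" merge value per field, then the same base/index/vl tail. A sketch under the same assumptions, with hypothetical names (mask, off0, off1); the index is vuint32m2_t here because ei32 indices for m1 data occupy a doubled register group:

#include <riscv_vector.h>

/* Masked 2-field segment load: inactive elements take their values from
   the per-field maskedoff operands (off0, off1). Matches the macro's
   (op0..op7) layout: outputs, mask, maskedoff0, maskedoff1, base,
   index, vl. */
static void load_two_fields_masked(vbool16_t mask, vint16m1_t off0,
                                   vint16m1_t off1, const int16_t *base,
                                   vuint32m2_t index, size_t vl,
                                   vint16m1_t *a, vint16m1_t *b) {
  vloxseg2ei32_v_i16m1_m(a, b, mask, off0, off1, base, index, vl);
}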
+#define vloxseg4ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint32m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint32m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg5ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, 
op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint32m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define 
vloxseg5ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg6ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint32m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), 
(vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint32m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg7ei32_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vloxseg7ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint32m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg8ei32_v_i16m1(op0, op1, op2, op3, 
op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint32m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), 
(vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint32m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg2ei64_v_i16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (const int16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vbool16_t)(op2), (vint16m1_t)(op3), (vint16m1_t)(op4), (const int16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (const int16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vbool8_t)(op2), (vint16m2_t)(op3), (vint16m2_t)(op4), (const int16_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (const int16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vbool32_t)(op2), 
(vint16mf2_t)(op3), (vint16mf2_t)(op4), (const int16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (const int16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vbool64_t)(op2), (vint16mf4_t)(op3), (vint16mf4_t)(op4), (const int16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (const uint16_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vbool16_t)(op2), (vuint16m1_t)(op3), (vuint16m1_t)(op4), (const uint16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (const uint16_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vbool8_t)(op2), (vuint16m2_t)(op3), (vuint16m2_t)(op4), (const uint16_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (const uint16_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vbool32_t)(op2), (vuint16mf2_t)(op3), (vuint16mf2_t)(op4), (const uint16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_u16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (const uint16_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vbool64_t)(op2), (vuint16mf4_t)(op3), (vuint16mf4_t)(op4), (const uint16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg3ei64_v_i16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (const int16_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vbool16_t)(op3), (vint16m1_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (const int16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_i16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (const int16_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vbool8_t)(op3), (vint16m2_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (const int16_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) 
+#define vloxseg3ei64_v_i16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (const int16_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vbool32_t)(op3), (vint16mf2_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (const int16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_i16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (const int16_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vbool64_t)(op3), (vint16mf4_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (const int16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_u16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (const uint16_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vbool16_t)(op3), (vuint16m1_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (const uint16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_u16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (const uint16_t *)(op3), (vuint64m8_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vbool8_t)(op3), (vuint16m2_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (const uint16_t *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_u16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (const uint16_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vbool32_t)(op3), (vuint16mf2_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (const uint16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_u16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (const uint16_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vbool64_t)(op3), (vuint16mf4_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (const uint16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg4ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (const int16_t *)(op4), 
(vuint64m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vbool16_t)(op4), (vint16m1_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (const int16_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_i16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_i16m2((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (const int16_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_i16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_i16m2_m((vint16m2_t *)(op0), (vint16m2_t *)(op1), (vint16m2_t *)(op2), (vint16m2_t *)(op3), (vbool8_t)(op4), (vint16m2_t)(op5), (vint16m2_t)(op6), (vint16m2_t)(op7), (vint16m2_t)(op8), (const int16_t *)(op9), (vuint64m8_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (const int16_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vbool32_t)(op4), (vint16mf2_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (const int16_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (const int16_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vbool64_t)(op4), (vint16mf4_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (const int16_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (const uint16_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vbool16_t)(op4), (vuint16m1_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (const uint16_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_u16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_u16m2((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (const uint16_t *)(op4), (vuint64m8_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_u16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_u16m2_m((vuint16m2_t *)(op0), (vuint16m2_t *)(op1), (vuint16m2_t *)(op2), (vuint16m2_t *)(op3), (vbool8_t)(op4), (vuint16m2_t)(op5), (vuint16m2_t)(op6), (vuint16m2_t)(op7), (vuint16m2_t)(op8), (const uint16_t *)(op9), (vuint64m8_t)(op10), 
(size_t)(op11)) +#define vloxseg4ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (const uint16_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vbool32_t)(op4), (vuint16mf2_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (const uint16_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (const uint16_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vbool64_t)(op4), (vuint16mf4_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (const uint16_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vloxseg5ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (const int16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vbool16_t)(op5), (vint16m1_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (const int16_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (const int16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vbool32_t)(op5), (vint16mf2_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (const int16_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (const int16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vbool64_t)(op5), (vint16mf4_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (const int16_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vloxseg5ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (const uint16_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vbool16_t)(op5), (vuint16m1_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (const uint16_t *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (const uint16_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vbool32_t)(op5), (vuint16mf2_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (const uint16_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (const uint16_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vbool64_t)(op5), (vuint16mf4_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (const uint16_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg6ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (const int16_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vbool16_t)(op6), (vint16m1_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (const int16_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (const int16_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vbool32_t)(op6), (vint16mf2_t)(op7), (vint16mf2_t)(op8), 
(vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (const int16_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (const int16_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vbool64_t)(op6), (vint16mf4_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (const int16_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (const uint16_t *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vbool16_t)(op6), (vuint16m1_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (const uint16_t *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (const uint16_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vbool32_t)(op6), (vuint16mf2_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (const uint16_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (const uint16_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vbool64_t)(op6), (vuint16mf4_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (const uint16_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg7ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), 
(vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (const int16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vbool16_t)(op7), (vint16m1_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (const int16_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (const int16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vbool32_t)(op7), (vint16mf2_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (const int16_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (const int16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vbool64_t)(op7), (vint16mf4_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (const int16_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (const uint16_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vbool16_t)(op7), (vuint16m1_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (const uint16_t *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t 
*)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (const uint16_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vbool32_t)(op7), (vuint16mf2_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (const uint16_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (const uint16_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vbool64_t)(op7), (vuint16mf4_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (const uint16_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg8ei64_v_i16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i16m1((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (const int16_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i16m1_m((vint16m1_t *)(op0), (vint16m1_t *)(op1), (vint16m1_t *)(op2), (vint16m1_t *)(op3), (vint16m1_t *)(op4), (vint16m1_t *)(op5), (vint16m1_t *)(op6), (vint16m1_t *)(op7), (vbool16_t)(op8), (vint16m1_t)(op9), (vint16m1_t)(op10), (vint16m1_t)(op11), (vint16m1_t)(op12), (vint16m1_t)(op13), (vint16m1_t)(op14), (vint16m1_t)(op15), (vint16m1_t)(op16), (const int16_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_i16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i16mf2((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (const int16_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i16mf2_m((vint16mf2_t *)(op0), (vint16mf2_t *)(op1), (vint16mf2_t *)(op2), (vint16mf2_t *)(op3), (vint16mf2_t *)(op4), (vint16mf2_t *)(op5), (vint16mf2_t *)(op6), (vint16mf2_t *)(op7), (vbool32_t)(op8), (vint16mf2_t)(op9), (vint16mf2_t)(op10), (vint16mf2_t)(op11), (vint16mf2_t)(op12), (vint16mf2_t)(op13), (vint16mf2_t)(op14), (vint16mf2_t)(op15), (vint16mf2_t)(op16), (const int16_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define 
vloxseg8ei64_v_i16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_i16mf4((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (const int16_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_i16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_i16mf4_m((vint16mf4_t *)(op0), (vint16mf4_t *)(op1), (vint16mf4_t *)(op2), (vint16mf4_t *)(op3), (vint16mf4_t *)(op4), (vint16mf4_t *)(op5), (vint16mf4_t *)(op6), (vint16mf4_t *)(op7), (vbool64_t)(op8), (vint16mf4_t)(op9), (vint16mf4_t)(op10), (vint16mf4_t)(op11), (vint16mf4_t)(op12), (vint16mf4_t)(op13), (vint16mf4_t)(op14), (vint16mf4_t)(op15), (vint16mf4_t)(op16), (const int16_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u16m1((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (const uint16_t *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u16m1_m((vuint16m1_t *)(op0), (vuint16m1_t *)(op1), (vuint16m1_t *)(op2), (vuint16m1_t *)(op3), (vuint16m1_t *)(op4), (vuint16m1_t *)(op5), (vuint16m1_t *)(op6), (vuint16m1_t *)(op7), (vbool16_t)(op8), (vuint16m1_t)(op9), (vuint16m1_t)(op10), (vuint16m1_t)(op11), (vuint16m1_t)(op12), (vuint16m1_t)(op13), (vuint16m1_t)(op14), (vuint16m1_t)(op15), (vuint16m1_t)(op16), (const uint16_t *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u16mf2((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (const uint16_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u16mf2_m((vuint16mf2_t *)(op0), (vuint16mf2_t *)(op1), (vuint16mf2_t *)(op2), (vuint16mf2_t *)(op3), (vuint16mf2_t *)(op4), (vuint16mf2_t *)(op5), (vuint16mf2_t *)(op6), (vuint16mf2_t *)(op7), (vbool32_t)(op8), (vuint16mf2_t)(op9), (vuint16mf2_t)(op10), (vuint16mf2_t)(op11), (vuint16mf2_t)(op12), (vuint16mf2_t)(op13), (vuint16mf2_t)(op14), (vuint16mf2_t)(op15), (vuint16mf2_t)(op16), (const uint16_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_u16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_u16mf4((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), (vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (const uint16_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_u16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_u16mf4_m((vuint16mf4_t *)(op0), (vuint16mf4_t *)(op1), (vuint16mf4_t *)(op2), 
(vuint16mf4_t *)(op3), (vuint16mf4_t *)(op4), (vuint16mf4_t *)(op5), (vuint16mf4_t *)(op6), (vuint16mf4_t *)(op7), (vbool64_t)(op8), (vuint16mf4_t)(op9), (vuint16mf4_t)(op10), (vuint16mf4_t)(op11), (vuint16mf4_t)(op12), (vuint16mf4_t)(op13), (vuint16mf4_t)(op14), (vuint16mf4_t)(op15), (vuint16mf4_t)(op16), (const uint16_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vloxseg2ei8_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), 
(const uint32_t *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei8_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg3ei8_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) 
\ +__builtin_rvv_vloxseg3ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg4ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t 
*)(op3), (const uint32_t *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg5ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg6ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const 
int32_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg7ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t 
*)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg8ei8_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), 
(vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg2ei16_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i32m1_m((vint32m1_t *)(op0), 
(vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg3ei16_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i32m1((vint32m1_t *)(op0), 
(vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg4ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vloxseg4ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg5ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vloxseg5ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg6ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), 
(vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg7ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t 
*)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg8ei16_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_i32mf2((vint32mf2_t *)(op0), 
(vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg2ei32_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vloxseg2ei32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_i32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_u32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg3ei32_v_i32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint32m1_t)(op8), 
(size_t)(op9)) +#define vloxseg3ei32_v_i32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_i32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_u32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg4ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t 
*)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg5ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), 
(vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg6ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) 
\ +__builtin_rvv_vloxseg6ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg7ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), 
(vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg8ei32_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), 
(vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg2ei64_v_i32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (const int32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vbool32_t)(op2), (vint32m1_t)(op3), (vint32m1_t)(op4), (const int32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (const int32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vbool16_t)(op2), (vint32m2_t)(op3), (vint32m2_t)(op4), (const int32_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_i32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_i32m4((vint32m4_t *)(op0), (vint32m4_t *)(op1), (const int32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_i32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vloxseg2ei64_v_i32m4_m((vint32m4_t *)(op0), (vint32m4_t *)(op1), (vbool8_t)(op2), (vint32m4_t)(op3), (vint32m4_t)(op4), (const int32_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_i32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (const int32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vbool64_t)(op2), (vint32mf2_t)(op3), (vint32mf2_t)(op4), (const int32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (const uint32_t *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vbool32_t)(op2), (vuint32m1_t)(op3), (vuint32m1_t)(op4), (const uint32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (const uint32_t *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vbool16_t)(op2), (vuint32m2_t)(op3), (vuint32m2_t)(op4), (const uint32_t *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u32m4((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (const uint32_t *)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u32m4_m((vuint32m4_t *)(op0), (vuint32m4_t *)(op1), (vbool8_t)(op2), (vuint32m4_t)(op3), (vuint32m4_t)(op4), (const uint32_t *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_u32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (const uint32_t *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vbool64_t)(op2), (vuint32mf2_t)(op3), (vuint32mf2_t)(op4), (const uint32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg3ei64_v_i32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (const int32_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vbool32_t)(op3), (vint32m1_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (const int32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_i32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (const int32_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vbool16_t)(op3), (vint32m2_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (const int32_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_i32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (const int32_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vbool64_t)(op3), (vint32mf2_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (const int32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (const uint32_t *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vbool32_t)(op3), (vuint32m1_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (const uint32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (const uint32_t *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vbool16_t)(op3), (vuint32m2_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (const uint32_t *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_u32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (const uint32_t *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vbool64_t)(op3), (vuint32mf2_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (const uint32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg4ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (const int32_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vbool32_t)(op4), (vint32m1_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (const int32_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_i32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i32m2((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (const int32_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i32m2_m((vint32m2_t *)(op0), (vint32m2_t *)(op1), (vint32m2_t *)(op2), (vint32m2_t *)(op3), (vbool16_t)(op4), (vint32m2_t)(op5), (vint32m2_t)(op6), (vint32m2_t)(op7), (vint32m2_t)(op8), (const int32_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (const int32_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vbool64_t)(op4), (vint32mf2_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (const int32_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (const uint32_t *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vbool32_t)(op4), (vuint32m1_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (const uint32_t *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u32m2((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (const uint32_t *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u32m2_m((vuint32m2_t *)(op0), (vuint32m2_t *)(op1), (vuint32m2_t *)(op2), (vuint32m2_t *)(op3), (vbool16_t)(op4), (vuint32m2_t)(op5), (vuint32m2_t)(op6), (vuint32m2_t)(op7), (vuint32m2_t)(op8), (const uint32_t *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (const uint32_t *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vbool64_t)(op4), (vuint32mf2_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (const uint32_t *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vloxseg5ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (const int32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vbool32_t)(op5), (vint32m1_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (const int32_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (const int32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vbool64_t)(op5), (vint32mf2_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (const int32_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (const uint32_t *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vbool32_t)(op5), (vuint32m1_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (const uint32_t *)(op11), (vuint64m2_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (const uint32_t *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vbool64_t)(op5), (vuint32mf2_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (const uint32_t *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vloxseg6ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (const int32_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8))
+#define vloxseg6ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vbool32_t)(op6), (vint32m1_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (const int32_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15))
+#define vloxseg6ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (const int32_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vloxseg6ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vbool64_t)(op6), (vint32mf2_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (const int32_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vloxseg6ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (const uint32_t *)(op6), (vuint64m2_t)(op7), (size_t)(op8))
+#define vloxseg6ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vbool32_t)(op6), (vuint32m1_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (const uint32_t *)(op13), (vuint64m2_t)(op14), (size_t)(op15))
+#define vloxseg6ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (const uint32_t *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vloxseg6ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vbool64_t)(op6), (vuint32mf2_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (const uint32_t *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vloxseg7ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (const int32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg7ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vbool32_t)(op7), (vint32m1_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (const int32_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17))
+#define vloxseg7ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (const int32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vbool64_t)(op7), (vint32mf2_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (const int32_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vloxseg7ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (const uint32_t *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg7ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vbool32_t)(op7), (vuint32m1_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (const uint32_t *)(op15), (vuint64m2_t)(op16), (size_t)(op17))
+#define vloxseg7ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (const uint32_t *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vbool64_t)(op7), (vuint32mf2_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (const uint32_t *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vloxseg8ei64_v_i32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei64_v_i32m1((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (const int32_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei64_v_i32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei64_v_i32m1_m((vint32m1_t *)(op0), (vint32m1_t *)(op1), (vint32m1_t *)(op2), (vint32m1_t *)(op3), (vint32m1_t *)(op4), (vint32m1_t *)(op5), (vint32m1_t *)(op6), (vint32m1_t *)(op7), (vbool32_t)(op8), (vint32m1_t)(op9), (vint32m1_t)(op10), (vint32m1_t)(op11), (vint32m1_t)(op12), (vint32m1_t)(op13), (vint32m1_t)(op14), (vint32m1_t)(op15), (vint32m1_t)(op16), (const int32_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei64_v_i32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei64_v_i32mf2((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (const int32_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei64_v_i32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei64_v_i32mf2_m((vint32mf2_t *)(op0), (vint32mf2_t *)(op1), (vint32mf2_t *)(op2), (vint32mf2_t *)(op3), (vint32mf2_t *)(op4), (vint32mf2_t *)(op5), (vint32mf2_t *)(op6), (vint32mf2_t *)(op7), (vbool64_t)(op8), (vint32mf2_t)(op9), (vint32mf2_t)(op10), (vint32mf2_t)(op11), (vint32mf2_t)(op12), (vint32mf2_t)(op13), (vint32mf2_t)(op14), (vint32mf2_t)(op15), (vint32mf2_t)(op16), (const int32_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#define vloxseg8ei64_v_u32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei64_v_u32m1((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (const uint32_t *)(op8), (vuint64m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei64_v_u32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei64_v_u32m1_m((vuint32m1_t *)(op0), (vuint32m1_t *)(op1), (vuint32m1_t *)(op2), (vuint32m1_t *)(op3), (vuint32m1_t *)(op4), (vuint32m1_t *)(op5), (vuint32m1_t *)(op6), (vuint32m1_t *)(op7), (vbool32_t)(op8), (vuint32m1_t)(op9), (vuint32m1_t)(op10), (vuint32m1_t)(op11), (vuint32m1_t)(op12), (vuint32m1_t)(op13), (vuint32m1_t)(op14), (vuint32m1_t)(op15), (vuint32m1_t)(op16), (const uint32_t *)(op17), (vuint64m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei64_v_u32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei64_v_u32mf2((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (const uint32_t *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei64_v_u32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei64_v_u32mf2_m((vuint32mf2_t *)(op0), (vuint32mf2_t *)(op1), (vuint32mf2_t *)(op2), (vuint32mf2_t *)(op3), (vuint32mf2_t *)(op4), (vuint32mf2_t *)(op5), (vuint32mf2_t *)(op6), (vuint32mf2_t *)(op7), (vbool64_t)(op8), (vuint32mf2_t)(op9), (vuint32mf2_t)(op10), (vuint32mf2_t)(op11), (vuint32mf2_t)(op12), (vuint32mf2_t)(op13), (vuint32mf2_t)(op14), (vuint32mf2_t)(op15), (vuint32mf2_t)(op16), (const uint32_t *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#endif
+
+#if defined(__riscv_f) && defined(__riscv_zvlsseg)
+#define vloxseg2ei8_v_f32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint8m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vloxseg3ei8_v_f32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint8mf4_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_f32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint8mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint8mf2_t)(op8), (size_t)(op9))
+#define vloxseg3ei8_v_f32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vloxseg4ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint8mf4_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint8mf4_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint8mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint8mf2_t)(op10), (size_t)(op11))
+#define vloxseg4ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint8mf8_t)(op5), (size_t)(op6))
+#define vloxseg4ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint8mf8_t)(op10), (size_t)(op11))
+#define vloxseg5ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vloxseg5ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint8mf4_t)(op12), (size_t)(op13))
+#define vloxseg5ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vloxseg5ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint8mf8_t)(op12), (size_t)(op13))
+#define vloxseg6ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint8mf4_t)(op7), (size_t)(op8))
+#define vloxseg6ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint8mf4_t)(op14), (size_t)(op15))
+#define vloxseg6ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint8mf8_t)(op7), (size_t)(op8))
+#define vloxseg6ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint8mf8_t)(op14), (size_t)(op15))
+#define vloxseg7ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vloxseg7ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint8mf4_t)(op16), (size_t)(op17))
+#define vloxseg7ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vloxseg7ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint8mf8_t)(op16), (size_t)(op17))
+#define vloxseg8ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint8mf4_t)(op9), (size_t)(op10))
+#define vloxseg8ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint8mf4_t)(op18), (size_t)(op19))
+#define vloxseg8ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint8mf8_t)(op9), (size_t)(op10))
+#define vloxseg8ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint8mf8_t)(op18), (size_t)(op19))
+#define vloxseg2ei16_v_f32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vloxseg2ei16_v_f32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei16_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei16_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei16_v_f32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei16_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint16m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei16_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei16_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint16m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei16_v_f32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vloxseg2ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vloxseg3ei16_v_f32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vloxseg3ei16_v_f32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei16_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint16m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei16_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint16m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei16_v_f32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vloxseg3ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vloxseg4ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint16mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint16mf2_t)(op10), (size_t)(op11))
+#define vloxseg4ei16_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei16_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint16m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei16_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint16m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint16mf4_t)(op5), (size_t)(op6))
+#define vloxseg4ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint16mf4_t)(op10), (size_t)(op11))
+#define vloxseg5ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vloxseg5ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint16mf2_t)(op12), (size_t)(op13))
+#define vloxseg5ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vloxseg5ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint16mf4_t)(op12), (size_t)(op13))
+#define vloxseg6ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint16mf2_t)(op7), (size_t)(op8))
+#define vloxseg6ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint16mf2_t)(op14), (size_t)(op15))
+#define vloxseg6ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint16mf4_t)(op7), (size_t)(op8))
+#define vloxseg6ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint16mf4_t)(op14), (size_t)(op15))
+#define vloxseg7ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint16mf2_t)(op16), (size_t)(op17))
+#define vloxseg7ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vloxseg7ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint16mf4_t)(op16), (size_t)(op17))
+#define vloxseg8ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint16mf2_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint16mf2_t)(op18), (size_t)(op19))
+#define vloxseg8ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint16mf4_t)(op9), (size_t)(op10))
+#define vloxseg8ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint16mf4_t)(op18), (size_t)(op19))
+#define vloxseg2ei32_v_f32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint32m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_f32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint32m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint32m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_f32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint32m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint32m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei32_v_f32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint32mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vloxseg3ei32_v_f32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint32m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_f32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint32m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint32m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei32_v_f32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint32mf2_t)(op4), (size_t)(op5))
+#define vloxseg3ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vloxseg4ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint32m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint32m1_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint32m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint32m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint32mf2_t)(op5), (size_t)(op6))
+#define vloxseg4ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint32mf2_t)(op10), (size_t)(op11))
+#define vloxseg5ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint32m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint32m1_t)(op12), (size_t)(op13))
+#define vloxseg5ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint32mf2_t)(op6), (size_t)(op7))
+#define vloxseg5ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint32mf2_t)(op12), (size_t)(op13))
+#define vloxseg6ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint32m1_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint32m1_t)(op14), (size_t)(op15))
+#define vloxseg6ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint32mf2_t)(op7), (size_t)(op8))
+#define vloxseg6ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint32mf2_t)(op14), (size_t)(op15))
+#define vloxseg7ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint32m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint32m1_t)(op16), (size_t)(op17))
+#define vloxseg7ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint32mf2_t)(op8), (size_t)(op9))
+#define vloxseg7ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint32mf2_t)(op16), (size_t)(op17))
+#define vloxseg8ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint32m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint32m1_t)(op18), (size_t)(op19))
+#define vloxseg8ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint32mf2_t)(op9), (size_t)(op10))
+#define vloxseg8ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint32mf2_t)(op18), (size_t)(op19))
+#define vloxseg2ei64_v_f32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint64m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_f32m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint64m4_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint64m4_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_f32m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint64m8_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint64m8_t)(op6), (size_t)(op7))
+#define vloxseg2ei64_v_f32mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint64m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg3ei64_v_f32m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint64m2_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_f32m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint64m4_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint64m4_t)(op8), (size_t)(op9))
+#define vloxseg3ei64_v_f32mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint64m1_t)(op4), (size_t)(op5))
+#define vloxseg3ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg4ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint64m2_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint64m2_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint64m4_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint64m4_t)(op10), (size_t)(op11))
+#define vloxseg4ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vloxseg4ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint64m1_t)(op5), (size_t)(op6))
+#define vloxseg4ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vloxseg4ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint64m1_t)(op10), (size_t)(op11))
+#define vloxseg5ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint64m2_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint64m2_t)(op12), (size_t)(op13))
+#define vloxseg5ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg5ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint64m1_t)(op6), (size_t)(op7))
+#define vloxseg5ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vloxseg5ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint64m1_t)(op12), (size_t)(op13))
+#define vloxseg6ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint64m2_t)(op7), (size_t)(op8))
+#define vloxseg6ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint64m2_t)(op14), (size_t)(op15))
+#define vloxseg6ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vloxseg6ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint64m1_t)(op7), (size_t)(op8))
+#define vloxseg6ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vloxseg6ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint64m1_t)(op14), (size_t)(op15))
+#define vloxseg7ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint64m2_t)(op8), (size_t)(op9))
+#define vloxseg7ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint64m2_t)(op16), (size_t)(op17))
+#define vloxseg7ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg7ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint64m1_t)(op8), (size_t)(op9))
+#define vloxseg7ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vloxseg7ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vloxseg8ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint64m2_t)(op9), (size_t)(op10))
+#define vloxseg8ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint64m2_t)(op18), (size_t)(op19))
+#define vloxseg8ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vloxseg8ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vloxseg8ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vloxseg8ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#define vsseg2e32_v_f32m1(op0, op1, op2, op3) \
+__builtin_rvv_vsseg2e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (size_t)(op3))
+#define vsseg2e32_v_f32m1_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsseg2e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4))
+#define vsseg2e32_v_f32m2(op0, op1, op2, op3) \
+__builtin_rvv_vsseg2e32_v_f32m2((float *)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (size_t)(op3))
+#define vsseg2e32_v_f32m2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsseg2e32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4))
+#define vsseg2e32_v_f32m4(op0, op1, op2, op3) \
+__builtin_rvv_vsseg2e32_v_f32m4((float *)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (size_t)(op3))
+#define vsseg2e32_v_f32m4_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsseg2e32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4))
+#define vsseg2e32_v_f32mf2(op0, op1, op2, op3) \
+__builtin_rvv_vsseg2e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (size_t)(op3))
+#define vsseg2e32_v_f32mf2_m(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsseg2e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4))
+#define vsseg3e32_v_f32m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsseg3e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4))
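
[Editorial aside, not part of the patch: the macros above are the non-overloaded RVV segment load/store intrinsics that clang 14's riscv_vector.h expands to __builtin_rvv_* calls. A minimal usage sketch follows, assuming a clang 14 toolchain with the experimental V/Zvlsseg extensions enabled; vsetvl_e32m1 and vle32_v_f32m1 are defined elsewhere in this same header, and interleave2, buf, x, y, n are hypothetical names chosen for illustration.]

#include <riscv_vector.h>

/* Interleave two float streams into {x[i], y[i]} pairs using the
 * segment-2 store defined above (vsseg2e32_v_f32m1). The loop is
 * strip-mined with vsetvl, so a short tail just gets a smaller vl. */
void interleave2(float *buf, const float *x, const float *y, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e32m1(n);           /* elements handled this pass */
    vfloat32m1_t a = vle32_v_f32m1(x, vl); /* unit-stride loads */
    vfloat32m1_t b = vle32_v_f32m1(y, vl);
    vsseg2e32_v_f32m1(buf, a, b, vl);      /* expands to __builtin_rvv_vsseg2e32_v_f32m1 */
    x += vl;
    y += vl;
    buf += 2 * vl;
    n -= vl;
  }
}

[The masked _m variants take the vbool mask as the leading operand, as the casts in the macro bodies show.]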
+#define vsseg3e32_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_f32m2((float *)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsseg3e32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsseg3e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_f32m2((float *)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsseg4e32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsseg4e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsseg5e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsseg5e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), 
(vfloat32m1_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsseg6e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsseg6e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsseg7e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsseg7e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e32_v_f32m1((float *)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsseg8e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e32_v_f32mf2((float *)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsseg8e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vssseg2e32_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), 
(vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_f32m2((float *)(op0), (ptrdiff_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_f32m4((float *)(op0), (ptrdiff_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vssseg2e32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vssseg2e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_f32m2((float *)(op0), (ptrdiff_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vssseg3e32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vssseg3e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_f32m2((float *)(op0), (ptrdiff_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vssseg4e32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vssseg4e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vssseg4e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vssseg5e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vssseg5e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg5e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vssseg6e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vssseg6e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), 
(vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vssseg7e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vssseg7e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e32_v_f32m1((float *)(op0), (ptrdiff_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vssseg8e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e32_v_f32mf2((float *)(op0), (ptrdiff_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vssseg8e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (ptrdiff_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsuxseg2ei8_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f32m4((float *)(op0), (vuint8m1_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint8m1_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), 
(size_t)(op4)) +#define vsuxseg2ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), 
(vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), 
(size_t)(op10)) +#define vsuxseg8ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsuxseg2ei16_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_f32m4((float *)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint16m2_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei16_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), 
(vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei16_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, 
op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), 
(vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsuxseg2ei32_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_f32m4((float *)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint32m4_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), 
(vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsuxseg7ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsuxseg2ei64_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_f32m4(op0, op1, 
op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_f32m4((float *)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint64m8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsuxseg2ei64_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg3ei64_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsuxseg4ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), 
(vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg5ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg6ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsuxseg6ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg7ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsuxseg7ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vsuxseg7ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsuxseg8ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg8ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsuxseg8ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsuxseg8ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei8_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_f32m4((float *)(op0), (vuint8m1_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint8m1_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei8_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_f32m1_m(op0, op1, 
op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei8_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f32m2((float *)(op0), (vuint8mf2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint8mf2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vsoxseg5ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_f32m1((float *)(op0), (vuint8mf4_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint8mf4_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, 
op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_f32mf2((float *)(op0), (vuint8mf8_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint8mf8_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei16_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f32m4((float *)(op0), (vuint16m2_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint16m2_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), 
(vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_f32m2((float *)(op0), (vuint16m1_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint16m1_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), 
(vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_f32m1((float *)(op0), (vuint16mf2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint16mf2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_f32mf2((float *)(op0), (vuint16mf4_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint16mf4_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei32_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsoxseg2ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f32m4((float *)(op0), (vuint32m4_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint32m4_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f32m2((float *)(op0), (vuint32m2_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vsoxseg4ei32_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), 
(vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_f32m1((float *)(op0), (vuint32m1_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_f32mf2((float *)(op0), (vuint32mf2_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vsoxseg2ei64_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f32m4((float *)(op0), (vuint64m8_t)(op1), (vfloat32m4_t)(op2), (vfloat32m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f32m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f32m4_m((vbool8_t)(op0), (float *)(op1), (vuint64m8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_f32mf2(op0, 
op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f32m2((float *)(op0), (vuint64m4_t)(op1), (vfloat32m2_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f32m2_m((vbool16_t)(op0), (float *)(op1), (vuint64m4_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (size_t)(op7)) +#define 
vsoxseg5ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_f32m1((float *)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_f32m1((float 
*)(op0), (vuint64m2_t)(op1), (vfloat32m1_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_f32m1_m((vbool32_t)(op0), (float *)(op1), (vuint64m2_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_f32mf2((float *)(op0), (vuint64m1_t)(op1), (vfloat32mf2_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_f32mf2_m((vbool64_t)(op0), (float *)(op1), (vuint64m1_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (size_t)(op11)) +#define vlseg2e32_v_f32m1(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (size_t)(op3)) +#define vlseg2e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (size_t)(op6)) +#define vlseg2e32_v_f32m2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (size_t)(op3)) +#define vlseg2e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (size_t)(op6)) +#define vlseg2e32_v_f32m4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e32_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (size_t)(op3)) +#define vlseg2e32_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e32_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (size_t)(op6)) +#define vlseg2e32_v_f32mf2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (size_t)(op3)) +#define vlseg2e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (size_t)(op6)) +#define vlseg3e32_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (size_t)(op4)) +#define vlseg3e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (size_t)(op8)) +#define vlseg3e32_v_f32m2(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vlseg3e32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (size_t)(op4)) +#define vlseg3e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (size_t)(op8)) +#define vlseg3e32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (size_t)(op4)) +#define vlseg3e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (size_t)(op8)) +#define vlseg4e32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (size_t)(op5)) +#define vlseg4e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (size_t)(op10)) +#define vlseg4e32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (size_t)(op5)) +#define vlseg4e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (size_t)(op10)) +#define vlseg4e32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (size_t)(op5)) +#define vlseg4e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (size_t)(op10)) +#define vlseg5e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (size_t)(op6)) +#define vlseg5e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (size_t)(op12)) +#define vlseg5e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t 
*)(op4), (const float *)(op5), (size_t)(op6)) +#define vlseg5e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (size_t)(op12)) +#define vlseg6e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (size_t)(op7)) +#define vlseg6e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (size_t)(op14)) +#define vlseg6e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (size_t)(op7)) +#define vlseg6e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (size_t)(op14)) +#define vlseg7e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (size_t)(op8)) +#define vlseg7e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (size_t)(op16)) +#define vlseg7e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (size_t)(op8)) +#define vlseg7e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), 
(vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (size_t)(op16)) +#define vlseg8e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (size_t)(op9)) +#define vlseg8e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (size_t)(op18)) +#define vlseg8e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (size_t)(op9)) +#define vlseg8e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (size_t)(op18)) +#define vlseg2e32ff_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e32ff_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (size_t 
*)(op3), (size_t)(op4)) +#define vlseg2e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg3e32ff_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e32ff_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg4e32ff_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e32ff_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e32ff_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vlseg4e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg5e32ff_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e32ff_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg6e32ff_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e32ff_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg7e32ff_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), 
(vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e32ff_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg8e32ff_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e32ff_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e32ff_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e32ff_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e32ff_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e32ff_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e32ff_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e32ff_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float 
*)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlsseg2e32_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg3e32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define 
vlsseg4e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg5e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg6e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t 
*)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg7e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg8e32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (ptrdiff_t)(op9), 
(size_t)(op10)) +#define vlsseg8e32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vluxseg2ei8_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg3ei8_v_f32m1(op0, op1, op2, op3, 
op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg4ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define 
vluxseg5ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg6ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg7ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ 
+__builtin_rvv_vluxseg7ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg8ei8_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg2ei16_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define 
vluxseg2ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg3ei16_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg4ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t 
*)(op3), (const float *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg5ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg6ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define 
vluxseg6ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg7ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg8ei16_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define 
vluxseg8ei16_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg2ei32_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (const float *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define 
vluxseg3ei32_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg4ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float 
*)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg5ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg6ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg7ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_f32m1_m(op0, op1, op2, op3, 
op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg8ei32_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (const float *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg2ei64_v_f32m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f32m1((vfloat32m1_t *)(op0), 
(vfloat32m1_t *)(op1), (const float *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vbool32_t)(op2), (vfloat32m1_t)(op3), (vfloat32m1_t)(op4), (const float *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f32m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (const float *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vbool16_t)(op2), (vfloat32m2_t)(op3), (vfloat32m2_t)(op4), (const float *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f32m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f32m4((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (const float *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f32m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f32m4_m((vfloat32m4_t *)(op0), (vfloat32m4_t *)(op1), (vbool8_t)(op2), (vfloat32m4_t)(op3), (vfloat32m4_t)(op4), (const float *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f32mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (const float *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vbool64_t)(op2), (vfloat32mf2_t)(op3), (vfloat32mf2_t)(op4), (const float *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg3ei64_v_f32m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (const float *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vbool32_t)(op3), (vfloat32m1_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (const float *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_f32m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (const float *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vbool16_t)(op3), (vfloat32m2_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (const float *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_f32mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (const float *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vbool64_t)(op3), (vfloat32mf2_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (const float *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg4ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6) \ 
+__builtin_rvv_vluxseg4ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (const float *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vbool32_t)(op4), (vfloat32m1_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (const float *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_f32m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f32m2((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (const float *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f32m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f32m2_m((vfloat32m2_t *)(op0), (vfloat32m2_t *)(op1), (vfloat32m2_t *)(op2), (vfloat32m2_t *)(op3), (vbool16_t)(op4), (vfloat32m2_t)(op5), (vfloat32m2_t)(op6), (vfloat32m2_t)(op7), (vfloat32m2_t)(op8), (const float *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (const float *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vbool64_t)(op4), (vfloat32mf2_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (const float *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg5ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (const float *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vbool32_t)(op5), (vfloat32m1_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (const float *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (const float *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vbool64_t)(op5), (vfloat32mf2_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (const float *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg6ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t 
*)(op4), (vfloat32m1_t *)(op5), (const float *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vbool32_t)(op6), (vfloat32m1_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (const float *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (const float *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vbool64_t)(op6), (vfloat32mf2_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (const float *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg7ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (const float *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vbool32_t)(op7), (vfloat32m1_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (const float *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (const float *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vbool64_t)(op7), (vfloat32mf2_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (const float *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vluxseg8ei64_v_f32m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_f32m1((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), 
(const float *)(op8), (vuint64m2_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_f32m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_f32m1_m((vfloat32m1_t *)(op0), (vfloat32m1_t *)(op1), (vfloat32m1_t *)(op2), (vfloat32m1_t *)(op3), (vfloat32m1_t *)(op4), (vfloat32m1_t *)(op5), (vfloat32m1_t *)(op6), (vfloat32m1_t *)(op7), (vbool32_t)(op8), (vfloat32m1_t)(op9), (vfloat32m1_t)(op10), (vfloat32m1_t)(op11), (vfloat32m1_t)(op12), (vfloat32m1_t)(op13), (vfloat32m1_t)(op14), (vfloat32m1_t)(op15), (vfloat32m1_t)(op16), (const float *)(op17), (vuint64m2_t)(op18), (size_t)(op19))
+#define vluxseg8ei64_v_f32mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_f32mf2((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (const float *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_f32mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_f32mf2_m((vfloat32mf2_t *)(op0), (vfloat32mf2_t *)(op1), (vfloat32mf2_t *)(op2), (vfloat32mf2_t *)(op3), (vfloat32mf2_t *)(op4), (vfloat32mf2_t *)(op5), (vfloat32mf2_t *)(op6), (vfloat32mf2_t *)(op7), (vbool64_t)(op8), (vfloat32mf2_t)(op9), (vfloat32mf2_t)(op10), (vfloat32mf2_t)(op11), (vfloat32mf2_t)(op12), (vfloat32mf2_t)(op13), (vfloat32mf2_t)(op14), (vfloat32mf2_t)(op15), (vfloat32mf2_t)(op16), (const float *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#endif
+
+#if defined(__riscv_d) && defined(__riscv_zvlsseg)
+#define vloxseg2ei8_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vloxseg3ei8_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vloxseg3ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vloxseg3ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vloxseg3ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1),
(vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg4ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg5ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg6ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg7ei8_v_f64m1(op0, 
op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg8ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg2ei16_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg3ei16_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, 
op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg4ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg5ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg6ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), 
(vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg7ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg8ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg2ei32_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg3ei32_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t 
*)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg4ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg5ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg6ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), 
(vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg7ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg8ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg2ei64_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint64m4_t)(op6), 
(size_t)(op7)) +#define vloxseg3ei64_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg4ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vloxseg5ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg6ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, 
op15) \ +__builtin_rvv_vloxseg6ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg7ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg8ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vsseg2e64_v_f64m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_f64m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_f64m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_f64m2((double *)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_f64m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsseg2e64_v_f64m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e64_v_f64m4((double *)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (size_t)(op3)) +#define vsseg2e64_v_f64m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_f64m1_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vsseg3e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5)) +#define vsseg3e64_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e64_v_f64m2((double *)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsseg3e64_v_f64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6)) +#define vsseg4e64_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e64_v_f64m2((double *)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5)) +#define vsseg4e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6)) +#define vsseg5e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6)) +#define vsseg5e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7)) +#define vsseg6e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7)) +#define vsseg6e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8)) +#define vsseg7e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8)) +#define vsseg7e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9)) +#define vsseg8e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e64_v_f64m1((double *)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9)) +#define vsseg8e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), 
(vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10)) +#define vssseg2e64_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_f64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_f64m2((double *)(op0), (ptrdiff_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_f64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5)) +#define vssseg2e64_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e64_v_f64m4((double *)(op0), (ptrdiff_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vssseg2e64_v_f64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6)) +#define vssseg3e64_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e64_v_f64m2((double *)(op0), (ptrdiff_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5)) +#define vssseg3e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7)) +#define vssseg4e64_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e64_v_f64m2((double *)(op0), (ptrdiff_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6)) +#define vssseg4e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7)) +#define vssseg5e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg5e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7)) +#define vssseg5e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ 
+__builtin_rvv_vssseg5e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8)) +#define vssseg6e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vssseg6e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8)) +#define vssseg6e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg6e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9)) +#define vssseg7e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vssseg7e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9)) +#define vssseg7e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg7e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10)) +#define vssseg8e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vssseg8e64_v_f64m1((double *)(op0), (ptrdiff_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10)) +#define vssseg8e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vssseg8e64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (ptrdiff_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11)) +#define vsuxseg2ei8_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5)) +#define vsuxseg2ei8_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsuxseg2ei8_v_f64m4((double *)(op0), (vuint8mf2_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4)) +#define vsuxseg2ei8_v_f64m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg2ei8_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint8mf2_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), 
(size_t)(op5)) +#define vsuxseg3ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6)) +#define vsuxseg3ei8_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsuxseg3ei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5)) +#define vsuxseg3ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg3ei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7)) +#define vsuxseg4ei8_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsuxseg4ei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6)) +#define vsuxseg4ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg4ei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsuxseg5ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7)) +#define vsuxseg5ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg5ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsuxseg6ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8)) +#define vsuxseg6ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg6ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsuxseg7ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9)) +#define vsuxseg7ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsuxseg7ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10)) +#define vsuxseg8ei8_v_f64m1(op0, 
op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsuxseg2ei16_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f64m4((double *)(op0), (vuint16m1_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint16m1_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsuxseg2ei32_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f64m4((double *)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint32m2_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsuxseg2ei64_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f64m4((double *)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint64m4_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsoxseg2ei8_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsoxseg2ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg2ei8_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsoxseg2ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg2ei8_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei8_v_f64m4((double *)(op0), (vuint8mf2_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsoxseg2ei8_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei8_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint8mf2_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsoxseg3ei8_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg3ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg3ei8_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg3ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg4ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg4ei8_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei8_v_f64m2((double *)(op0), (vuint8mf4_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei8_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint8mf4_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsoxseg5ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg5ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg5ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg5ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg6ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg6ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg7ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg7ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg8ei8_v_f64m1((double *)(op0), (vuint8mf8_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsoxseg8ei8_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint8mf8_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsoxseg2ei16_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsoxseg2ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg2ei16_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsoxseg2ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg2ei16_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei16_v_f64m4((double *)(op0), (vuint16m1_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsoxseg2ei16_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei16_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint16m1_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsoxseg3ei16_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg3ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg3ei16_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg3ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg4ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg4ei16_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei16_v_f64m2((double *)(op0), (vuint16mf2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei16_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint16mf2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsoxseg5ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg5ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg5ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg5ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg6ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg6ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg7ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg7ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg8ei16_v_f64m1((double *)(op0), (vuint16mf4_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsoxseg8ei16_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint16mf4_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsoxseg2ei32_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsoxseg2ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg2ei32_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsoxseg2ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg2ei32_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei32_v_f64m4((double *)(op0), (vuint32m2_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsoxseg2ei32_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei32_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint32m2_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsoxseg3ei32_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg3ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg3ei32_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg3ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg4ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg4ei32_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei32_v_f64m2((double *)(op0), (vuint32m1_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei32_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint32m1_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsoxseg5ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg5ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg5ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg5ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg6ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg6ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg7ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg7ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg8ei32_v_f64m1((double *)(op0), (vuint32mf2_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsoxseg8ei32_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint32mf2_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vsoxseg2ei64_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (size_t)(op4))
+#define vsoxseg2ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg2ei64_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (size_t)(op4))
+#define vsoxseg2ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg2ei64_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsoxseg2ei64_v_f64m4((double *)(op0), (vuint64m4_t)(op1), (vfloat64m4_t)(op2), (vfloat64m4_t)(op3), (size_t)(op4))
+#define vsoxseg2ei64_v_f64m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg2ei64_v_f64m4_m((vbool16_t)(op0), (double *)(op1), (vuint64m4_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (size_t)(op5))
+#define vsoxseg3ei64_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (size_t)(op5))
+#define vsoxseg3ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg3ei64_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsoxseg3ei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (size_t)(op5))
+#define vsoxseg3ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg3ei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (size_t)(op6))
+#define vsoxseg4ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg4ei64_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsoxseg4ei64_v_f64m2((double *)(op0), (vuint64m2_t)(op1), (vfloat64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (size_t)(op6))
+#define vsoxseg4ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg4ei64_v_f64m2_m((vbool32_t)(op0), (double *)(op1), (vuint64m2_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (size_t)(op7))
+#define vsoxseg5ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsoxseg5ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (size_t)(op7))
+#define vsoxseg5ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg5ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsoxseg6ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (size_t)(op8))
+#define vsoxseg6ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg6ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsoxseg7ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (size_t)(op9))
+#define vsoxseg7ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg7ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsoxseg8ei64_v_f64m1((double *)(op0), (vuint64m1_t)(op1), (vfloat64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (size_t)(op10))
+#define vsoxseg8ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsoxseg8ei64_v_f64m1_m((vbool64_t)(op0), (double *)(op1), (vuint64m1_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (size_t)(op11))
+#define vlseg2e64_v_f64m1(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (size_t)(op3))
+#define vlseg2e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (size_t)(op6))
+#define vlseg2e64_v_f64m2(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (size_t)(op3))
+#define vlseg2e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (size_t)(op6))
+#define vlseg2e64_v_f64m4(op0, op1, op2, op3) \
+__builtin_rvv_vlseg2e64_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (size_t)(op3))
+#define vlseg2e64_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg2e64_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (size_t)(op6))
+#define vlseg3e64_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (size_t)(op4))
+#define vlseg3e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (size_t)(op8))
+#define vlseg3e64_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg3e64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (size_t)(op4))
+#define vlseg3e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg3e64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (size_t)(op8))
+#define vlseg4e64_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (size_t)(op5))
+#define vlseg4e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (size_t)(op10))
+#define vlseg4e64_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg4e64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (size_t)(op5))
+#define vlseg4e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg4e64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (size_t)(op10))
+#define vlseg5e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg5e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (size_t)(op6))
+#define vlseg5e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \
+__builtin_rvv_vlseg5e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (size_t)(op12))
+#define vlseg6e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg6e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (size_t)(op7))
+#define vlseg6e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \
+__builtin_rvv_vlseg6e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (size_t)(op14))
+#define vlseg7e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg7e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (size_t)(op8))
+#define vlseg7e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \
+__builtin_rvv_vlseg7e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (size_t)(op16))
+#define vlseg8e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg8e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (size_t)(op9))
+#define vlseg8e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \
+__builtin_rvv_vlseg8e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (size_t)(op18))
+#define vlseg2e64ff_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg2e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4))
+#define vlseg2e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg2e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (size_t *)(op6), (size_t)(op7))
+#define vlseg2e64ff_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg2e64ff_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4))
+#define vlseg2e64ff_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg2e64ff_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (size_t *)(op6), (size_t)(op7))
+#define vlseg2e64ff_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlseg2e64ff_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (size_t *)(op3), (size_t)(op4))
+#define vlseg2e64ff_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg2e64ff_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (size_t *)(op6), (size_t)(op7))
+#define vlseg3e64ff_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg3e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (size_t *)(op4), (size_t)(op5))
+#define vlseg3e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg3e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (size_t *)(op8), (size_t)(op9))
+#define vlseg3e64ff_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlseg3e64ff_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (size_t *)(op4), (size_t)(op5))
+#define vlseg3e64ff_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg3e64ff_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (size_t *)(op8), (size_t)(op9))
+#define vlseg4e64ff_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg4e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (size_t *)(op5), (size_t)(op6))
+#define vlseg4e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vlseg4e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (size_t *)(op10), (size_t)(op11))
+#define vlseg4e64ff_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlseg4e64ff_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (size_t *)(op5), (size_t)(op6))
+#define vlseg4e64ff_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vlseg4e64ff_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (size_t *)(op10), (size_t)(op11))
+#define vlseg5e64ff_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlseg5e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (size_t *)(op6), (size_t)(op7))
+#define vlseg5e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vlseg5e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (size_t *)(op12), (size_t)(op13))
+#define vlseg6e64ff_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlseg6e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (size_t *)(op7), (size_t)(op8))
+#define vlseg6e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vlseg6e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (size_t *)(op14), (size_t)(op15))
+#define vlseg7e64ff_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlseg7e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (size_t *)(op8), (size_t)(op9))
+#define vlseg7e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vlseg7e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (size_t *)(op16), (size_t)(op17))
+#define vlseg8e64ff_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlseg8e64ff_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (size_t *)(op9), (size_t)(op10))
+#define vlseg8e64ff_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vlseg8e64ff_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (size_t *)(op18), (size_t)(op19))
+#define vlsseg2e64_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlsseg2e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vlsseg2e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlsseg2e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (ptrdiff_t)(op6), (size_t)(op7))
+#define vlsseg2e64_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlsseg2e64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vlsseg2e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlsseg2e64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (ptrdiff_t)(op6), (size_t)(op7))
+#define vlsseg2e64_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vlsseg2e64_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (ptrdiff_t)(op3), (size_t)(op4))
+#define vlsseg2e64_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlsseg2e64_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (ptrdiff_t)(op6), (size_t)(op7))
+#define vlsseg3e64_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlsseg3e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (ptrdiff_t)(op4), (size_t)(op5))
+#define vlsseg3e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlsseg3e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (ptrdiff_t)(op8), (size_t)(op9))
+#define vlsseg3e64_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vlsseg3e64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (ptrdiff_t)(op4), (size_t)(op5))
+#define vlsseg3e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlsseg3e64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (ptrdiff_t)(op8), (size_t)(op9))
+#define vlsseg4e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlsseg4e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (ptrdiff_t)(op5), (size_t)(op6))
+#define vlsseg4e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vlsseg4e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (ptrdiff_t)(op10), (size_t)(op11))
+#define vlsseg4e64_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vlsseg4e64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (ptrdiff_t)(op5), (size_t)(op6))
+#define vlsseg4e64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vlsseg4e64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (ptrdiff_t)(op10), (size_t)(op11))
+#define vlsseg5e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vlsseg5e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (ptrdiff_t)(op6), (size_t)(op7))
+#define vlsseg5e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vlsseg5e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (ptrdiff_t)(op12), (size_t)(op13))
+#define vlsseg6e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vlsseg6e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (ptrdiff_t)(op7), (size_t)(op8))
+#define vlsseg6e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vlsseg6e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (ptrdiff_t)(op14), (size_t)(op15))
+#define vlsseg7e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vlsseg7e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (ptrdiff_t)(op8), (size_t)(op9))
+#define vlsseg7e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vlsseg7e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (ptrdiff_t)(op16), (size_t)(op17))
+#define vlsseg8e64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vlsseg8e64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (ptrdiff_t)(op9), (size_t)(op10))
+#define vlsseg8e64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vlsseg8e64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (ptrdiff_t)(op18), (size_t)(op19))
+#define vluxseg2ei8_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint8mf8_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vluxseg2ei8_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vluxseg2ei8_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei8_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei8_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei8_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vluxseg3ei8_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint8mf8_t)(op4), (size_t)(op5))
+#define vluxseg3ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vluxseg3ei8_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei8_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint8mf4_t)(op4), (size_t)(op5))
+#define vluxseg3ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei8_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint8mf4_t)(op8), (size_t)(op9))
+#define vluxseg4ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint8mf8_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint8mf8_t)(op10), (size_t)(op11))
+#define vluxseg4ei8_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei8_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint8mf4_t)(op5), (size_t)(op6))
+#define vluxseg4ei8_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vluxseg4ei8_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint8mf4_t)(op10), (size_t)(op11))
+#define vluxseg5ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg5ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint8mf8_t)(op6), (size_t)(op7))
+#define vluxseg5ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \
+__builtin_rvv_vluxseg5ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint8mf8_t)(op12), (size_t)(op13))
+#define vluxseg6ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vluxseg6ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint8mf8_t)(op7), (size_t)(op8))
+#define vluxseg6ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \
+__builtin_rvv_vluxseg6ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint8mf8_t)(op14), (size_t)(op15))
+#define vluxseg7ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg7ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint8mf8_t)(op8), (size_t)(op9))
+#define vluxseg7ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \
+__builtin_rvv_vluxseg7ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint8mf8_t)(op16), (size_t)(op17))
+#define vluxseg8ei8_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei8_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint8mf8_t)(op9), (size_t)(op10))
+#define vluxseg8ei8_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei8_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint8mf8_t)(op18), (size_t)(op19))
+#define vluxseg2ei16_v_f64m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint16mf4_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint16mf4_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_f64m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint16mf2_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint16mf2_t)(op6), (size_t)(op7))
+#define vluxseg2ei16_v_f64m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vluxseg2ei16_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint16m1_t)(op3), (size_t)(op4))
+#define vluxseg2ei16_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vluxseg2ei16_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint16m1_t)(op6), (size_t)(op7))
+#define vluxseg3ei16_v_f64m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint16mf4_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint16mf4_t)(op8), (size_t)(op9))
+#define vluxseg3ei16_v_f64m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vluxseg3ei16_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint16mf2_t)(op4), (size_t)(op5))
+#define vluxseg3ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vluxseg3ei16_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint16mf2_t)(op8), (size_t)(op9))
+#define vluxseg4ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vluxseg4ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg5ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg6ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg7ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), 
(vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg8ei16_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg2ei32_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg3ei32_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), 
(vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg4ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg5ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg6ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg7ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), 
(vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg8ei32_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg2ei64_v_f64m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (const double *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vbool64_t)(op2), (vfloat64m1_t)(op3), (vfloat64m1_t)(op4), (const double *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f64m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (const double *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vbool32_t)(op2), (vfloat64m2_t)(op3), (vfloat64m2_t)(op4), (const double *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f64m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f64m4((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (const double *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f64m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f64m4_m((vfloat64m4_t *)(op0), (vfloat64m4_t *)(op1), (vbool16_t)(op2), (vfloat64m4_t)(op3), (vfloat64m4_t)(op4), (const double *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg3ei64_v_f64m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (const double *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vbool64_t)(op3), (vfloat64m1_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (const double *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_f64m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (const double *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vluxseg3ei64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vbool32_t)(op3), (vfloat64m2_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (const double *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg4ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (const double *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vbool64_t)(op4), (vfloat64m1_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (const double *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_f64m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f64m2((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (const double *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f64m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f64m2_m((vfloat64m2_t *)(op0), (vfloat64m2_t *)(op1), (vfloat64m2_t *)(op2), (vfloat64m2_t *)(op3), (vbool32_t)(op4), (vfloat64m2_t)(op5), (vfloat64m2_t)(op6), (vfloat64m2_t)(op7), (vfloat64m2_t)(op8), (const double *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vluxseg5ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (const double *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vbool64_t)(op5), (vfloat64m1_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (const double *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg6ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (const double *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vbool64_t)(op6), (vfloat64m1_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (const double *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg7ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (const double *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ 
+__builtin_rvv_vluxseg7ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vbool64_t)(op7), (vfloat64m1_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (const double *)(op15), (vuint64m1_t)(op16), (size_t)(op17))
+#define vluxseg8ei64_v_f64m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vluxseg8ei64_v_f64m1((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (const double *)(op8), (vuint64m1_t)(op9), (size_t)(op10))
+#define vluxseg8ei64_v_f64m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \
+__builtin_rvv_vluxseg8ei64_v_f64m1_m((vfloat64m1_t *)(op0), (vfloat64m1_t *)(op1), (vfloat64m1_t *)(op2), (vfloat64m1_t *)(op3), (vfloat64m1_t *)(op4), (vfloat64m1_t *)(op5), (vfloat64m1_t *)(op6), (vfloat64m1_t *)(op7), (vbool64_t)(op8), (vfloat64m1_t)(op9), (vfloat64m1_t)(op10), (vfloat64m1_t)(op11), (vfloat64m1_t)(op12), (vfloat64m1_t)(op13), (vfloat64m1_t)(op14), (vfloat64m1_t)(op15), (vfloat64m1_t)(op16), (const double *)(op17), (vuint64m1_t)(op18), (size_t)(op19))
+#endif
+
+#if defined(__riscv_zfh) && defined(__riscv_zvlsseg)
+#define vloxseg2ei8_v_f16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint8mf2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (vuint8mf2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint8m1_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint8m1_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (vuint8m2_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (vuint8m2_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint8mf4_t)(op3), (size_t)(op4))
+#define vloxseg2ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vloxseg2ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint8mf4_t)(op6), (size_t)(op7))
+#define vloxseg2ei8_v_f16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vloxseg2ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2),
(vuint8mf8_t)(op3), (size_t)(op4)) +#define vloxseg2ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg3ei8_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg3ei8_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vloxseg3ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg4ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vloxseg4ei8_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vloxseg4ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vloxseg4ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vloxseg5ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vloxseg5ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vloxseg5ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t 
*)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vloxseg6ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint8mf4_t)(op14), (size_t)(op15)) +#define vloxseg6ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vloxseg6ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vloxseg7ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), 
(vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vloxseg7ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vloxseg7ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vloxseg8ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), 
(vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vloxseg8ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vloxseg8ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vloxseg2ei16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg2ei16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define 
vloxseg2ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg3ei16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg3ei16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vloxseg3ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg4ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ 
+__builtin_rvv_vloxseg4ei16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vloxseg4ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vloxseg4ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vloxseg5ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint16m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vloxseg5ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vloxseg5ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ 
+__builtin_rvv_vloxseg5ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vloxseg6ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vloxseg6ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vloxseg6ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vloxseg7ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), 
(vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vloxseg7ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vloxseg7ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vloxseg8ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint16m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, 
op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vloxseg8ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vloxseg8ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vloxseg2ei32_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (vuint32m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg2ei32_v_f16mf4(op0, op1, op2, op3, op4) \ 
+__builtin_rvv_vloxseg2ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vloxseg2ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vloxseg3ei32_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg3ei32_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vloxseg3ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg4ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 
*)(op4), (vuint32m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint32m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vloxseg4ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vloxseg4ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vloxseg5ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint32m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vloxseg5ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define 
vloxseg5ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vloxseg6ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint32m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint32m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vloxseg6ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vloxseg6ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vloxseg7ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), 
(vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint32m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vloxseg7ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vloxseg7ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vloxseg8ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint32m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint32m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define 
vloxseg8ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vloxseg8ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vloxseg8ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vloxseg2ei64_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg2ei64_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vloxseg2ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vloxseg2ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg2ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), 
(vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg3ei64_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint64m8_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg3ei64_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vloxseg3ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vloxseg3ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg3ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg4ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint64m8_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), 
(vuint64m8_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vloxseg4ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vloxseg4ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vloxseg4ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vloxseg4ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vloxseg5ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vloxseg5ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vloxseg5ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vloxseg5ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vloxseg5ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), 
(vuint64m1_t)(op12), (size_t)(op13)) +#define vloxseg6ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vloxseg6ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vloxseg6ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vloxseg6ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vloxseg6ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vloxseg7ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vloxseg7ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vloxseg7ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vloxseg7ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vloxseg7ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vloxseg7ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vloxseg8ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), 
(vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vloxseg8ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vloxseg8ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vloxseg8ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vloxseg8ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#define vsseg2e16_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_f16m1_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_f16m2((_Float16 *)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_f16m2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_f16m4((_Float16 *)(op0), (vfloat16m4_t)(op1), (vfloat16m4_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_f16m4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_f16mf2_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsseg2e16_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vsseg2e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (size_t)(op3)) +#define vsseg2e16_v_f16mf4_m(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg2e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_f16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_f16m2((_Float16 *)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), 
(vfloat16m2_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_f16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsseg3e16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsseg3e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsseg3e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg3e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_f16m2((_Float16 *)(op0), (vfloat16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsseg4e16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsseg4e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsseg4e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg4e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), 
(vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsseg5e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsseg5e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsseg5e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg5e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsseg6e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsseg6e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsseg6e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg6e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vsseg7e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsseg7e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsseg7e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsseg7e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg7e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_f16m1((_Float16 *)(op0), (vfloat16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_f16mf2((_Float16 *)(op0), (vfloat16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsseg8e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsseg8e16_v_f16mf4((_Float16 *)(op0), (vfloat16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsseg8e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsseg8e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vssseg2e16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_f16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_f16m2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_f16m2_m(op0, op1, op2, op3, op4, op5) \ 
+__builtin_rvv_vssseg2e16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_f16m4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_f16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vssseg2e16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vssseg2e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vssseg2e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg2e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_f16m2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vssseg3e16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vssseg3e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vssseg3e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg3e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vssseg4e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vssseg4e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vssseg4e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), 
(vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vssseg4e16_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vssseg4e16_v_f16m2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vssseg4e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vssseg4e16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7))
+#define vssseg4e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vssseg4e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vssseg4e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vssseg4e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vssseg4e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vssseg4e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vssseg4e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vssseg4e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vssseg5e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vssseg5e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vssseg5e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vssseg5e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vssseg5e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vssseg5e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vssseg5e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vssseg5e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vssseg5e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vssseg5e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vssseg5e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vssseg5e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vssseg6e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vssseg6e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vssseg6e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vssseg6e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vssseg6e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vssseg6e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vssseg6e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vssseg6e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vssseg6e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vssseg6e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vssseg6e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vssseg6e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vssseg7e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vssseg7e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vssseg7e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vssseg7e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vssseg7e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vssseg7e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vssseg7e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vssseg7e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vssseg7e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vssseg7e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vssseg7e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vssseg7e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vssseg8e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vssseg8e16_v_f16m1((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vssseg8e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vssseg8e16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11))
+#define vssseg8e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vssseg8e16_v_f16mf2((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vssseg8e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vssseg8e16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11))
+#define vssseg8e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vssseg8e16_v_f16mf4((_Float16 *)(op0), (ptrdiff_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vssseg8e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vssseg8e16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (ptrdiff_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11))
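+// Editorial note (illustrative, not part of the upstream generator output):
+// the vssseg<N>e16 macros above wrap the strided segment-store builtins.
+// Operands are the _Float16 base pointer, a byte stride (ptrdiff_t), the <N>
+// field vectors, and the vector length; the _m variants prepend a mask, and
+// masked-off segments are not written. A minimal usage sketch, assuming vl
+// was obtained from a prior vsetvl for SEW=16/LMUL=2:
+//   vssseg4e16_v_f16m2(dst, stride, v0, v1, v2, v3, vl);
+// The vsuxseg<N>ei<W> macros below are the unordered indexed ("scatter")
+// segment stores: the stride is replaced by a vector of unsigned byte
+// offsets with <W>-bit elements.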
+#define vsuxseg2ei8_v_f16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_f16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_f16m2((_Float16 *)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint8m1_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_f16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_f16m4((_Float16 *)(op0), (vuint8m2_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_f16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint8m2_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_f16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei8_v_f16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_f16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei8_v_f16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_f16m2((_Float16 *)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint8m1_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei8_v_f16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei8_v_f16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei8_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_f16m2((_Float16 *)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint8m1_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11))
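+// Editorial note (illustrative): the index operand's register group scales
+// with the ratio of index width to element width (SEW=16 here). For f16m1
+// data this gives vuint8mf2_t (ei8), vuint16m1_t (ei16), vuint32m2_t (ei32)
+// and vuint64m4_t (ei64); the mask type depends only on the data type's
+// SEW/LMUL ratio (vbool16_t for f16m1). Combinations whose index group would
+// exceed LMUL=8, or whose NFIELDS*LMUL would exceed 8, are omitted below.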
+#define vsuxseg2ei16_v_f16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_f16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_f16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f16m4((_Float16 *)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint16m4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_f16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei16_v_f16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_f16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_f16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei16_v_f16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg2ei32_v_f16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_f16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_f16m4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f16m4((_Float16 *)(op0), (vuint32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f16m4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint32m8_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_f16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei32_v_f16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_f16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_f16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei32_v_f16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11))
+#define vsuxseg2ei64_v_f16m1(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_f16m2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_f16mf2(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg2ei64_v_f16mf4(op0, op1, op2, op3, op4) \
+__builtin_rvv_vsuxseg2ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4))
+#define vsuxseg2ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg2ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f16m1(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_f16m2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_f16mf2(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg3ei64_v_f16mf4(op0, op1, op2, op3, op4, op5) \
+__builtin_rvv_vsuxseg3ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5))
+#define vsuxseg3ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg3ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg4ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \
+__builtin_rvv_vsuxseg4ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6))
+#define vsuxseg4ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg4ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg5ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg5ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \
+__builtin_rvv_vsuxseg5ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7))
+#define vsuxseg5ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg5ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg6ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg6ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \
+__builtin_rvv_vsuxseg6ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8))
+#define vsuxseg6ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg6ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg7ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg7ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \
+__builtin_rvv_vsuxseg7ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9))
+#define vsuxseg7ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg7ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11))
+#define vsuxseg8ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11))
+#define vsuxseg8ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \
+__builtin_rvv_vsuxseg8ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10))
+#define vsuxseg8ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \
+__builtin_rvv_vsuxseg8ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11))
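+// Editorial note (illustrative): the vsoxseg<N>ei<W> macros below are the
+// ordered indexed segment stores. They take exactly the same operands as the
+// unordered vsuxseg forms above, but guarantee that the element stores are
+// performed in element order, which matters when index offsets overlap.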
op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f16m2((_Float16 *)(op0), (vuint8m1_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint8m1_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), 
(vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vsoxseg7ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_f16m1((_Float16 *)(op0), (vuint8mf2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint8mf2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_f16mf2((_Float16 *)(op0), (vuint8mf4_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint8mf4_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei8_v_f16mf4((_Float16 *)(op0), (vuint8mf8_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei8_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint8mf8_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) 
+#define vsoxseg2ei16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f16m4((_Float16 *)(op0), (vuint16m4_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint16m4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) 
\ +__builtin_rvv_vsoxseg4ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_f16m2((_Float16 *)(op0), (vuint16m2_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei16_v_f16mf4_m((vbool64_t)(op0), 
(_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), 
(vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_f16m1((_Float16 *)(op0), (vuint16m1_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_f16mf2((_Float16 *)(op0), (vuint16mf2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei16_v_f16mf4((_Float16 *)(op0), (vuint16mf4_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei16_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei32_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) 
+#define vsoxseg2ei32_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f16m4((_Float16 *)(op0), (vuint32m8_t)(op1), (vfloat16m4_t)(op2), (vfloat16m4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f16m4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f16m4_m((vbool4_t)(op0), (_Float16 *)(op1), (vuint32m8_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei32_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei32_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), 
(vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f16m2((_Float16 *)(op0), (vuint32m4_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint32m4_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), 
(vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), 
(vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_f16m1((_Float16 *)(op0), (vuint32m2_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint32m2_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_f16mf2((_Float16 *)(op0), (vuint32m1_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint32m1_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei32_v_f16mf4((_Float16 *)(op0), (vuint32mf2_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei32_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint32mf2_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11)) +#define vsoxseg2ei64_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsoxseg2ei64_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), 
(size_t)(op5)) +#define vsoxseg2ei64_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vsoxseg2ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (size_t)(op4)) +#define vsoxseg2ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg2ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define vsoxseg3ei64_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vsoxseg3ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (size_t)(op5)) +#define vsoxseg3ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg3ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f16m2((_Float16 *)(op0), (vuint64m8_t)(op1), (vfloat16m2_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f16m2_m((vbool8_t)(op0), (_Float16 *)(op1), (vuint64m8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (size_t)(op6)) +#define 
vsoxseg4ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg4ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vsoxseg4ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (size_t)(op6)) +#define vsoxseg4ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg4ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg5ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vsoxseg5ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (size_t)(op7)) +#define vsoxseg5ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg5ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_f16mf2_m((vbool32_t)(op0), 
(_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg6ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vsoxseg6ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (size_t)(op8)) +#define vsoxseg6ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg6ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg7ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vsoxseg7ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (size_t)(op9)) +#define vsoxseg7ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg7ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_f16m1((_Float16 *)(op0), (vuint64m4_t)(op1), (vfloat16m1_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_f16m1_m((vbool16_t)(op0), (_Float16 *)(op1), (vuint64m4_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), 
(vfloat16m1_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_f16mf2((_Float16 *)(op0), (vuint64m2_t)(op1), (vfloat16mf2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_f16mf2_m((vbool32_t)(op0), (_Float16 *)(op1), (vuint64m2_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (size_t)(op11)) +#define vsoxseg8ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vsoxseg8ei64_v_f16mf4((_Float16 *)(op0), (vuint64m1_t)(op1), (vfloat16mf4_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (size_t)(op10)) +#define vsoxseg8ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vsoxseg8ei64_v_f16mf4_m((vbool64_t)(op0), (_Float16 *)(op1), (vuint64m1_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (size_t)(op11)) +#define vlseg2e16_v_f16m1(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vlseg2e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg2e16_v_f16m2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vlseg2e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg2e16_v_f16m4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e16_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vlseg2e16_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e16_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg2e16_v_f16mf2(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vlseg2e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg2e16_v_f16mf4(op0, op1, op2, op3) \ +__builtin_rvv_vlseg2e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (size_t)(op3)) +#define vlseg2e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg2e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), 
(vfloat16mf4_t)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg3e16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (size_t)(op4)) +#define vlseg3e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg3e16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (size_t)(op4)) +#define vlseg3e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg3e16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (size_t)(op4)) +#define vlseg3e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg3e16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg3e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (size_t)(op4)) +#define vlseg3e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg3e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg4e16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (size_t)(op5)) +#define vlseg4e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (size_t)(op10)) +#define vlseg4e16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (size_t)(op5)) +#define vlseg4e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (size_t)(op10)) +#define vlseg4e16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (size_t)(op5)) +#define vlseg4e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vlseg4e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (size_t)(op10)) +#define vlseg4e16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg4e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (size_t)(op5)) +#define vlseg4e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg4e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (size_t)(op10)) +#define vlseg5e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg5e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (size_t)(op12)) +#define vlseg5e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg5e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (size_t)(op12)) +#define vlseg5e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg5e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (size_t)(op6)) +#define vlseg5e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12) \ +__builtin_rvv_vlseg5e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (size_t)(op12)) +#define vlseg6e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (size_t)(op7)) +#define vlseg6e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), 
(vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (size_t)(op14)) +#define vlseg6e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (size_t)(op7)) +#define vlseg6e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (size_t)(op14)) +#define vlseg6e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg6e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (size_t)(op7)) +#define vlseg6e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14) \ +__builtin_rvv_vlseg6e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (size_t)(op14)) +#define vlseg7e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg7e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (size_t)(op16)) +#define vlseg7e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg7e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (size_t)(op16)) +#define vlseg7e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg7e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t 
*)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (size_t)(op8)) +#define vlseg7e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16) \ +__builtin_rvv_vlseg7e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (size_t)(op16)) +#define vlseg8e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (size_t)(op9)) +#define vlseg8e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (size_t)(op18)) +#define vlseg8e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (size_t)(op9)) +#define vlseg8e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (size_t)(op18)) +#define vlseg8e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg8e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (size_t)(op9)) +#define vlseg8e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18) \ +__builtin_rvv_vlseg8e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (size_t)(op18)) +#define 
vlseg2e16ff_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg2e16ff_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlseg2e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (size_t *)(op3), (size_t)(op4)) +#define vlseg2e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg2e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg3e16ff_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_f16mf2((vfloat16mf2_t 
*)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg3e16ff_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlseg3e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (size_t *)(op4), (size_t)(op5)) +#define vlseg3e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg3e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg4e16ff_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (size_t *)(op10), (size_t)(op11)) +#define vlseg4e16ff_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlseg4e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (size_t *)(op5), (size_t)(op6)) +#define vlseg4e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlseg4e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), 
(size_t *)(op10), (size_t)(op11)) +#define vlseg5e16ff_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg5e16ff_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlseg5e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (size_t *)(op6), (size_t)(op7)) +#define vlseg5e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlseg5e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (size_t *)(op12), (size_t)(op13)) +#define vlseg6e16ff_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t 
*)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg6e16ff_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlseg6e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (size_t *)(op7), (size_t)(op8)) +#define vlseg6e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlseg6e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (size_t *)(op14), (size_t)(op15)) +#define vlseg7e16ff_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg7e16ff_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlseg7e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (size_t *)(op8), (size_t)(op9)) +#define vlseg7e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlseg7e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), 
(vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (size_t *)(op16), (size_t)(op17)) +#define vlseg8e16ff_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlseg8e16ff_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlseg8e16ff_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (size_t *)(op9), (size_t)(op10)) +#define vlseg8e16ff_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlseg8e16ff_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (size_t *)(op18), (size_t)(op19)) +#define vlsseg2e16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define 
vlsseg2e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg2e16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vlsseg2e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (ptrdiff_t)(op3), (size_t)(op4)) +#define vlsseg2e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg2e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg3e16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vlsseg3e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg3e16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vlsseg3e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (ptrdiff_t)(op4), (size_t)(op5)) +#define vlsseg3e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg3e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg4e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg4e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vlsseg4e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (ptrdiff_t)(op5), (size_t)(op6)) +#define vlsseg4e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vlsseg4e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (ptrdiff_t)(op10), (size_t)(op11)) +#define vlsseg5e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), 
(vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg5e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vlsseg5e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (ptrdiff_t)(op6), (size_t)(op7)) +#define vlsseg5e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vlsseg5e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (ptrdiff_t)(op12), (size_t)(op13)) +#define vlsseg6e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), 
(vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg6e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vlsseg6e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (ptrdiff_t)(op7), (size_t)(op8)) +#define vlsseg6e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vlsseg6e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (ptrdiff_t)(op14), (size_t)(op15)) +#define vlsseg7e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg7e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vlsseg7e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (ptrdiff_t)(op8), (size_t)(op9)) +#define vlsseg7e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vlsseg7e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), 
(vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (ptrdiff_t)(op16), (size_t)(op17)) +#define vlsseg8e16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vlsseg8e16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vlsseg8e16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (ptrdiff_t)(op9), (size_t)(op10)) +#define vlsseg8e16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vlsseg8e16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (ptrdiff_t)(op18), (size_t)(op19)) +#define vluxseg2ei8_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint8mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), 
(vuint8mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint8m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint8m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (vuint8m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (vuint8m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint8mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg2ei8_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint8mf8_t)(op3), (size_t)(op4)) +#define vluxseg2ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg3ei8_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint8mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint8m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint8m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint8mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), 
(vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg3ei8_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint8mf8_t)(op4), (size_t)(op5)) +#define vluxseg3ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg4ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint8mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint8mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint8m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint8m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint8mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint8mf4_t)(op10), (size_t)(op11)) +#define vluxseg4ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint8mf8_t)(op5), (size_t)(op6)) +#define vluxseg4ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint8mf8_t)(op10), (size_t)(op11)) +#define vluxseg5ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint8mf2_t)(op6), (size_t)(op7)) +#define 
vluxseg5ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint8mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint8mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint8mf4_t)(op12), (size_t)(op13)) +#define vluxseg5ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint8mf8_t)(op6), (size_t)(op7)) +#define vluxseg5ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint8mf8_t)(op12), (size_t)(op13)) +#define vluxseg6ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint8mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint8mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint8mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), 
(vuint8mf4_t)(op14), (size_t)(op15)) +#define vluxseg6ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint8mf8_t)(op7), (size_t)(op8)) +#define vluxseg6ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint8mf8_t)(op14), (size_t)(op15)) +#define vluxseg7ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint8mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint8mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint8mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint8mf4_t)(op16), (size_t)(op17)) +#define vluxseg7ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint8mf8_t)(op8), (size_t)(op9)) +#define vluxseg7ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), 
(vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint8mf8_t)(op16), (size_t)(op17)) +#define vluxseg8ei8_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint8mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint8mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint8mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint8mf4_t)(op18), (size_t)(op19)) +#define vluxseg8ei8_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei8_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint8mf8_t)(op9), (size_t)(op10)) +#define vluxseg8ei8_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei8_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint8mf8_t)(op18), (size_t)(op19)) +#define vluxseg2ei16_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint16m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), 
(vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint16m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint16m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (vuint16m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (vuint16m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint16mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg2ei16_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint16mf4_t)(op3), (size_t)(op4)) +#define vluxseg2ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg3ei16_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint16m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint16m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint16m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint16mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), 
(vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg3ei16_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint16mf4_t)(op4), (size_t)(op5)) +#define vluxseg3ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg4ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint16m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint16m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint16m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint16m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint16mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint16mf2_t)(op10), (size_t)(op11)) +#define vluxseg4ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint16mf4_t)(op5), (size_t)(op6)) +#define vluxseg4ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint16mf4_t)(op10), (size_t)(op11)) +#define vluxseg5ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), 
(vuint16m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint16m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint16mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint16mf2_t)(op12), (size_t)(op13)) +#define vluxseg5ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint16mf4_t)(op6), (size_t)(op7)) +#define vluxseg5ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint16mf4_t)(op12), (size_t)(op13)) +#define vluxseg6ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint16m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint16m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint16mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), 
(vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint16mf2_t)(op14), (size_t)(op15)) +#define vluxseg6ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint16mf4_t)(op7), (size_t)(op8)) +#define vluxseg6ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint16mf4_t)(op14), (size_t)(op15)) +#define vluxseg7ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint16m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint16m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint16mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint16mf2_t)(op16), (size_t)(op17)) +#define vluxseg7ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint16mf4_t)(op8), (size_t)(op9)) +#define vluxseg7ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), 
(vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint16mf4_t)(op16), (size_t)(op17)) +#define vluxseg8ei16_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint16m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint16m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint16mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint16mf2_t)(op18), (size_t)(op19)) +#define vluxseg8ei16_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei16_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint16mf4_t)(op9), (size_t)(op10)) +#define vluxseg8ei16_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei16_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint16mf4_t)(op18), (size_t)(op19)) +#define vluxseg2ei32_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint32m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f16m1_m((vfloat16m1_t *)(op0), 
(vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint32m4_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint32m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f16m4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f16m4((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (const _Float16 *)(op2), (vuint32m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f16m4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f16m4_m((vfloat16m4_t *)(op0), (vfloat16m4_t *)(op1), (vbool4_t)(op2), (vfloat16m4_t)(op3), (vfloat16m4_t)(op4), (const _Float16 *)(op5), (vuint32m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint32m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg2ei32_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint32mf2_t)(op3), (size_t)(op4)) +#define vluxseg2ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg3ei32_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint32m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint32m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint32m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint32m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ 
+__builtin_rvv_vluxseg3ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg3ei32_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint32mf2_t)(op4), (size_t)(op5)) +#define vluxseg3ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg4ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint32m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint32m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint32m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint32m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint32m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint32m1_t)(op10), (size_t)(op11)) +#define vluxseg4ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint32mf2_t)(op5), (size_t)(op6)) +#define vluxseg4ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint32mf2_t)(op10), (size_t)(op11)) +#define vluxseg5ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ 
+__builtin_rvv_vluxseg5ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint32m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint32m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint32m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint32m1_t)(op12), (size_t)(op13)) +#define vluxseg5ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint32mf2_t)(op6), (size_t)(op7)) +#define vluxseg5ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint32mf2_t)(op12), (size_t)(op13)) +#define vluxseg6ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint32m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint32m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint32m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), 
(vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint32m1_t)(op14), (size_t)(op15)) +#define vluxseg6ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint32mf2_t)(op7), (size_t)(op8)) +#define vluxseg6ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint32mf2_t)(op14), (size_t)(op15)) +#define vluxseg7ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint32m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint32m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint32m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint32m1_t)(op16), (size_t)(op17)) +#define vluxseg7ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint32mf2_t)(op8), (size_t)(op9)) +#define vluxseg7ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), 
(vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint32mf2_t)(op16), (size_t)(op17)) +#define vluxseg8ei32_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint32m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint32m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint32m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint32m1_t)(op18), (size_t)(op19)) +#define vluxseg8ei32_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei32_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint32mf2_t)(op9), (size_t)(op10)) +#define vluxseg8ei32_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei32_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint32mf2_t)(op18), (size_t)(op19)) +#define vluxseg2ei64_v_f16m1(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (const _Float16 *)(op2), (vuint64m4_t)(op3), (size_t)(op4)) +#define 
vluxseg2ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vbool16_t)(op2), (vfloat16m1_t)(op3), (vfloat16m1_t)(op4), (const _Float16 *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f16m2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (const _Float16 *)(op2), (vuint64m8_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vbool8_t)(op2), (vfloat16m2_t)(op3), (vfloat16m2_t)(op4), (const _Float16 *)(op5), (vuint64m8_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f16mf2(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (const _Float16 *)(op2), (vuint64m2_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vbool32_t)(op2), (vfloat16mf2_t)(op3), (vfloat16mf2_t)(op4), (const _Float16 *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg2ei64_v_f16mf4(op0, op1, op2, op3, op4) \ +__builtin_rvv_vluxseg2ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (const _Float16 *)(op2), (vuint64m1_t)(op3), (size_t)(op4)) +#define vluxseg2ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg2ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vbool64_t)(op2), (vfloat16mf4_t)(op3), (vfloat16mf4_t)(op4), (const _Float16 *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg3ei64_v_f16m1(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (const _Float16 *)(op3), (vuint64m4_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vbool16_t)(op3), (vfloat16m1_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (const _Float16 *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_f16m2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (const _Float16 *)(op3), (vuint64m8_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vbool8_t)(op3), (vfloat16m2_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (const _Float16 *)(op7), (vuint64m8_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_f16mf2(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (const _Float16 *)(op3), (vuint64m2_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vbool32_t)(op3), (vfloat16mf2_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (const _Float16 *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg3ei64_v_f16mf4(op0, op1, op2, op3, op4, op5) \ +__builtin_rvv_vluxseg3ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t 
*)(op1), (vfloat16mf4_t *)(op2), (const _Float16 *)(op3), (vuint64m1_t)(op4), (size_t)(op5)) +#define vluxseg3ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg3ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vbool64_t)(op3), (vfloat16mf4_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (const _Float16 *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg4ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (const _Float16 *)(op4), (vuint64m4_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vbool16_t)(op4), (vfloat16m1_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (const _Float16 *)(op9), (vuint64m4_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_f16m2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f16m2((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (const _Float16 *)(op4), (vuint64m8_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f16m2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f16m2_m((vfloat16m2_t *)(op0), (vfloat16m2_t *)(op1), (vfloat16m2_t *)(op2), (vfloat16m2_t *)(op3), (vbool8_t)(op4), (vfloat16m2_t)(op5), (vfloat16m2_t)(op6), (vfloat16m2_t)(op7), (vfloat16m2_t)(op8), (const _Float16 *)(op9), (vuint64m8_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (const _Float16 *)(op4), (vuint64m2_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vbool32_t)(op4), (vfloat16mf2_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (const _Float16 *)(op9), (vuint64m2_t)(op10), (size_t)(op11)) +#define vluxseg4ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6) \ +__builtin_rvv_vluxseg4ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (const _Float16 *)(op4), (vuint64m1_t)(op5), (size_t)(op6)) +#define vluxseg4ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11) \ +__builtin_rvv_vluxseg4ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vbool64_t)(op4), (vfloat16mf4_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (const _Float16 *)(op9), (vuint64m1_t)(op10), (size_t)(op11)) +#define vluxseg5ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (const _Float16 *)(op5), (vuint64m4_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), 
(vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vbool16_t)(op5), (vfloat16m1_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (const _Float16 *)(op11), (vuint64m4_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (const _Float16 *)(op5), (vuint64m2_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vbool32_t)(op5), (vfloat16mf2_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (const _Float16 *)(op11), (vuint64m2_t)(op12), (size_t)(op13)) +#define vluxseg5ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7) \ +__builtin_rvv_vluxseg5ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (const _Float16 *)(op5), (vuint64m1_t)(op6), (size_t)(op7)) +#define vluxseg5ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13) \ +__builtin_rvv_vluxseg5ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vbool64_t)(op5), (vfloat16mf4_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (const _Float16 *)(op11), (vuint64m1_t)(op12), (size_t)(op13)) +#define vluxseg6ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (const _Float16 *)(op6), (vuint64m4_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vbool16_t)(op6), (vfloat16m1_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (const _Float16 *)(op13), (vuint64m4_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (const _Float16 *)(op6), (vuint64m2_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vbool32_t)(op6), (vfloat16mf2_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (const _Float16 *)(op13), (vuint64m2_t)(op14), (size_t)(op15)) +#define vluxseg6ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8) \ +__builtin_rvv_vluxseg6ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), 
(vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (const _Float16 *)(op6), (vuint64m1_t)(op7), (size_t)(op8)) +#define vluxseg6ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15) \ +__builtin_rvv_vluxseg6ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vbool64_t)(op6), (vfloat16mf4_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (const _Float16 *)(op13), (vuint64m1_t)(op14), (size_t)(op15)) +#define vluxseg7ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (const _Float16 *)(op7), (vuint64m4_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vbool16_t)(op7), (vfloat16m1_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (const _Float16 *)(op15), (vuint64m4_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (const _Float16 *)(op7), (vuint64m2_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vbool32_t)(op7), (vfloat16mf2_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (const _Float16 *)(op15), (vuint64m2_t)(op16), (size_t)(op17)) +#define vluxseg7ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9) \ +__builtin_rvv_vluxseg7ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (const _Float16 *)(op7), (vuint64m1_t)(op8), (size_t)(op9)) +#define vluxseg7ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17) \ +__builtin_rvv_vluxseg7ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vbool64_t)(op7), (vfloat16mf4_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (const _Float16 *)(op15), (vuint64m1_t)(op16), (size_t)(op17)) +#define vluxseg8ei64_v_f16m1(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ 
+__builtin_rvv_vluxseg8ei64_v_f16m1((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (const _Float16 *)(op8), (vuint64m4_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_f16m1_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_f16m1_m((vfloat16m1_t *)(op0), (vfloat16m1_t *)(op1), (vfloat16m1_t *)(op2), (vfloat16m1_t *)(op3), (vfloat16m1_t *)(op4), (vfloat16m1_t *)(op5), (vfloat16m1_t *)(op6), (vfloat16m1_t *)(op7), (vbool16_t)(op8), (vfloat16m1_t)(op9), (vfloat16m1_t)(op10), (vfloat16m1_t)(op11), (vfloat16m1_t)(op12), (vfloat16m1_t)(op13), (vfloat16m1_t)(op14), (vfloat16m1_t)(op15), (vfloat16m1_t)(op16), (const _Float16 *)(op17), (vuint64m4_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_f16mf2(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_f16mf2((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (const _Float16 *)(op8), (vuint64m2_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_f16mf2_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_f16mf2_m((vfloat16mf2_t *)(op0), (vfloat16mf2_t *)(op1), (vfloat16mf2_t *)(op2), (vfloat16mf2_t *)(op3), (vfloat16mf2_t *)(op4), (vfloat16mf2_t *)(op5), (vfloat16mf2_t *)(op6), (vfloat16mf2_t *)(op7), (vbool32_t)(op8), (vfloat16mf2_t)(op9), (vfloat16mf2_t)(op10), (vfloat16mf2_t)(op11), (vfloat16mf2_t)(op12), (vfloat16mf2_t)(op13), (vfloat16mf2_t)(op14), (vfloat16mf2_t)(op15), (vfloat16mf2_t)(op16), (const _Float16 *)(op17), (vuint64m2_t)(op18), (size_t)(op19)) +#define vluxseg8ei64_v_f16mf4(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10) \ +__builtin_rvv_vluxseg8ei64_v_f16mf4((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (const _Float16 *)(op8), (vuint64m1_t)(op9), (size_t)(op10)) +#define vluxseg8ei64_v_f16mf4_m(op0, op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16, op17, op18, op19) \ +__builtin_rvv_vluxseg8ei64_v_f16mf4_m((vfloat16mf4_t *)(op0), (vfloat16mf4_t *)(op1), (vfloat16mf4_t *)(op2), (vfloat16mf4_t *)(op3), (vfloat16mf4_t *)(op4), (vfloat16mf4_t *)(op5), (vfloat16mf4_t *)(op6), (vfloat16mf4_t *)(op7), (vbool64_t)(op8), (vfloat16mf4_t)(op9), (vfloat16mf4_t)(op10), (vfloat16mf4_t)(op11), (vfloat16mf4_t)(op12), (vfloat16mf4_t)(op13), (vfloat16mf4_t)(op14), (vfloat16mf4_t)(op15), (vfloat16mf4_t)(op16), (const _Float16 *)(op17), (vuint64m1_t)(op18), (size_t)(op19)) +#endif + +#define __riscv_v_intrinsic_overloading 1 +#define __rvv_overloaded static inline __attribute__((__always_inline__, __nodebug__, __overloadable__)) +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1))) +vint8m1_t vadd(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1_m))) +vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m2))) +vint8m2_t vadd(vint8m2_t op0, vint8m2_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m2_m))) +vint8m2_t vadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m4))) +vint8m4_t vadd(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m4_m))) +vint8m4_t vadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m8))) +vint8m8_t vadd(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m8_m))) +vint8m8_t vadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8mf2))) +vint8mf2_t vadd(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8mf2_m))) +vint8mf2_t vadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8mf4))) +vint8mf4_t vadd(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8mf4_m))) +vint8mf4_t vadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8mf8))) +vint8mf8_t vadd(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8mf8_m))) +vint8mf8_t vadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m1))) +vint16m1_t vadd(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m1_m))) +vint16m1_t vadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m2))) +vint16m2_t vadd(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m2_m))) +vint16m2_t vadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m4))) +vint16m4_t vadd(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m4_m))) +vint16m4_t vadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m8))) +vint16m8_t vadd(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16m8_m))) +vint16m8_t vadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16mf2))) +vint16mf2_t vadd(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16mf2_m))) +vint16mf2_t vadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16mf4))) 
+vint16mf4_t vadd(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i16mf4_m))) +vint16mf4_t vadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m1))) +vint32m1_t vadd(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m1_m))) +vint32m1_t vadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m2))) +vint32m2_t vadd(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m2_m))) +vint32m2_t vadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m4))) +vint32m4_t vadd(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m4_m))) +vint32m4_t vadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m8))) +vint32m8_t vadd(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32m8_m))) +vint32m8_t vadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32mf2))) +vint32mf2_t vadd(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i32mf2_m))) +vint32mf2_t vadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m1))) +vint64m1_t vadd(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m1_m))) +vint64m1_t vadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m2))) +vint64m2_t vadd(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m2_m))) +vint64m2_t vadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m4))) +vint64m4_t vadd(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m4_m))) +vint64m4_t vadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m8))) +vint64m8_t vadd(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i64m8_m))) +vint64m8_t vadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16mf4))) +vuint16mf4_t vwaddu_vv(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16mf4_m))) +vuint16mf4_t vwaddu_vv(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16mf2))) +vuint16mf2_t vwaddu_vv(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16mf2_m))) +vuint16mf2_t vwaddu_vv(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m1))) +vuint16m1_t vwaddu_vv(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m1_m))) +vuint16m1_t vwaddu_vv(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m2))) +vuint16m2_t vwaddu_vv(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m2_m))) +vuint16m2_t vwaddu_vv(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m4))) +vuint16m4_t vwaddu_vv(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m4_m))) +vuint16m4_t vwaddu_vv(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m8))) +vuint16m8_t vwaddu_vv(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u16m8_m))) +vuint16m8_t vwaddu_vv(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32mf2))) +vuint32mf2_t vwaddu_vv(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32mf2_m))) +vuint32mf2_t vwaddu_vv(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m1))) +vuint32m1_t vwaddu_vv(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m1_m))) +vuint32m1_t vwaddu_vv(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m2))) +vuint32m2_t vwaddu_vv(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m2_m))) +vuint32m2_t vwaddu_vv(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m4))) +vuint32m4_t vwaddu_vv(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m4_m))) +vuint32m4_t vwaddu_vv(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m8))) +vuint32m8_t vwaddu_vv(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u32m8_m))) +vuint32m8_t vwaddu_vv(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m1))) +vuint64m1_t vwaddu_vv(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m1_m))) +vuint64m1_t vwaddu_vv(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m2))) +vuint64m2_t vwaddu_vv(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m2_m))) +vuint64m2_t vwaddu_vv(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m4))) +vuint64m4_t vwaddu_vv(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m4_m))) +vuint64m4_t vwaddu_vv(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m8))) +vuint64m8_t vwaddu_vv(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vv_u64m8_m))) +vuint64m8_t vwaddu_vv(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m1))) +vuint8m1_t vluxei8(const uint8_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m1_m))) +vuint8m1_t vluxei8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m2))) +vuint8m2_t vluxei8(const uint8_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m2_m))) +vuint8m2_t vluxei8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m4))) +vuint8m4_t vluxei8(const uint8_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m4_m))) +vuint8m4_t vluxei8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m8))) +vuint8m8_t vluxei8(const uint8_t * op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8m8_m))) +vuint8m8_t vluxei8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8mf2))) +vuint8mf2_t vluxei8(const uint8_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8mf2_m))) +vuint8mf2_t vluxei8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8mf4))) +vuint8mf4_t vluxei8(const uint8_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8mf4_m))) +vuint8mf4_t vluxei8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8mf8))) +vuint8mf8_t vluxei8(const uint8_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u8mf8_m))) +vuint8mf8_t vluxei8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_u64m1_m))) +vuint64m1_t vlse64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_u64m2_m))) +vuint64m2_t vlse64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_u64m4_m))) +vuint64m4_t vlse64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_u64m8_m))) +vuint64m8_t vlse64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m1))) +void vsse16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m1_m))) +void vsse16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m2))) +void vsse16(int16_t * op0, ptrdiff_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m2_m))) +void vsse16(vbool8_t op0, int16_t * op1, ptrdiff_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m4))) +void vsse16(int16_t * op0, ptrdiff_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m4_m))) +void vsse16(vbool4_t op0, int16_t * op1, ptrdiff_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m8))) +void vsse16(int16_t * op0, ptrdiff_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16m8_m))) +void vsse16(vbool2_t op0, int16_t * op1, ptrdiff_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16mf2))) +void vsse16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16mf2_m))) +void vsse16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16mf4))) +void vsse16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_i16mf4_m))) +void vsse16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m1))) +void vsse16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m1_m))) +void vsse16(vbool16_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m2))) +void 
vsse16(uint16_t * op0, ptrdiff_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m2_m))) +void vsse16(vbool8_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m4))) +void vsse16(uint16_t * op0, ptrdiff_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m4_m))) +void vsse16(vbool4_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m8))) +void vsse16(uint16_t * op0, ptrdiff_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16m8_m))) +void vsse16(vbool2_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16mf2))) +void vsse16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16mf2_m))) +void vsse16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16mf4))) +void vsse16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_u16mf4_m))) +void vsse16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m1))) +void vsse32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m1_m))) +void vsse32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m2))) +void vsse32(int32_t * op0, ptrdiff_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m2_m))) +void vsse32(vbool16_t op0, int32_t * op1, ptrdiff_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m4))) +void vsse32(int32_t * op0, ptrdiff_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m4_m))) +void vsse32(vbool8_t op0, int32_t * op1, ptrdiff_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m8))) +void vsse32(int32_t * op0, ptrdiff_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32m8_m))) +void vsse32(vbool4_t op0, int32_t * op1, ptrdiff_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32mf2))) +void vsse32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_i32mf2_m))) +void vsse32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m1))) +void vsse32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m1_m))) +void 
vsse32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m2))) +void vsse32(uint32_t * op0, ptrdiff_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m2_m))) +void vsse32(vbool16_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m4))) +void vsse32(uint32_t * op0, ptrdiff_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m4_m))) +void vsse32(vbool8_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m8))) +void vsse32(uint32_t * op0, ptrdiff_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32m8_m))) +void vsse32(vbool4_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32mf2))) +void vsse32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_u32mf2_m))) +void vsse32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m1))) +void vsse64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m1_m))) +void vsse64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m2))) +void vsse64(int64_t * op0, ptrdiff_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m2_m))) +void vsse64(vbool32_t op0, int64_t * op1, ptrdiff_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m4))) +void vsse64(int64_t * op0, ptrdiff_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m4_m))) +void vsse64(vbool16_t op0, int64_t * op1, ptrdiff_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m8))) +void vsse64(int64_t * op0, ptrdiff_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_i64m8_m))) +void vsse64(vbool8_t op0, int64_t * op1, ptrdiff_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m1))) +void vsse64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m1_m))) +void vsse64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m2))) +void vsse64(uint64_t * op0, ptrdiff_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m2_m))) +void vsse64(vbool32_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m4))) +void 
vsse64(uint64_t * op0, ptrdiff_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m4_m))) +void vsse64(vbool16_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m8))) +void vsse64(uint64_t * op0, ptrdiff_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_u64m8_m))) +void vsse64(vbool8_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8m1))) +vint8m1_t vluxei16(const int8_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8m1_m))) +vint8m1_t vluxei16(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8m2))) +vint8m2_t vluxei16(const int8_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8m2_m))) +vint8m2_t vluxei16(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8m4))) +vint8m4_t vluxei16(const int8_t * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8m4_m))) +vint8m4_t vluxei16(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8mf2))) +vint8mf2_t vluxei16(const int8_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8mf2_m))) +vint8mf2_t vluxei16(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8mf4))) +vint8mf4_t vluxei16(const int8_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8mf4_m))) +vint8mf4_t vluxei16(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8mf8))) +vint8mf8_t vluxei16(const int8_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i8mf8_m))) +vint8mf8_t vluxei16(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8m1))) +vuint8m1_t vluxei16(const uint8_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8m1_m))) +vuint8m1_t vluxei16(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8m2))) +vuint8m2_t vluxei16(const uint8_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8m2_m))) +vuint8m2_t vluxei16(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8m4))) +vuint8m4_t vluxei16(const uint8_t * op0, vuint16m8_t op1, size_t op2); + 
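+
+/* Editorial illustration only, not part of the upstream clang 14 header: a
+ * minimal sketch of the strided load/store overloads declared above. The
+ * stride argument (op1 for vsse* stores, op3 for the masked vlse64 loads)
+ * is a byte offset between consecutive elements. `src`, `dst`, and `n` are
+ * hypothetical, and vsetvl_e16m1/vle16_v_i16m1 are the clang 14 spellings
+ * assumed here:
+ *
+ *   size_t vl = vsetvl_e16m1(n);
+ *   vint16m1_t v = vle16_v_i16m1(src, vl);          // unit-stride load
+ *   vsse16(dst, 3 * (ptrdiff_t)sizeof(int16_t),     // write every third
+ *          v, vl);                                  // element of dst
+ */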
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8m4_m))) +vuint8m4_t vluxei16(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8mf2))) +vuint8mf2_t vluxei16(const uint8_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8mf2_m))) +vuint8mf2_t vluxei16(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8mf4))) +vuint8mf4_t vluxei16(const uint8_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8mf4_m))) +vuint8mf4_t vluxei16(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8mf8))) +vuint8mf8_t vluxei16(const uint8_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u8mf8_m))) +vuint8mf8_t vluxei16(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8m1))) +vint8m1_t vluxei32(const int8_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8m1_m))) +vint8m1_t vluxei32(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8m2))) +vint8m2_t vluxei32(const int8_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8m2_m))) +vint8m2_t vluxei32(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8mf2))) +vint8mf2_t vluxei32(const int8_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8mf2_m))) +vint8mf2_t vluxei32(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8mf4))) +vint8mf4_t vluxei32(const int8_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8mf4_m))) +vint8mf4_t vluxei32(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8mf8))) +vint8mf8_t vluxei32(const int8_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i8mf8_m))) +vint8mf8_t vluxei32(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8m1))) +vuint8m1_t vluxei32(const uint8_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8m1_m))) +vuint8m1_t vluxei32(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8m2))) +vuint8m2_t vluxei32(const uint8_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8m2_m))) +vuint8m2_t vluxei32(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8mf2))) +vuint8mf2_t vluxei32(const uint8_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8mf2_m))) +vuint8mf2_t vluxei32(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8mf4))) +vuint8mf4_t vluxei32(const uint8_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8mf4_m))) +vuint8mf4_t vluxei32(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8mf8))) +vuint8mf8_t vluxei32(const uint8_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u8mf8_m))) +vuint8mf8_t vluxei32(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8m1))) +vint8m1_t vluxei64(const int8_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8m1_m))) +vint8m1_t vluxei64(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8mf2))) +vint8mf2_t vluxei64(const int8_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8mf2_m))) +vint8mf2_t vluxei64(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8mf4))) +vint8mf4_t vluxei64(const int8_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8mf4_m))) +vint8mf4_t vluxei64(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8mf8))) +vint8mf8_t vluxei64(const int8_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i8mf8_m))) +vint8mf8_t vluxei64(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8m1))) +vuint8m1_t vluxei64(const uint8_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8m1_m))) +vuint8m1_t vluxei64(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8mf2))) +vuint8mf2_t vluxei64(const uint8_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8mf2_m))) +vuint8mf2_t vluxei64(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8mf4))) +vuint8mf4_t vluxei64(const uint8_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8mf4_m))) +vuint8mf4_t vluxei64(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8mf8))) +vuint8mf8_t vluxei64(const uint8_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u8mf8_m))) +vuint8mf8_t vluxei64(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m1))) +vint16m1_t vluxei8(const int16_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m1_m))) +vint16m1_t vluxei8(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m2))) +vint16m2_t vluxei8(const int16_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m2_m))) +vint16m2_t vluxei8(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m4))) +vint16m4_t vluxei8(const int16_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m4_m))) +vint16m4_t vluxei8(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m8))) +vint16m8_t vluxei8(const int16_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16m8_m))) +vint16m8_t vluxei8(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16mf2))) +vint16mf2_t vluxei8(const int16_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16mf2_m))) +vint16mf2_t vluxei8(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16mf4))) +vint16mf4_t vluxei8(const int16_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i16mf4_m))) +vint16mf4_t vluxei8(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m1))) +vuint16m1_t vluxei8(const uint16_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m1_m))) +vuint16m1_t vluxei8(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m2))) +vuint16m2_t vluxei8(const uint16_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m2_m))) +vuint16m2_t vluxei8(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m4))) +vuint16m4_t vluxei8(const uint16_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m4_m))) +vuint16m4_t vluxei8(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m8))) +vuint16m8_t vluxei8(const uint16_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16m8_m))) +vuint16m8_t vluxei8(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16mf2))) +vuint16mf2_t vluxei8(const uint16_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16mf2_m))) +vuint16mf2_t vluxei8(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16mf4))) +vuint16mf4_t vluxei8(const uint16_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u16mf4_m))) +vuint16mf4_t vluxei8(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m1))) +vint16m1_t vluxei16(const int16_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m1_m))) +vint16m1_t vluxei16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m2))) +vint16m2_t vluxei16(const int16_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m2_m))) +vint16m2_t vluxei16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m4))) +vint16m4_t vluxei16(const int16_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m4_m))) +vint16m4_t vluxei16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m8))) +vint16m8_t vluxei16(const int16_t * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16m8_m))) +vint16m8_t vluxei16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16mf2))) +vint16mf2_t vluxei16(const int16_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16mf2_m))) +vint16mf2_t vluxei16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16mf4))) +vint16mf4_t vluxei16(const int16_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i16mf4_m))) +vint16mf4_t vluxei16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m1))) +vuint16m1_t vluxei16(const uint16_t * op0, vuint16m1_t op1, size_t op2); + 
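+
+/* Editorial illustration only, not part of the upstream header: the vluxei8
+ * overloads above gather 16-bit elements using 8-bit *byte* offsets, so an
+ * i16m1 result takes a vuint8mf2_t index vector (half the register group of
+ * the data). `src`, `offsets`, and `n` are hypothetical:
+ *
+ *   size_t vl = vsetvl_e16m1(n);
+ *   vuint8mf2_t idx = vle8_v_u8mf2(offsets, vl);    // byte offsets
+ *   vint16m1_t g = vluxei8(src, idx, vl);           // unordered gather
+ */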
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m1_m))) +vuint16m1_t vluxei16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m2))) +vuint16m2_t vluxei16(const uint16_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m2_m))) +vuint16m2_t vluxei16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m4))) +vuint16m4_t vluxei16(const uint16_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m4_m))) +vuint16m4_t vluxei16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m8))) +vuint16m8_t vluxei16(const uint16_t * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16m8_m))) +vuint16m8_t vluxei16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16mf2))) +vuint16mf2_t vluxei16(const uint16_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16mf2_m))) +vuint16mf2_t vluxei16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16mf4))) +vuint16mf4_t vluxei16(const uint16_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u16mf4_m))) +vuint16mf4_t vluxei16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16m1))) +vint16m1_t vluxei32(const int16_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16m1_m))) +vint16m1_t vluxei32(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16m2))) +vint16m2_t vluxei32(const int16_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16m2_m))) +vint16m2_t vluxei32(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16m4))) +vint16m4_t vluxei32(const int16_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16m4_m))) +vint16m4_t vluxei32(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16mf2))) +vint16mf2_t vluxei32(const int16_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16mf2_m))) +vint16mf2_t vluxei32(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16mf4))) +vint16mf4_t vluxei32(const int16_t 
* op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i16mf4_m))) +vint16mf4_t vluxei32(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16m1))) +vuint16m1_t vluxei32(const uint16_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16m1_m))) +vuint16m1_t vluxei32(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16m2))) +vuint16m2_t vluxei32(const uint16_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16m2_m))) +vuint16m2_t vluxei32(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16m4))) +vuint16m4_t vluxei32(const uint16_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16m4_m))) +vuint16m4_t vluxei32(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16mf2))) +vuint16mf2_t vluxei32(const uint16_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16mf2_m))) +vuint16mf2_t vluxei32(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16mf4))) +vuint16mf4_t vluxei32(const uint16_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u16mf4_m))) +vuint16mf4_t vluxei32(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16m1))) +vint16m1_t vluxei64(const int16_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16m1_m))) +vint16m1_t vluxei64(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16m2))) +vint16m2_t vluxei64(const int16_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16m2_m))) +vint16m2_t vluxei64(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16mf2))) +vint16mf2_t vluxei64(const int16_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16mf2_m))) +vint16mf2_t vluxei64(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16mf4))) +vint16mf4_t vluxei64(const int16_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i16mf4_m))) +vint16mf4_t vluxei64(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16m1))) +vuint16m1_t vluxei64(const uint16_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16m1_m))) +vuint16m1_t vluxei64(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16m2))) +vuint16m2_t vluxei64(const uint16_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16m2_m))) +vuint16m2_t vluxei64(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16mf2))) +vuint16mf2_t vluxei64(const uint16_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16mf2_m))) +vuint16mf2_t vluxei64(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16mf4))) +vuint16mf4_t vluxei64(const uint16_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u16mf4_m))) +vuint16mf4_t vluxei64(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m1))) +vint32m1_t vluxei8(const int32_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m1_m))) +vint32m1_t vluxei8(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m2))) +vint32m2_t vluxei8(const int32_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m2_m))) +vint32m2_t vluxei8(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m4))) +vint32m4_t vluxei8(const int32_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m4_m))) +vint32m4_t vluxei8(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m8))) +vint32m8_t vluxei8(const int32_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32m8_m))) +vint32m8_t vluxei8(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32mf2))) +vint32mf2_t vluxei8(const int32_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i32mf2_m))) +vint32mf2_t vluxei8(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m1))) +vuint32m1_t vluxei8(const uint32_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m1_m))) +vuint32m1_t vluxei8(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint8mf4_t op3, size_t op4); + 
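+
+/* Editorial illustration only, not part of the upstream header: in the
+ * masked overloads above the argument order is (mask, maskedoff, base,
+ * index, vl); inactive lanes take their value from the masked-off operand.
+ * `m`, `fallback`, `base`, `idx`, and `vl` are hypothetical:
+ *
+ *   vint32m1_t r = vluxei8(m, fallback, base, idx, vl);
+ *   // m: vbool32_t, fallback: vint32m1_t, idx: vuint8mf4_t
+ */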
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m2))) +vuint32m2_t vluxei8(const uint32_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m2_m))) +vuint32m2_t vluxei8(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m4))) +vuint32m4_t vluxei8(const uint32_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m4_m))) +vuint32m4_t vluxei8(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m8))) +vuint32m8_t vluxei8(const uint32_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32m8_m))) +vuint32m8_t vluxei8(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32mf2))) +vuint32mf2_t vluxei8(const uint32_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u32mf2_m))) +vuint32mf2_t vluxei8(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m1))) +vint32m1_t vluxei16(const int32_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m1_m))) +vint32m1_t vluxei16(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m2))) +vint32m2_t vluxei16(const int32_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m2_m))) +vint32m2_t vluxei16(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m4))) +vint32m4_t vluxei16(const int32_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m4_m))) +vint32m4_t vluxei16(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m8))) +vint32m8_t vluxei16(const int32_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32m8_m))) +vint32m8_t vluxei16(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32mf2))) +vint32mf2_t vluxei16(const int32_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i32mf2_m))) +vint32mf2_t vluxei16(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m1))) +vuint32m1_t vluxei16(const uint32_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m1_m))) +vuint32m1_t vluxei16(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint16mf2_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m2))) +vuint32m2_t vluxei16(const uint32_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m2_m))) +vuint32m2_t vluxei16(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m4))) +vuint32m4_t vluxei16(const uint32_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m4_m))) +vuint32m4_t vluxei16(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m8))) +vuint32m8_t vluxei16(const uint32_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32m8_m))) +vuint32m8_t vluxei16(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32mf2))) +vuint32mf2_t vluxei16(const uint32_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u32mf2_m))) +vuint32mf2_t vluxei16(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m1))) +vint32m1_t vluxei32(const int32_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m1_m))) +vint32m1_t vluxei32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m2))) +vint32m2_t vluxei32(const int32_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m2_m))) +vint32m2_t vluxei32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m4))) +vint32m4_t vluxei32(const int32_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m4_m))) +vint32m4_t vluxei32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m8))) +vint32m8_t vluxei32(const int32_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32m8_m))) +vint32m8_t vluxei32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32mf2))) +vint32mf2_t vluxei32(const int32_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i32mf2_m))) +vint32mf2_t vluxei32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m1))) +vuint32m1_t vluxei32(const uint32_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m1_m))) +vuint32m1_t vluxei32(vbool32_t op0, vuint32m1_t op1, const 
uint32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m2))) +vuint32m2_t vluxei32(const uint32_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m2_m))) +vuint32m2_t vluxei32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m4))) +vuint32m4_t vluxei32(const uint32_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m4_m))) +vuint32m4_t vluxei32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m8))) +vuint32m8_t vluxei32(const uint32_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32m8_m))) +vuint32m8_t vluxei32(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32mf2))) +vuint32mf2_t vluxei32(const uint32_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u32mf2_m))) +vuint32mf2_t vluxei32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32m1))) +vint32m1_t vluxei64(const int32_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32m1_m))) +vint32m1_t vluxei64(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32m2))) +vint32m2_t vluxei64(const int32_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32m2_m))) +vint32m2_t vluxei64(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32m4))) +vint32m4_t vluxei64(const int32_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32m4_m))) +vint32m4_t vluxei64(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32mf2))) +vint32mf2_t vluxei64(const int32_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i32mf2_m))) +vint32mf2_t vluxei64(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32m1))) +vuint32m1_t vluxei64(const uint32_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32m1_m))) +vuint32m1_t vluxei64(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32m2))) +vuint32m2_t vluxei64(const uint32_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32m2_m))) +vuint32m2_t 
vluxei64(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32m4))) +vuint32m4_t vluxei64(const uint32_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32m4_m))) +vuint32m4_t vluxei64(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32mf2))) +vuint32mf2_t vluxei64(const uint32_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u32mf2_m))) +vuint32mf2_t vluxei64(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m1))) +vint64m1_t vluxei8(const int64_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m1_m))) +vint64m1_t vluxei8(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m2))) +vint64m2_t vluxei8(const int64_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m2_m))) +vint64m2_t vluxei8(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m4))) +vint64m4_t vluxei8(const int64_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m4_m))) +vint64m4_t vluxei8(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m8))) +vint64m8_t vluxei8(const int64_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i64m8_m))) +vint64m8_t vluxei8(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m1))) +vuint64m1_t vluxei8(const uint64_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m1_m))) +vuint64m1_t vluxei8(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m2))) +vuint64m2_t vluxei8(const uint64_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m2_m))) +vuint64m2_t vluxei8(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m4))) +vuint64m4_t vluxei8(const uint64_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m4_m))) +vuint64m4_t vluxei8(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m8))) +vuint64m8_t vluxei8(const uint64_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_u64m8_m))) +vuint64m8_t 
vluxei8(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m1))) +vint64m1_t vluxei16(const int64_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m1_m))) +vint64m1_t vluxei16(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m2))) +vint64m2_t vluxei16(const int64_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m2_m))) +vint64m2_t vluxei16(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m4))) +vint64m4_t vluxei16(const int64_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m4_m))) +vint64m4_t vluxei16(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m8))) +vint64m8_t vluxei16(const int64_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_i64m8_m))) +vint64m8_t vluxei16(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m1))) +vuint64m1_t vluxei16(const uint64_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m1_m))) +vuint64m1_t vluxei16(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m2))) +vuint64m2_t vluxei16(const uint64_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m2_m))) +vuint64m2_t vluxei16(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m4))) +vuint64m4_t vluxei16(const uint64_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m4_m))) +vuint64m4_t vluxei16(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m8))) +vuint64m8_t vluxei16(const uint64_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_u64m8_m))) +vuint64m8_t vluxei16(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m1))) +vint64m1_t vluxei32(const int64_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m1_m))) +vint64m1_t vluxei32(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m2))) +vint64m2_t vluxei32(const int64_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m2_m))) +vint64m2_t vluxei32(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m4))) +vint64m4_t vluxei32(const int64_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m4_m))) +vint64m4_t vluxei32(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m8))) +vint64m8_t vluxei32(const int64_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_i64m8_m))) +vint64m8_t vluxei32(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m1))) +vuint64m1_t vluxei32(const uint64_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m1_m))) +vuint64m1_t vluxei32(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m2))) +vuint64m2_t vluxei32(const uint64_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m2_m))) +vuint64m2_t vluxei32(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m4))) +vuint64m4_t vluxei32(const uint64_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m4_m))) +vuint64m4_t vluxei32(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m8))) +vuint64m8_t vluxei32(const uint64_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_u64m8_m))) +vuint64m8_t vluxei32(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m1))) +vint64m1_t vluxei64(const int64_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m1_m))) +vint64m1_t vluxei64(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m2))) +vint64m2_t vluxei64(const int64_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m2_m))) +vint64m2_t vluxei64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m4))) +vint64m4_t vluxei64(const int64_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m4_m))) +vint64m4_t vluxei64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m8))) +vint64m8_t vluxei64(const int64_t * op0, vuint64m8_t op1, size_t op2); + 
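Editorial aside, not part of the vendored header: the vluxei<eew> overloads above are RVV's unordered indexed (gather) loads. In the unmasked form op0 is the base pointer, op1 a vector of byte offsets, and op2 the active vector length; the masked _m forms prepend a mask (vbool*_t) and a merge ("maskedoff") vector. A minimal usage sketch under those assumptions, with the hypothetical helper name gather_i32 and the vsetvl_e32m1 intrinsic from this same header:

    #include <riscv_vector.h>

    /* Gather up to n 32-bit ints from base at 64-bit byte offsets
       (illustrative sketch only, not shipped code). */
    vint32m1_t gather_i32(const int32_t *base, vuint64m2_t byte_offs, size_t n) {
      size_t vl = vsetvl_e32m1(n);          /* elements handled this pass */
      return vluxei64(base, byte_offs, vl); /* unordered indexed load */
    }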
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_i64m8_m))) +vint64m8_t vluxei64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m1))) +vuint64m1_t vluxei64(const uint64_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m1_m))) +vuint64m1_t vluxei64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m2))) +vuint64m2_t vluxei64(const uint64_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m2_m))) +vuint64m2_t vluxei64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m4))) +vuint64m4_t vluxei64(const uint64_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m4_m))) +vuint64m4_t vluxei64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m8))) +vuint64m8_t vluxei64(const uint64_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_u64m8_m))) +vuint64m8_t vluxei64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m1))) +vint8m1_t vloxei8(const int8_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m1_m))) +vint8m1_t vloxei8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m2))) +vint8m2_t vloxei8(const int8_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m2_m))) +vint8m2_t vloxei8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m4))) +vint8m4_t vloxei8(const int8_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m4_m))) +vint8m4_t vloxei8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m8))) +vint8m8_t vloxei8(const int8_t * op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8m8_m))) +vint8m8_t vloxei8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8mf2))) +vint8mf2_t vloxei8(const int8_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8mf2_m))) +vint8mf2_t vloxei8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8mf4))) +vint8mf4_t vloxei8(const int8_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8mf4_m))) +vint8mf4_t vloxei8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8mf8))) +vint8mf8_t vloxei8(const int8_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i8mf8_m))) +vint8mf8_t vloxei8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m1))) +vuint8m1_t vloxei8(const uint8_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m1_m))) +vuint8m1_t vloxei8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m2))) +vuint8m2_t vloxei8(const uint8_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m2_m))) +vuint8m2_t vloxei8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m4))) +vuint8m4_t vloxei8(const uint8_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m4_m))) +vuint8m4_t vloxei8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m8))) +vuint8m8_t vloxei8(const uint8_t * op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8m8_m))) +vuint8m8_t vloxei8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8mf2))) +vuint8mf2_t vloxei8(const uint8_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8mf2_m))) +vuint8mf2_t vloxei8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8mf4))) +vuint8mf4_t vloxei8(const uint8_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8mf4_m))) +vuint8mf4_t vloxei8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8mf8))) +vuint8mf8_t vloxei8(const uint8_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u8mf8_m))) +vuint8mf8_t vloxei8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8m1))) +vint8m1_t vloxei16(const int8_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8m1_m))) +vint8m1_t vloxei16(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8m2))) +vint8m2_t vloxei16(const int8_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8m2_m))) +vint8m2_t vloxei16(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8m4))) +vint8m4_t vloxei16(const int8_t * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8m4_m))) +vint8m4_t vloxei16(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8mf2))) +vint8mf2_t vloxei16(const int8_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8mf2_m))) +vint8mf2_t vloxei16(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8mf4))) +vint8mf4_t vloxei16(const int8_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8mf4_m))) +vint8mf4_t vloxei16(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8mf8))) +vint8mf8_t vloxei16(const int8_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i8mf8_m))) +vint8mf8_t vloxei16(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8m1))) +vuint8m1_t vloxei16(const uint8_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8m1_m))) +vuint8m1_t vloxei16(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8m2))) +vuint8m2_t vloxei16(const uint8_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8m2_m))) +vuint8m2_t vloxei16(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8m4))) +vuint8m4_t vloxei16(const uint8_t * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8m4_m))) +vuint8m4_t vloxei16(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8mf2))) +vuint8mf2_t vloxei16(const uint8_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8mf2_m))) +vuint8mf2_t vloxei16(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8mf4))) +vuint8mf4_t vloxei16(const uint8_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8mf4_m))) +vuint8mf4_t vloxei16(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8mf8))) +vuint8mf8_t vloxei16(const uint8_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u8mf8_m))) +vuint8mf8_t vloxei16(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8m1))) +vint8m1_t vloxei32(const int8_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8m1_m))) +vint8m1_t vloxei32(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8m2))) +vint8m2_t vloxei32(const int8_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8m2_m))) +vint8m2_t vloxei32(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8mf2))) +vint8mf2_t vloxei32(const int8_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8mf2_m))) +vint8mf2_t vloxei32(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8mf4))) +vint8mf4_t vloxei32(const int8_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8mf4_m))) +vint8mf4_t vloxei32(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8mf8))) +vint8mf8_t vloxei32(const int8_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i8mf8_m))) +vint8mf8_t vloxei32(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8m1))) +vuint8m1_t vloxei32(const uint8_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8m1_m))) +vuint8m1_t vloxei32(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8m2))) +vuint8m2_t vloxei32(const uint8_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8m2_m))) +vuint8m2_t vloxei32(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8mf2))) +vuint8mf2_t vloxei32(const uint8_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8mf2_m))) +vuint8mf2_t vloxei32(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8mf4))) +vuint8mf4_t vloxei32(const uint8_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8mf4_m))) +vuint8mf4_t vloxei32(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8mf8))) +vuint8mf8_t vloxei32(const uint8_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u8mf8_m))) +vuint8mf8_t vloxei32(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8m1))) +vint8m1_t vloxei64(const int8_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8m1_m))) +vint8m1_t vloxei64(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8mf2))) +vint8mf2_t vloxei64(const int8_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8mf2_m))) +vint8mf2_t vloxei64(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8mf4))) +vint8mf4_t vloxei64(const int8_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8mf4_m))) +vint8mf4_t vloxei64(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8mf8))) +vint8mf8_t vloxei64(const int8_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i8mf8_m))) +vint8mf8_t vloxei64(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8m1))) +vuint8m1_t vloxei64(const uint8_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8m1_m))) +vuint8m1_t vloxei64(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8mf2))) +vuint8mf2_t vloxei64(const uint8_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8mf2_m))) +vuint8mf2_t vloxei64(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8mf4))) +vuint8mf4_t vloxei64(const uint8_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8mf4_m))) +vuint8mf4_t vloxei64(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8mf8))) +vuint8mf8_t vloxei64(const uint8_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u8mf8_m))) +vuint8mf8_t vloxei64(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m1))) +vint16m1_t vloxei8(const int16_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m1_m))) +vint16m1_t vloxei8(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m2))) +vint16m2_t vloxei8(const int16_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m2_m))) +vint16m2_t vloxei8(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m4))) +vint16m4_t vloxei8(const int16_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m4_m))) +vint16m4_t vloxei8(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m8))) +vint16m8_t vloxei8(const int16_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16m8_m))) +vint16m8_t vloxei8(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16mf2))) +vint16mf2_t vloxei8(const int16_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16mf2_m))) +vint16mf2_t vloxei8(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16mf4))) +vint16mf4_t vloxei8(const int16_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i16mf4_m))) +vint16mf4_t vloxei8(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m1))) +vuint16m1_t vloxei8(const uint16_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m1_m))) +vuint16m1_t vloxei8(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m2))) +vuint16m2_t vloxei8(const uint16_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m2_m))) +vuint16m2_t vloxei8(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m4))) +vuint16m4_t vloxei8(const uint16_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m4_m))) +vuint16m4_t vloxei8(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m8))) +vuint16m8_t vloxei8(const uint16_t * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16m8_m))) +vuint16m8_t vloxei8(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16mf2))) +vuint16mf2_t vloxei8(const uint16_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16mf2_m))) +vuint16mf2_t vloxei8(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16mf4))) +vuint16mf4_t vloxei8(const uint16_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u16mf4_m))) +vuint16mf4_t vloxei8(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m1))) +vint16m1_t vloxei16(const int16_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m1_m))) +vint16m1_t vloxei16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m2))) +vint16m2_t vloxei16(const int16_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m2_m))) +vint16m2_t vloxei16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m4))) +vint16m4_t vloxei16(const int16_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m4_m))) +vint16m4_t vloxei16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m8))) +vint16m8_t vloxei16(const int16_t * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16m8_m))) +vint16m8_t vloxei16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16mf2))) +vint16mf2_t vloxei16(const int16_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16mf2_m))) +vint16mf2_t vloxei16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16mf4))) +vint16mf4_t vloxei16(const int16_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i16mf4_m))) +vint16mf4_t vloxei16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m1))) +vuint16m1_t vloxei16(const uint16_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m1_m))) +vuint16m1_t vloxei16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m2))) +vuint16m2_t vloxei16(const uint16_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m2_m))) +vuint16m2_t vloxei16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m4))) +vuint16m4_t vloxei16(const uint16_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m4_m))) +vuint16m4_t vloxei16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m8))) +vuint16m8_t vloxei16(const uint16_t * op0, vuint16m8_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16m8_m))) +vuint16m8_t vloxei16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16mf2))) +vuint16mf2_t vloxei16(const uint16_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16mf2_m))) +vuint16mf2_t vloxei16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16mf4))) +vuint16mf4_t vloxei16(const uint16_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u16mf4_m))) +vuint16mf4_t vloxei16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16m1))) +vint16m1_t vloxei32(const int16_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16m1_m))) +vint16m1_t vloxei32(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16m2))) +vint16m2_t vloxei32(const int16_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16m2_m))) +vint16m2_t vloxei32(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16m4))) +vint16m4_t vloxei32(const int16_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16m4_m))) +vint16m4_t vloxei32(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16mf2))) +vint16mf2_t vloxei32(const int16_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16mf2_m))) +vint16mf2_t vloxei32(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16mf4))) +vint16mf4_t vloxei32(const int16_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i16mf4_m))) +vint16mf4_t vloxei32(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16m1))) +vuint16m1_t vloxei32(const uint16_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16m1_m))) +vuint16m1_t vloxei32(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16m2))) +vuint16m2_t vloxei32(const uint16_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16m2_m))) +vuint16m2_t vloxei32(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16m4))) +vuint16m4_t 
vloxei32(const uint16_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16m4_m))) +vuint16m4_t vloxei32(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16mf2))) +vuint16mf2_t vloxei32(const uint16_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16mf2_m))) +vuint16mf2_t vloxei32(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16mf4))) +vuint16mf4_t vloxei32(const uint16_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u16mf4_m))) +vuint16mf4_t vloxei32(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16m1))) +vint16m1_t vloxei64(const int16_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16m1_m))) +vint16m1_t vloxei64(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16m2))) +vint16m2_t vloxei64(const int16_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16m2_m))) +vint16m2_t vloxei64(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16mf2))) +vint16mf2_t vloxei64(const int16_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16mf2_m))) +vint16mf2_t vloxei64(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16mf4))) +vint16mf4_t vloxei64(const int16_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i16mf4_m))) +vint16mf4_t vloxei64(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16m1))) +vuint16m1_t vloxei64(const uint16_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16m1_m))) +vuint16m1_t vloxei64(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16m2))) +vuint16m2_t vloxei64(const uint16_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16m2_m))) +vuint16m2_t vloxei64(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16mf2))) +vuint16mf2_t vloxei64(const uint16_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16mf2_m))) +vuint16mf2_t vloxei64(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16mf4))) +vuint16mf4_t vloxei64(const uint16_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u16mf4_m))) +vuint16mf4_t vloxei64(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m1))) +vint32m1_t vloxei8(const int32_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m1_m))) +vint32m1_t vloxei8(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m2))) +vint32m2_t vloxei8(const int32_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m2_m))) +vint32m2_t vloxei8(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m4))) +vint32m4_t vloxei8(const int32_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m4_m))) +vint32m4_t vloxei8(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m8))) +vint32m8_t vloxei8(const int32_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32m8_m))) +vint32m8_t vloxei8(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32mf2))) +vint32mf2_t vloxei8(const int32_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i32mf2_m))) +vint32mf2_t vloxei8(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m1))) +vuint32m1_t vloxei8(const uint32_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m1_m))) +vuint32m1_t vloxei8(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m2))) +vuint32m2_t vloxei8(const uint32_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m2_m))) +vuint32m2_t vloxei8(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m4))) +vuint32m4_t vloxei8(const uint32_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m4_m))) +vuint32m4_t vloxei8(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m8))) +vuint32m8_t vloxei8(const uint32_t * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32m8_m))) +vuint32m8_t vloxei8(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32mf2))) +vuint32mf2_t vloxei8(const uint32_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u32mf2_m))) +vuint32mf2_t vloxei8(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m1))) +vint32m1_t vloxei16(const int32_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m1_m))) +vint32m1_t vloxei16(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m2))) +vint32m2_t vloxei16(const int32_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m2_m))) +vint32m2_t vloxei16(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m4))) +vint32m4_t vloxei16(const int32_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m4_m))) +vint32m4_t vloxei16(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m8))) +vint32m8_t vloxei16(const int32_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32m8_m))) +vint32m8_t vloxei16(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32mf2))) +vint32mf2_t vloxei16(const int32_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i32mf2_m))) +vint32mf2_t vloxei16(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m1))) +vuint32m1_t vloxei16(const uint32_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m1_m))) +vuint32m1_t vloxei16(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m2))) +vuint32m2_t vloxei16(const uint32_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m2_m))) +vuint32m2_t vloxei16(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m4))) +vuint32m4_t vloxei16(const uint32_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m4_m))) +vuint32m4_t vloxei16(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m8))) +vuint32m8_t vloxei16(const uint32_t * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32m8_m))) +vuint32m8_t vloxei16(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint16m4_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32mf2))) +vuint32mf2_t vloxei16(const uint32_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u32mf2_m))) +vuint32mf2_t vloxei16(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m1))) +vint32m1_t vloxei32(const int32_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m1_m))) +vint32m1_t vloxei32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m2))) +vint32m2_t vloxei32(const int32_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m2_m))) +vint32m2_t vloxei32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m4))) +vint32m4_t vloxei32(const int32_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m4_m))) +vint32m4_t vloxei32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m8))) +vint32m8_t vloxei32(const int32_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32m8_m))) +vint32m8_t vloxei32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32mf2))) +vint32mf2_t vloxei32(const int32_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i32mf2_m))) +vint32mf2_t vloxei32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m1))) +vuint32m1_t vloxei32(const uint32_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m1_m))) +vuint32m1_t vloxei32(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m2))) +vuint32m2_t vloxei32(const uint32_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m2_m))) +vuint32m2_t vloxei32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m4))) +vuint32m4_t vloxei32(const uint32_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m4_m))) +vuint32m4_t vloxei32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m8))) +vuint32m8_t vloxei32(const uint32_t * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32m8_m))) +vuint32m8_t vloxei32(vbool4_t op0, vuint32m8_t op1, const 
uint32_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32mf2))) +vuint32mf2_t vloxei32(const uint32_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u32mf2_m))) +vuint32mf2_t vloxei32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32m1))) +vint32m1_t vloxei64(const int32_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32m1_m))) +vint32m1_t vloxei64(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32m2))) +vint32m2_t vloxei64(const int32_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32m2_m))) +vint32m2_t vloxei64(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32m4))) +vint32m4_t vloxei64(const int32_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32m4_m))) +vint32m4_t vloxei64(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32mf2))) +vint32mf2_t vloxei64(const int32_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i32mf2_m))) +vint32mf2_t vloxei64(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32m1))) +vuint32m1_t vloxei64(const uint32_t * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32m1_m))) +vuint32m1_t vloxei64(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32m2))) +vuint32m2_t vloxei64(const uint32_t * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32m2_m))) +vuint32m2_t vloxei64(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32m4))) +vuint32m4_t vloxei64(const uint32_t * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32m4_m))) +vuint32m4_t vloxei64(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32mf2))) +vuint32mf2_t vloxei64(const uint32_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u32mf2_m))) +vuint32mf2_t vloxei64(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m1))) +vint64m1_t vloxei8(const int64_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m1_m))) +vint64m1_t 
vloxei8(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m2))) +vint64m2_t vloxei8(const int64_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m2_m))) +vint64m2_t vloxei8(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m4))) +vint64m4_t vloxei8(const int64_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m4_m))) +vint64m4_t vloxei8(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m8))) +vint64m8_t vloxei8(const int64_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_i64m8_m))) +vint64m8_t vloxei8(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m1))) +void vse8(int8_t * op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m1_m))) +void vse8(vbool8_t op0, int8_t * op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m2))) +void vse8(int8_t * op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m2_m))) +void vse8(vbool4_t op0, int8_t * op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m4))) +void vse8(int8_t * op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m4_m))) +void vse8(vbool2_t op0, int8_t * op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m8))) +void vse8(int8_t * op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8m8_m))) +void vse8(vbool1_t op0, int8_t * op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8mf2))) +void vse8(int8_t * op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8mf2_m))) +void vse8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8mf4))) +void vse8(int8_t * op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8mf4_m))) +void vse8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8mf8))) +void vse8(int8_t * op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_i8mf8_m))) +void vse8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m1))) +vuint64m1_t vloxei8(const uint64_t * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m1_m))) +vuint64m1_t vloxei8(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint8mf8_t op3, size_t op4); + 
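Another editorial sketch, again not part of the diff: vloxei<eew> is the ordered counterpart of vluxei<eew> (element accesses are performed in element order, which matters when accesses have side effects), and vse<eew> is the plain unit-stride store, whose masked form takes the mask first, then base, value, and vl. A hypothetical ordered byte gather followed by a contiguous store, using the overloads declared above:

    #include <riscv_vector.h>

    /* Ordered byte gather, then contiguous store (illustrative only). */
    void gather_then_store_u8(uint8_t *dst, const uint8_t *src,
                              vuint8m1_t byte_offs, size_t n) {
      size_t vl = vsetvl_e8m1(n);
      vuint8m1_t v = vloxei8(src, byte_offs, vl); /* ordered indexed load */
      vse8(dst, v, vl);                           /* unit-stride store */
    }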
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m2))) +vuint64m2_t vloxei8(const uint64_t * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m2_m))) +vuint64m2_t vloxei8(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m4))) +vuint64m4_t vloxei8(const uint64_t * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m4_m))) +vuint64m4_t vloxei8(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m8))) +vuint64m8_t vloxei8(const uint64_t * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_u64m8_m))) +vuint64m8_t vloxei8(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m1))) +vint64m1_t vloxei16(const int64_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m1_m))) +vint64m1_t vloxei16(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m2))) +vint64m2_t vloxei16(const int64_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m2_m))) +vint64m2_t vloxei16(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m4))) +vint64m4_t vloxei16(const int64_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m4_m))) +vint64m4_t vloxei16(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m8))) +vint64m8_t vloxei16(const int64_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_i64m8_m))) +vint64m8_t vloxei16(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m1))) +vuint64m1_t vloxei16(const uint64_t * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m1_m))) +vuint64m1_t vloxei16(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m2))) +vuint64m2_t vloxei16(const uint64_t * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m2_m))) +vuint64m2_t vloxei16(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m4))) +vuint64m4_t vloxei16(const uint64_t * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m4_m))) +vuint64m4_t vloxei16(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint16m1_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m8))) +vuint64m8_t vloxei16(const uint64_t * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_u64m8_m))) +vuint64m8_t vloxei16(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m1))) +vint64m1_t vloxei32(const int64_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m1_m))) +vint64m1_t vloxei32(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m2))) +vint64m2_t vloxei32(const int64_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m2_m))) +vint64m2_t vloxei32(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m4))) +vint64m4_t vloxei32(const int64_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m4_m))) +vint64m4_t vloxei32(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m8))) +vint64m8_t vloxei32(const int64_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_i64m8_m))) +vint64m8_t vloxei32(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m1))) +vuint64m1_t vloxei32(const uint64_t * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m1_m))) +vuint64m1_t vloxei32(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m2))) +vuint64m2_t vloxei32(const uint64_t * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m2_m))) +vuint64m2_t vloxei32(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m4))) +vuint64m4_t vloxei32(const uint64_t * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m4_m))) +vuint64m4_t vloxei32(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m8))) +vuint64m8_t vloxei32(const uint64_t * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_u64m8_m))) +vuint64m8_t vloxei32(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m1))) +vint64m1_t vloxei64(const int64_t * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m1_m))) +vint64m1_t vloxei64(vbool64_t op0, vint64m1_t op1, const 
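The vloxei{8,16,32,64} overloads above are the ordered indexed (gather) loads of the RVV v0.10 intrinsics: op0 is the base pointer, op1 a vector of unsigned byte offsets whose element width gives the name's suffix, and the trailing size_t is the active vector length. A minimal usage sketch, assuming vsetvl_e64m1 and the vle8/vse64 unit-stride intrinsics declared elsewhere in this header (gather_i64 itself is a hypothetical helper):

#include <riscv_vector.h>

/* Gather int64 values from the byte offsets idx[]; processes one strip
 * of at most vl elements. */
void gather_i64(int64_t *dst, const int64_t *base,
                const uint8_t *idx, size_t n) {
  size_t vl = vsetvl_e64m1(n);              /* assumed available */
  vuint8mf8_t offs = vle8_v_u8mf8(idx, vl); /* load byte offsets   */
  vint64m1_t v = vloxei8(base, offs, vl);   /* indexed gather load */
  vse64_v_i64m1(dst, v, vl);                /* contiguous store    */
}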
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m2)))
+vint64m2_t vloxei64(const int64_t * op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m2_m)))
+vint64m2_t vloxei64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m4)))
+vint64m4_t vloxei64(const int64_t * op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m4_m)))
+vint64m4_t vloxei64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m8)))
+vint64m8_t vloxei64(const int64_t * op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_i64m8_m)))
+vint64m8_t vloxei64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m1)))
+vuint64m1_t vloxei64(const uint64_t * op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m1_m)))
+vuint64m1_t vloxei64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m2)))
+vuint64m2_t vloxei64(const uint64_t * op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m2_m)))
+vuint64m2_t vloxei64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m4)))
+vuint64m4_t vloxei64(const uint64_t * op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m4_m)))
+vuint64m4_t vloxei64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m8)))
+vuint64m8_t vloxei64(const uint64_t * op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_u64m8_m)))
+vuint64m8_t vloxei64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m1)))
+void vse8(uint8_t * op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m1_m)))
+void vse8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m2)))
+void vse8(uint8_t * op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m2_m)))
+void vse8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m4)))
+void vse8(uint8_t * op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m4_m)))
+void vse8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m8)))
+void vse8(uint8_t * op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8m8_m)))
+void vse8(vbool1_t op0, uint8_t * op1, vuint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8mf2)))
+void vse8(uint8_t * op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8mf2_m)))
+void vse8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8mf4)))
+void vse8(uint8_t * op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8mf4_m)))
+void vse8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8mf8)))
+void vse8(uint8_t * op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse8_v_u8mf8_m)))
+void vse8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16mf4)))
+vuint16mf4_t vwaddu_wv(vuint16mf4_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16mf4_m)))
+vuint16mf4_t vwaddu_wv(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16mf2)))
+vuint16mf2_t vwaddu_wv(vuint16mf2_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16mf2_m)))
+vuint16mf2_t vwaddu_wv(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m1)))
+vuint16m1_t vwaddu_wv(vuint16m1_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m1_m)))
+vuint16m1_t vwaddu_wv(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m2)))
+vuint16m2_t vwaddu_wv(vuint16m2_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m2_m)))
+vuint16m2_t vwaddu_wv(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m4)))
+vuint16m4_t vwaddu_wv(vuint16m4_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m4_m)))
+vuint16m4_t vwaddu_wv(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m8)))
+vuint16m8_t vwaddu_wv(vuint16m8_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u16m8_m)))
+vuint16m8_t vwaddu_wv(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32mf2)))
+vuint32mf2_t vwaddu_wv(vuint32mf2_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32mf2_m)))
+vuint32mf2_t vwaddu_wv(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m1)))
+vuint32m1_t vwaddu_wv(vuint32m1_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m1_m)))
+vuint32m1_t vwaddu_wv(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m2)))
+vuint32m2_t vwaddu_wv(vuint32m2_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m2_m)))
+vuint32m2_t vwaddu_wv(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m4)))
+vuint32m4_t vwaddu_wv(vuint32m4_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m4_m)))
+vuint32m4_t vwaddu_wv(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m8)))
+vuint32m8_t vwaddu_wv(vuint32m8_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u32m8_m)))
+vuint32m8_t vwaddu_wv(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m1)))
+vuint64m1_t vwaddu_wv(vuint64m1_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m1_m)))
+vuint64m1_t vwaddu_wv(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m2)))
+vuint64m2_t vwaddu_wv(vuint64m2_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m2_m)))
+vuint64m2_t vwaddu_wv(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m4)))
+vuint64m4_t vwaddu_wv(vuint64m4_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m4_m)))
+vuint64m4_t vwaddu_wv(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m8)))
+vuint64m8_t vwaddu_wv(vuint64m8_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wv_u64m8_m)))
+vuint64m8_t vwaddu_wv(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4);
+
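vwaddu_wv is the widening unsigned add whose first source is already the wide type (w = w + widen(v)): the result and op0 use the 2*SEW vector type, op1 is the narrow source, and the _m forms prepend the mask and maskedoff operands. A small sketch using only the overloads above (accumulate_u8 is a hypothetical helper):

/* Accumulate 8-bit samples into 16-bit partial sums without overflow. */
vuint16m2_t accumulate_u8(vuint16m2_t acc, vuint8m1_t samples, size_t vl) {
  return vwaddu_wv(acc, samples, vl); /* acc[i] += (uint16_t)samples[i] */
}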
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m1)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m1_m)))
+void vsse8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m2)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m2_m)))
+void vsse8(vbool4_t op0, int8_t * op1, ptrdiff_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m4)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m4_m)))
+void vsse8(vbool2_t op0, int8_t * op1, ptrdiff_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m8)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8m8_m)))
+void vsse8(vbool1_t op0, int8_t * op1, ptrdiff_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8mf2)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8mf2_m)))
+void vsse8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8mf4)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8mf4_m)))
+void vsse8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8mf8)))
+void vsse8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_i8mf8_m)))
+void vsse8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8m1)))
+void vsuxei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8m1_m)))
+void vsuxei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8m2)))
+void vsuxei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8m2_m)))
+void vsuxei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8m4)))
+void vsuxei16(int8_t * op0, vuint16m8_t op1, vint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8m4_m)))
+void vsuxei16(vbool2_t op0, int8_t * op1, vuint16m8_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8mf2)))
+void vsuxei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8mf2_m)))
+void vsuxei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8mf4)))
+void vsuxei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8mf4_m)))
+void vsuxei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8mf8)))
+void vsuxei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i8mf8_m)))
+void vsuxei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8m1)))
+void vsuxei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8m1_m)))
+void vsuxei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8m2)))
+void vsuxei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8m2_m)))
+void vsuxei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8m4)))
+void vsuxei16(uint8_t * op0, vuint16m8_t op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8m4_m)))
+void vsuxei16(vbool2_t op0, uint8_t * op1, vuint16m8_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8mf2)))
+void vsuxei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8mf2_m)))
+void vsuxei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8mf4)))
+void vsuxei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8mf4_m)))
+void vsuxei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8mf8)))
+void vsuxei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u8mf8_m)))
+void vsuxei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8m1)))
+void vsuxei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8m1_m)))
+void vsuxei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8m2)))
+void vsuxei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8m2_m)))
+void vsuxei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8mf2)))
+void vsuxei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8mf2_m)))
+void vsuxei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8mf4)))
+void vsuxei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8mf4_m)))
+void vsuxei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, size_t op4);
+
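The store families above differ only in addressing: vse8 is unit-stride, vsse8 takes a byte stride as ptrdiff_t, and vsuxei16/vsuxei32 scatter to unsigned byte offsets with no ordering guarantee. A sketch, assuming vle16_v_u16m2 is declared elsewhere in this header (store_u8 is a hypothetical helper; vl lanes assumed):

/* Store v twice: strided to every 4th byte, then scattered to offsets[]. */
void store_u8(uint8_t *dst, const uint16_t *offsets,
              vuint8m1_t v, size_t vl) {
  vsse8(dst, 4, v, vl);                          /* strided store      */
  vuint16m2_t offs = vle16_v_u16m2(offsets, vl); /* assumed loader     */
  vsuxei16(dst, offs, v, vl);                    /* unordered scatter  */
}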
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8mf8)))
+void vsuxei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i8mf8_m)))
+void vsuxei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m1)))
+vint8m1_t vadd(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m1_m)))
+vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m2)))
+vint8m2_t vadd(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m2_m)))
+vint8m2_t vadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m4)))
+vint8m4_t vadd(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m4_m)))
+vint8m4_t vadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m8)))
+vint8m8_t vadd(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8m8_m)))
+vint8m8_t vadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8mf2)))
+vint8mf2_t vadd(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8mf2_m)))
+vint8mf2_t vadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8mf4)))
+vint8mf4_t vadd(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8mf4_m)))
+vint8mf4_t vadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8mf8)))
+vint8mf8_t vadd(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i8mf8_m)))
+vint8mf8_t vadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m1)))
+vint16m1_t vadd(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m1_m)))
+vint16m1_t vadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m2)))
+vint16m2_t vadd(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m2_m)))
+vint16m2_t vadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m4)))
+vint16m4_t vadd(vint16m4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m4_m)))
+vint16m4_t vadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m8)))
+vint16m8_t vadd(vint16m8_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16m8_m)))
+vint16m8_t vadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16mf2)))
+vint16mf2_t vadd(vint16mf2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16mf2_m)))
+vint16mf2_t vadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16mf4)))
+vint16mf4_t vadd(vint16mf4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i16mf4_m)))
+vint16mf4_t vadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m1)))
+vint32m1_t vadd(vint32m1_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m1_m)))
+vint32m1_t vadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m2)))
+vint32m2_t vadd(vint32m2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m2_m)))
+vint32m2_t vadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m4)))
+vint32m4_t vadd(vint32m4_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m4_m)))
+vint32m4_t vadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m8)))
+vint32m8_t vadd(vint32m8_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32m8_m)))
+vint32m8_t vadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32mf2)))
+vint32mf2_t vadd(vint32mf2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i32mf2_m)))
+vint32mf2_t vadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m1)))
+vint64m1_t vadd(vint64m1_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m1_m)))
+vint64m1_t vadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m2)))
+vint64m2_t vadd(vint64m2_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m2_m)))
+vint64m2_t vadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m4)))
+vint64m4_t vadd(vint64m4_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m4_m)))
+vint64m4_t vadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m8)))
+vint64m8_t vadd(vint64m8_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_i64m8_m)))
+vint64m8_t vadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m1)))
+vuint8m1_t vadd(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m1_m)))
+vuint8m1_t vadd(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m2)))
+vuint8m2_t vadd(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m2_m)))
+vuint8m2_t vadd(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m4)))
+vuint8m4_t vadd(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m4_m)))
+vuint8m4_t vadd(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m8)))
+vuint8m8_t vadd(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8m8_m)))
+vuint8m8_t vadd(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8mf2)))
+vuint8mf2_t vadd(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8mf2_m)))
+vuint8mf2_t vadd(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8mf4)))
+vuint8mf4_t vadd(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8mf4_m)))
+vuint8mf4_t vadd(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8mf8)))
+vuint8mf8_t vadd(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u8mf8_m)))
+vuint8mf8_t vadd(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m1)))
+vuint16m1_t vadd(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m1_m)))
+vuint16m1_t vadd(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m2)))
+vuint16m2_t vadd(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m2_m)))
+vuint16m2_t vadd(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m4)))
+vuint16m4_t vadd(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m4_m)))
+vuint16m4_t vadd(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
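vadd is generated in vector-vector (vv) and vector-scalar (vx) forms for every element type and LMUL. A typical strip-mined loop over the vadd_vv_u8m1 and vse8_v_u8m1 overloads above, assuming vsetvl_e8m1 and vle8_v_u8m1 declared elsewhere in this header (add_u8 is a hypothetical helper):

/* c[i] = a[i] + b[i] for n uint8 elements. */
void add_u8(uint8_t *c, const uint8_t *a, const uint8_t *b, size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, c += vl) {
    vl = vsetvl_e8m1(n);                  /* lanes for this strip */
    vuint8m1_t va = vle8_v_u8m1(a, vl);
    vuint8m1_t vb = vle8_v_u8m1(b, vl);
    vse8(c, vadd(va, vb, vl), vl);        /* overloaded add + store */
  }
}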
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m8)))
+vuint16m8_t vadd(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16m8_m)))
+vuint16m8_t vadd(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16mf2)))
+vuint16mf2_t vadd(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16mf2_m)))
+vuint16mf2_t vadd(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16mf4)))
+vuint16mf4_t vadd(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u16mf4_m)))
+vuint16mf4_t vadd(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m1)))
+vuint32m1_t vadd(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m1_m)))
+vuint32m1_t vadd(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m2)))
+vuint32m2_t vadd(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m2_m)))
+vuint32m2_t vadd(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m4)))
+vuint32m4_t vadd(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m4_m)))
+vuint32m4_t vadd(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m8)))
+vuint32m8_t vadd(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32m8_m)))
+vuint32m8_t vadd(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32mf2)))
+vuint32mf2_t vadd(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u32mf2_m)))
+vuint32mf2_t vadd(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m1)))
+vuint64m1_t vadd(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m1_m)))
+vuint64m1_t vadd(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m2)))
+vuint64m2_t vadd(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m2_m)))
+vuint64m2_t vadd(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m4)))
+vuint64m4_t vadd(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m4_m)))
+vuint64m4_t vadd(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m8)))
+vuint64m8_t vadd(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_u64m8_m)))
+vuint64m8_t vadd(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m1)))
+vuint8m1_t vadd(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m1_m)))
+vuint8m1_t vadd(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m2)))
+vuint8m2_t vadd(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m2_m)))
+vuint8m2_t vadd(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m4)))
+vuint8m4_t vadd(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m4_m)))
+vuint8m4_t vadd(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m8)))
+vuint8m8_t vadd(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8m8_m)))
+vuint8m8_t vadd(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8mf2)))
+vuint8mf2_t vadd(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8mf2_m)))
+vuint8mf2_t vadd(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8mf4)))
+vuint8mf4_t vadd(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8mf4_m)))
+vuint8mf4_t vadd(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8mf8)))
+vuint8mf8_t vadd(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u8mf8_m)))
+vuint8mf8_t vadd(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m1)))
+vuint16m1_t vadd(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m1_m)))
+vuint16m1_t vadd(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m2)))
+vuint16m2_t vadd(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m2_m)))
+vuint16m2_t vadd(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m4)))
+vuint16m4_t vadd(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m4_m)))
+vuint16m4_t vadd(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m8)))
+vuint16m8_t vadd(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16m8_m)))
+vuint16m8_t vadd(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16mf2)))
+vuint16mf2_t vadd(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16mf2_m)))
+vuint16mf2_t vadd(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16mf4)))
+vuint16mf4_t vadd(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u16mf4_m)))
+vuint16mf4_t vadd(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m1)))
+vuint32m1_t vadd(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m1_m)))
+vuint32m1_t vadd(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m2)))
+vuint32m2_t vadd(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m2_m)))
+vuint32m2_t vadd(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m4)))
+vuint32m4_t vadd(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m4_m)))
+vuint32m4_t vadd(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m8)))
+vuint32m8_t vadd(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32m8_m)))
+vuint32m8_t vadd(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32mf2)))
+vuint32mf2_t vadd(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u32mf2_m)))
+vuint32mf2_t vadd(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m1)))
+vuint64m1_t vadd(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m1_m)))
+vuint64m1_t vadd(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m2)))
+vuint64m2_t vadd(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m2_m)))
+vuint64m2_t vadd(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m4)))
+vuint64m4_t vadd(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m4_m)))
+vuint64m4_t vadd(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m8)))
+vuint64m8_t vadd(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vx_u64m8_m)))
+vuint64m8_t vadd(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m1)))
+vint8m1_t vsub(vint8m1_t op0, vint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m1_m)))
+vint8m1_t vsub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m2)))
+vint8m2_t vsub(vint8m2_t op0, vint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m2_m)))
+vint8m2_t vsub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m4)))
+vint8m4_t vsub(vint8m4_t op0, vint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m4_m)))
+vint8m4_t vsub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m8)))
+vint8m8_t vsub(vint8m8_t op0, vint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8m8_m)))
+vint8m8_t vsub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8mf2)))
+vint8mf2_t vsub(vint8mf2_t op0, vint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8mf2_m)))
+vint8mf2_t vsub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8mf4)))
+vint8mf4_t vsub(vint8mf4_t op0, vint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8mf4_m)))
+vint8mf4_t vsub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8mf8)))
+vint8mf8_t vsub(vint8mf8_t op0, vint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i8mf8_m)))
+vint8mf8_t vsub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m1)))
+vint16m1_t vsub(vint16m1_t op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m1_m)))
+vint16m1_t vsub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m2)))
+vint16m2_t vsub(vint16m2_t op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m2_m)))
+vint16m2_t vsub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m4)))
+vint16m4_t vsub(vint16m4_t op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m4_m)))
+vint16m4_t vsub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m8)))
+vint16m8_t vsub(vint16m8_t op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16m8_m)))
+vint16m8_t vsub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16mf2)))
+vint16mf2_t vsub(vint16mf2_t op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16mf2_m)))
+vint16mf2_t vsub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16mf4)))
+vint16mf4_t vsub(vint16mf4_t op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i16mf4_m)))
+vint16mf4_t vsub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m1)))
+vint32m1_t vsub(vint32m1_t op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m1_m)))
+vint32m1_t vsub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m2)))
+vint32m2_t vsub(vint32m2_t op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m2_m)))
+vint32m2_t vsub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m4)))
+vint32m4_t vsub(vint32m4_t op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m4_m)))
+vint32m4_t vsub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m8)))
+vint32m8_t vsub(vint32m8_t op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32m8_m)))
+vint32m8_t vsub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32mf2)))
+vint32mf2_t vsub(vint32mf2_t op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i32mf2_m)))
+vint32mf2_t vsub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m1)))
+vint64m1_t vsub(vint64m1_t op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m1_m)))
+vint64m1_t vsub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
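Every masked (_m) overload in this header follows the same convention: (mask, maskedoff, sources..., vl), with inactive lanes taking the corresponding maskedoff element. For instance, with the vsub_vv_i32m1_m overload above (vbool32_t pairs with SEW/LMUL = 32), a hypothetical helper:

/* d[i] = a[i] - b[i] where m[i] is set; otherwise d[i] = a[i]. */
vint32m1_t sub_where(vbool32_t m, vint32m1_t a, vint32m1_t b, size_t vl) {
  return vsub(m, a, a, b, vl); /* maskedoff = a keeps inactive lanes */
}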
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m2)))
+vint64m2_t vsub(vint64m2_t op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m2_m)))
+vint64m2_t vsub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m4)))
+vint64m4_t vsub(vint64m4_t op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m4_m)))
+vint64m4_t vsub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m8)))
+vint64m8_t vsub(vint64m8_t op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_i64m8_m)))
+vint64m8_t vsub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8m1)))
+void vsuxei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8m1_m)))
+void vsuxei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8m2)))
+void vsuxei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8m2_m)))
+void vsuxei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8mf2)))
+void vsuxei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8mf2_m)))
+void vsuxei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8mf4)))
+void vsuxei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8mf4_m)))
+void vsuxei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8mf8)))
+void vsuxei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u8mf8_m)))
+void vsuxei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m1)))
+vint8m1_t vsub(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m1_m)))
+vint8m1_t vsub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m2)))
+vint8m2_t vsub(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m2_m)))
+vint8m2_t vsub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m4)))
+vint8m4_t vsub(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m4_m)))
+vint8m4_t vsub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m8)))
+vint8m8_t vsub(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8m8_m)))
+vint8m8_t vsub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8mf2)))
+vint8mf2_t vsub(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8mf2_m)))
+vint8mf2_t vsub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8mf4)))
+vint8mf4_t vsub(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8mf4_m)))
+vint8mf4_t vsub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8mf8)))
+vint8mf8_t vsub(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i8mf8_m)))
+vint8mf8_t vsub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m1)))
+vint16m1_t vsub(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m1_m)))
+vint16m1_t vsub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m2)))
+vint16m2_t vsub(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m2_m)))
+vint16m2_t vsub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m4)))
+vint16m4_t vsub(vint16m4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m4_m)))
+vint16m4_t vsub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m8)))
+vint16m8_t vsub(vint16m8_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16m8_m)))
+vint16m8_t vsub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16mf2)))
+vint16mf2_t vsub(vint16mf2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16mf2_m)))
+vint16mf2_t vsub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16mf4)))
+vint16mf4_t vsub(vint16mf4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i16mf4_m)))
+vint16mf4_t vsub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m1)))
+vint32m1_t vsub(vint32m1_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m1_m)))
+vint32m1_t vsub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m2)))
+vint32m2_t vsub(vint32m2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m2_m)))
+vint32m2_t vsub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m4)))
+vint32m4_t vsub(vint32m4_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m4_m)))
+vint32m4_t vsub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m8)))
+vint32m8_t vsub(vint32m8_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32m8_m)))
+vint32m8_t vsub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32mf2)))
+vint32mf2_t vsub(vint32mf2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i32mf2_m)))
+vint32mf2_t vsub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m1)))
+vint64m1_t vsub(vint64m1_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m1_m)))
+vint64m1_t vsub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m2)))
+vint64m2_t vsub(vint64m2_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m2_m)))
+vint64m2_t vsub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m4)))
+vint64m4_t vsub(vint64m4_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m4_m)))
+vint64m4_t vsub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m8)))
+vint64m8_t vsub(vint64m8_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_i64m8_m)))
+vint64m8_t vsub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m1)))
+vuint8m1_t vsub(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m1_m)))
+vuint8m1_t vsub(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m2)))
+vuint8m2_t vsub(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m2_m)))
+vuint8m2_t vsub(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m4)))
+vuint8m4_t vsub(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m4_m)))
+vuint8m4_t vsub(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m8)))
+vuint8m8_t vsub(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8m8_m)))
+vuint8m8_t vsub(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8mf2)))
+vuint8mf2_t vsub(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8mf2_m)))
+vuint8mf2_t vsub(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8mf4)))
+vuint8mf4_t vsub(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8mf4_m)))
+vuint8mf4_t vsub(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8mf8)))
+vuint8mf8_t vsub(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u8mf8_m)))
+vuint8mf8_t vsub(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m1)))
+vuint16m1_t vsub(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m1_m)))
+vuint16m1_t vsub(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m2)))
+vuint16m2_t vsub(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m2_m)))
+vuint16m2_t vsub(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m4)))
+vuint16m4_t vsub(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m4_m)))
+vuint16m4_t vsub(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m8)))
+vuint16m8_t vsub(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16m8_m)))
+vuint16m8_t vsub(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16mf2)))
+vuint16mf2_t vsub(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16mf2_m)))
+vuint16mf2_t vsub(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16mf4)))
+vuint16mf4_t vsub(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u16mf4_m)))
+vuint16mf4_t vsub(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m1)))
+vuint32m1_t vsub(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m1_m)))
+vuint32m1_t vsub(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m2)))
+vuint32m2_t vsub(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m2_m)))
+vuint32m2_t vsub(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m4)))
+vuint32m4_t vsub(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m4_m)))
+vuint32m4_t vsub(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m8)))
+vuint32m8_t vsub(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32m8_m)))
+vuint32m8_t vsub(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32mf2)))
+vuint32mf2_t vsub(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u32mf2_m)))
+vuint32mf2_t vsub(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m1)))
+vuint64m1_t vsub(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m1_m)))
+vuint64m1_t vsub(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m2)))
+vuint64m2_t vsub(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m2_m)))
+vuint64m2_t vsub(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m4)))
+vuint64m4_t vsub(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m4_m)))
+vuint64m4_t vsub(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m8)))
+vuint64m8_t vsub(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vv_u64m8_m)))
+vuint64m8_t vsub(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m1)))
+vuint8m1_t vsub(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m1_m)))
+vuint8m1_t vsub(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m2)))
+vuint8m2_t vsub(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m2_m)))
+vuint8m2_t vsub(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m4)))
+vuint8m4_t vsub(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m4_m))) +vuint8m4_t vsub(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m8))) +vuint8m8_t vsub(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8m8_m))) +vuint8m8_t vsub(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8mf2))) +vuint8mf2_t vsub(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8mf2_m))) +vuint8mf2_t vsub(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8mf4))) +vuint8mf4_t vsub(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8mf4_m))) +vuint8mf4_t vsub(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8mf8))) +vuint8mf8_t vsub(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u8mf8_m))) +vuint8mf8_t vsub(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m1))) +vuint16m1_t vsub(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m1_m))) +vuint16m1_t vsub(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m2))) +vuint16m2_t vsub(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m2_m))) +vuint16m2_t vsub(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m4))) +vuint16m4_t vsub(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m4_m))) +vuint16m4_t vsub(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m8))) +vuint16m8_t vsub(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16m8_m))) +vuint16m8_t vsub(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16mf2))) +vuint16mf2_t vsub(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16mf2_m))) +vuint16mf2_t vsub(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16mf4))) +vuint16mf4_t vsub(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u16mf4_m))) +vuint16mf4_t vsub(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m1))) +vuint32m1_t 
vsub(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m1_m))) +vuint32m1_t vsub(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m2))) +vuint32m2_t vsub(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m2_m))) +vuint32m2_t vsub(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m4))) +vuint32m4_t vsub(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m4_m))) +vuint32m4_t vsub(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m8))) +vuint32m8_t vsub(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32m8_m))) +vuint32m8_t vsub(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32mf2))) +vuint32mf2_t vsub(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u32mf2_m))) +vuint32mf2_t vsub(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m1))) +vuint64m1_t vsub(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m1_m))) +vuint64m1_t vsub(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m2))) +vuint64m2_t vsub(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m2_m))) +vuint64m2_t vsub(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m4))) +vuint64m4_t vsub(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m4_m))) +vuint64m4_t vsub(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m8))) +vuint64m8_t vsub(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsub_vx_u64m8_m))) +vuint64m8_t vsub(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m1))) +vint8m1_t vrsub(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m1_m))) +vint8m1_t vrsub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m2))) +vint8m2_t vrsub(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m2_m))) +vint8m2_t vrsub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m4))) +vint8m4_t vrsub(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m4_m))) +vint8m4_t vrsub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m8))) +vint8m8_t vrsub(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8m8_m))) +vint8m8_t vrsub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8mf2))) +vint8mf2_t vrsub(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8mf2_m))) +vint8mf2_t vrsub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8mf4))) +vint8mf4_t vrsub(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8mf4_m))) +vint8mf4_t vrsub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8mf8))) +vint8mf8_t vrsub(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i8mf8_m))) +vint8mf8_t vrsub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m1))) +vint16m1_t vrsub(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m1_m))) +vint16m1_t vrsub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m2))) +vint16m2_t vrsub(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m2_m))) +vint16m2_t vrsub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m4))) +vint16m4_t vrsub(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m4_m))) +vint16m4_t vrsub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m8))) +vint16m8_t vrsub(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16m8_m))) +vint16m8_t vrsub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16mf2))) +vint16mf2_t vrsub(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16mf2_m))) +vint16mf2_t vrsub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16mf4))) +vint16mf4_t vrsub(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i16mf4_m))) +vint16mf4_t vrsub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m1))) +vint32m1_t vrsub(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m1_m))) +vint32m1_t vrsub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m2))) +vint32m2_t vrsub(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m2_m))) +vint32m2_t vrsub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m4))) +vint32m4_t vrsub(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m4_m))) +vint32m4_t vrsub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m8))) +vint32m8_t vrsub(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32m8_m))) +vint32m8_t vrsub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32mf2))) +vint32mf2_t vrsub(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i32mf2_m))) +vint32mf2_t vrsub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m1))) +vint64m1_t vrsub(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m1_m))) +vint64m1_t vrsub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m2))) +vint64m2_t vrsub(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m2_m))) +vint64m2_t vrsub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m4))) +vint64m4_t vrsub(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m4_m))) +vint64m4_t vrsub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m8))) +vint64m8_t vrsub(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_i64m8_m))) +vint64m8_t vrsub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m1))) +vuint8m1_t vrsub(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m1_m))) +vuint8m1_t vrsub(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m2))) +vuint8m2_t vrsub(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m2_m))) +vuint8m2_t vrsub(vbool4_t op0, 
vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m4))) +vuint8m4_t vrsub(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m4_m))) +vuint8m4_t vrsub(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m8))) +vuint8m8_t vrsub(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8m8_m))) +vuint8m8_t vrsub(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8mf2))) +vuint8mf2_t vrsub(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8mf2_m))) +vuint8mf2_t vrsub(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8mf4))) +vuint8mf4_t vrsub(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8mf4_m))) +vuint8mf4_t vrsub(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8mf8))) +vuint8mf8_t vrsub(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u8mf8_m))) +vuint8mf8_t vrsub(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m1))) +vuint16m1_t vrsub(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m1_m))) +vuint16m1_t vrsub(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m2))) +vuint16m2_t vrsub(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m2_m))) +vuint16m2_t vrsub(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m4))) +vuint16m4_t vrsub(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m4_m))) +vuint16m4_t vrsub(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m8))) +vuint16m8_t vrsub(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16m8_m))) +vuint16m8_t vrsub(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16mf2))) +vuint16mf2_t vrsub(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16mf2_m))) +vuint16mf2_t vrsub(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16mf4))) +vuint16mf4_t vrsub(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u16mf4_m))) +vuint16mf4_t vrsub(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m1))) +vuint32m1_t vrsub(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m1_m))) +vuint32m1_t vrsub(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m2))) +vuint32m2_t vrsub(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m2_m))) +vuint32m2_t vrsub(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m4))) +vuint32m4_t vrsub(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m4_m))) +vuint32m4_t vrsub(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m8))) +vuint32m8_t vrsub(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32m8_m))) +vuint32m8_t vrsub(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32mf2))) +vuint32mf2_t vrsub(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u32mf2_m))) +vuint32mf2_t vrsub(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m1))) +vuint64m1_t vrsub(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m1_m))) +vuint64m1_t vrsub(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m2))) +vuint64m2_t vrsub(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m2_m))) +vuint64m2_t vrsub(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m4))) +vuint64m4_t vrsub(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m4_m))) +vuint64m4_t vrsub(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m8))) +vuint64m8_t vrsub(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrsub_vx_u64m8_m))) +vuint64m8_t vrsub(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16mf4))) +vuint16mf4_t vwaddu_vx(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16mf4_m))) +vuint16mf4_t vwaddu_vx(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16mf2))) +vuint16mf2_t vwaddu_vx(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16mf2_m))) +vuint16mf2_t vwaddu_vx(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m1))) +vuint16m1_t vwaddu_vx(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m1_m))) +vuint16m1_t vwaddu_vx(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m2))) +vuint16m2_t vwaddu_vx(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m2_m))) +vuint16m2_t vwaddu_vx(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m4))) +vuint16m4_t vwaddu_vx(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m4_m))) +vuint16m4_t vwaddu_vx(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m8))) +vuint16m8_t vwaddu_vx(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u16m8_m))) +vuint16m8_t vwaddu_vx(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32mf2))) +vuint32mf2_t vwaddu_vx(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32mf2_m))) +vuint32mf2_t vwaddu_vx(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m1))) +vuint32m1_t vwaddu_vx(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m1_m))) +vuint32m1_t vwaddu_vx(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m2))) +vuint32m2_t vwaddu_vx(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m2_m))) +vuint32m2_t vwaddu_vx(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m4))) +vuint32m4_t vwaddu_vx(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m4_m))) +vuint32m4_t vwaddu_vx(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m8))) +vuint32m8_t vwaddu_vx(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u32m8_m))) +vuint32m8_t vwaddu_vx(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m1))) +vuint64m1_t vwaddu_vx(vuint32mf2_t op0, uint32_t 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m1_m))) +vuint64m1_t vwaddu_vx(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m2))) +vuint64m2_t vwaddu_vx(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m2_m))) +vuint64m2_t vwaddu_vx(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m4))) +vuint64m4_t vwaddu_vx(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m4_m))) +vuint64m4_t vwaddu_vx(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m8))) +vuint64m8_t vwaddu_vx(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_vx_u64m8_m))) +vuint64m8_t vwaddu_vx(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16mf4))) +vuint16mf4_t vwsubu_vv(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16mf4_m))) +vuint16mf4_t vwsubu_vv(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16mf2))) +vuint16mf2_t vwsubu_vv(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16mf2_m))) +vuint16mf2_t vwsubu_vv(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m1))) +vuint16m1_t vwsubu_vv(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m1_m))) +vuint16m1_t vwsubu_vv(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m2))) +vuint16m2_t vwsubu_vv(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m2_m))) +vuint16m2_t vwsubu_vv(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m4))) +vuint16m4_t vwsubu_vv(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m4_m))) +vuint16m4_t vwsubu_vv(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m8))) +vuint16m8_t vwsubu_vv(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u16m8_m))) +vuint16m8_t vwsubu_vv(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32mf2))) +vuint32mf2_t vwsubu_vv(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32mf2_m))) +vuint32mf2_t vwsubu_vv(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m1))) +vuint32m1_t vwsubu_vv(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m1_m))) +vuint32m1_t vwsubu_vv(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m2))) +vuint32m2_t vwsubu_vv(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m2_m))) +vuint32m2_t vwsubu_vv(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m4))) +vuint32m4_t vwsubu_vv(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m4_m))) +vuint32m4_t vwsubu_vv(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m8))) +vuint32m8_t vwsubu_vv(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u32m8_m))) +vuint32m8_t vwsubu_vv(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m1))) +vuint64m1_t vwsubu_vv(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m1_m))) +vuint64m1_t vwsubu_vv(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m2))) +vuint64m2_t vwsubu_vv(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m2_m))) +vuint64m2_t vwsubu_vv(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m4))) +vuint64m4_t vwsubu_vv(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m4_m))) +vuint64m4_t vwsubu_vv(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m8))) +vuint64m8_t vwsubu_vv(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vv_u64m8_m))) +vuint64m8_t vwsubu_vv(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16mf4))) +vuint16mf4_t vwsubu_vx(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16mf4_m))) +vuint16mf4_t vwsubu_vx(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16mf2))) +vuint16mf2_t vwsubu_vx(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16mf2_m))) +vuint16mf2_t vwsubu_vx(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m1))) +vuint16m1_t vwsubu_vx(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m1_m))) +vuint16m1_t vwsubu_vx(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m2))) +vuint16m2_t vwsubu_vx(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m2_m))) +vuint16m2_t vwsubu_vx(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m4))) +vuint16m4_t vwsubu_vx(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m4_m))) +vuint16m4_t vwsubu_vx(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m8))) +vuint16m8_t vwsubu_vx(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u16m8_m))) +vuint16m8_t vwsubu_vx(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32mf2))) +vuint32mf2_t vwsubu_vx(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32mf2_m))) +vuint32mf2_t vwsubu_vx(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m1))) +vuint32m1_t vwsubu_vx(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m1_m))) +vuint32m1_t vwsubu_vx(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m2))) +vuint32m2_t vwsubu_vx(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m2_m))) +vuint32m2_t vwsubu_vx(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m4))) +vuint32m4_t vwsubu_vx(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m4_m))) +vuint32m4_t vwsubu_vx(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m8))) +vuint32m8_t vwsubu_vx(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u32m8_m))) +vuint32m8_t vwsubu_vx(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m1))) +vuint64m1_t vwsubu_vx(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m1_m))) +vuint64m1_t vwsubu_vx(vbool64_t op0, vuint64m1_t 
op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m2))) +vuint64m2_t vwsubu_vx(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m2_m))) +vuint64m2_t vwsubu_vx(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m4))) +vuint64m4_t vwsubu_vx(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m4_m))) +vuint64m4_t vwsubu_vx(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m8))) +vuint64m8_t vwsubu_vx(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_vx_u64m8_m))) +vuint64m8_t vwsubu_vx(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16mf4))) +vint16mf4_t vwadd_vv(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16mf4_m))) +vint16mf4_t vwadd_vv(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16mf2))) +vint16mf2_t vwadd_vv(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16mf2_m))) +vint16mf2_t vwadd_vv(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m1))) +vint16m1_t vwadd_vv(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m1_m))) +vint16m1_t vwadd_vv(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m2))) +vint16m2_t vwadd_vv(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m2_m))) +vint16m2_t vwadd_vv(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m4))) +vint16m4_t vwadd_vv(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m4_m))) +vint16m4_t vwadd_vv(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m8))) +vint16m8_t vwadd_vv(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i16m8_m))) +vint16m8_t vwadd_vv(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32mf2))) +vint32mf2_t vwadd_vv(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32mf2_m))) +vint32mf2_t vwadd_vv(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m1))) 
+vint32m1_t vwadd_vv(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m1_m))) +vint32m1_t vwadd_vv(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m2))) +vint32m2_t vwadd_vv(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m2_m))) +vint32m2_t vwadd_vv(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m4))) +vint32m4_t vwadd_vv(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m4_m))) +vint32m4_t vwadd_vv(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m8))) +vint32m8_t vwadd_vv(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i32m8_m))) +vint32m8_t vwadd_vv(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m1))) +vint64m1_t vwadd_vv(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m1_m))) +vint64m1_t vwadd_vv(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m2))) +vint64m2_t vwadd_vv(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m2_m))) +vint64m2_t vwadd_vv(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m4))) +vint64m4_t vwadd_vv(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m4_m))) +vint64m4_t vwadd_vv(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m8))) +vint64m8_t vwadd_vv(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vv_i64m8_m))) +vint64m8_t vwadd_vv(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16mf4))) +vint16mf4_t vwadd_vx(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16mf4_m))) +vint16mf4_t vwadd_vx(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16mf2))) +vint16mf2_t vwadd_vx(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16mf2_m))) +vint16mf2_t vwadd_vx(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m1))) +vint16m1_t vwadd_vx(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m1_m))) +vint16m1_t 
vwadd_vx(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m2))) +vint16m2_t vwadd_vx(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m2_m))) +vint16m2_t vwadd_vx(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m4))) +vint16m4_t vwadd_vx(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m4_m))) +vint16m4_t vwadd_vx(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m8))) +vint16m8_t vwadd_vx(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i16m8_m))) +vint16m8_t vwadd_vx(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32mf2))) +vint32mf2_t vwadd_vx(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32mf2_m))) +vint32mf2_t vwadd_vx(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m1))) +vint32m1_t vwadd_vx(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m1_m))) +vint32m1_t vwadd_vx(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m2))) +vint32m2_t vwadd_vx(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m2_m))) +vint32m2_t vwadd_vx(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m4))) +vint32m4_t vwadd_vx(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m4_m))) +vint32m4_t vwadd_vx(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m8))) +vint32m8_t vwadd_vx(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i32m8_m))) +vint32m8_t vwadd_vx(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m1))) +vint64m1_t vwadd_vx(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m1_m))) +vint64m1_t vwadd_vx(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m2))) +vint64m2_t vwadd_vx(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m2_m))) +vint64m2_t vwadd_vx(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m4))) +vint64m4_t vwadd_vx(vint32m2_t op0, int32_t op1, size_t op2); 
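
/* Illustrative sketch (not part of the header): how the overloaded
 * declarations above are meant to be called. The trailing size_t argument
 * is the active vector length (vl); overload resolution picks the widened
 * result type from the source operand (here i16m1 -> i32m2, per the
 * vwadd_vx prototypes above). Function name is hypothetical. */
#include <riscv_vector.h>

/* out[i] = (int32_t)a[i] + b, strip-mined over n elements. */
void widen_add_i16(const int16_t *a, int16_t b, int32_t *out, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e16m1(n);           /* elements handled this pass */
    vint16m1_t va = vle16_v_i16m1(a, vl);  /* load 16-bit source */
    vint32m2_t vr = vwadd_vx(va, b, vl);   /* widening add: i16 + i16 -> i32 */
    vse32_v_i32m2(out, vr, vl);            /* store widened results */
    a += vl; out += vl; n -= vl;
  }
}
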
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m4_m))) +vint64m4_t vwadd_vx(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m8))) +vint64m8_t vwadd_vx(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_vx_i64m8_m))) +vint64m8_t vwadd_vx(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8m1))) +void vsuxei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8m1_m))) +void vsuxei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8mf2))) +void vsuxei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8mf2_m))) +void vsuxei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8mf4))) +void vsuxei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8mf4_m))) +void vsuxei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8mf8))) +void vsuxei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i8mf8_m))) +void vsuxei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16mf4))) +vint16mf4_t vwsub_vv(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16mf4_m))) +vint16mf4_t vwsub_vv(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16mf2))) +vint16mf2_t vwsub_vv(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16mf2_m))) +vint16mf2_t vwsub_vv(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m1))) +vint16m1_t vwsub_vv(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m1_m))) +vint16m1_t vwsub_vv(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m2))) +vint16m2_t vwsub_vv(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m2_m))) +vint16m2_t vwsub_vv(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m4))) +vint16m4_t vwsub_vv(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m4_m))) +vint16m4_t vwsub_vv(vbool4_t op0, vint16m4_t op1, vint8m2_t 
op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m8))) +vint16m8_t vwsub_vv(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i16m8_m))) +vint16m8_t vwsub_vv(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32mf2))) +vint32mf2_t vwsub_vv(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32mf2_m))) +vint32mf2_t vwsub_vv(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m1))) +vint32m1_t vwsub_vv(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m1_m))) +vint32m1_t vwsub_vv(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m2))) +vint32m2_t vwsub_vv(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m2_m))) +vint32m2_t vwsub_vv(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m4))) +vint32m4_t vwsub_vv(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m4_m))) +vint32m4_t vwsub_vv(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m8))) +vint32m8_t vwsub_vv(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i32m8_m))) +vint32m8_t vwsub_vv(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m1))) +vint64m1_t vwsub_vv(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m1_m))) +vint64m1_t vwsub_vv(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m2))) +vint64m2_t vwsub_vv(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m2_m))) +vint64m2_t vwsub_vv(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m4))) +vint64m4_t vwsub_vv(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m4_m))) +vint64m4_t vwsub_vv(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m8))) +vint64m8_t vwsub_vv(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vv_i64m8_m))) +vint64m8_t vwsub_vv(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16mf4))) +vint16mf4_t vwsub_vx(vint8mf8_t 
op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16mf4_m))) +vint16mf4_t vwsub_vx(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16mf2))) +vint16mf2_t vwsub_vx(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16mf2_m))) +vint16mf2_t vwsub_vx(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m1))) +vint16m1_t vwsub_vx(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m1_m))) +vint16m1_t vwsub_vx(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m2))) +vint16m2_t vwsub_vx(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m2_m))) +vint16m2_t vwsub_vx(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m4))) +vint16m4_t vwsub_vx(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m4_m))) +vint16m4_t vwsub_vx(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m8))) +vint16m8_t vwsub_vx(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i16m8_m))) +vint16m8_t vwsub_vx(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32mf2))) +vint32mf2_t vwsub_vx(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32mf2_m))) +vint32mf2_t vwsub_vx(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m1))) +vint32m1_t vwsub_vx(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m1_m))) +vint32m1_t vwsub_vx(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m2))) +vint32m2_t vwsub_vx(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m2_m))) +vint32m2_t vwsub_vx(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m4))) +vint32m4_t vwsub_vx(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m4_m))) +vint32m4_t vwsub_vx(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m8))) +vint32m8_t vwsub_vx(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i32m8_m))) +vint32m8_t vwsub_vx(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, int16_t op3, size_t op4); + 
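+
+// Note: the overloaded vwsub_vx forms above subtract a scalar from each
+// element and return a result of twice the source element width
+// (SEW*2, LMUL*2). A minimal usage sketch (hypothetical src and n,
+// assuming the V extension is enabled and this header is included):
+//
+//   size_t vl = vsetvl_e8m1(n);                 // select vl for SEW=8, LMUL=1
+//   vint8m1_t a = vle8_v_i8m1(src, vl);         // load int8 elements
+//   vint16m2_t r = vwsub_vx(a, (int8_t)1, vl);  // r[i] = (int16_t)a[i] - 1
+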
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m1))) +vint64m1_t vwsub_vx(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m1_m))) +vint64m1_t vwsub_vx(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m2))) +vint64m2_t vwsub_vx(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m2_m))) +vint64m2_t vwsub_vx(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m4))) +vint64m4_t vwsub_vx(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m4_m))) +vint64m4_t vwsub_vx(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m8))) +vint64m8_t vwsub_vx(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_vx_i64m8_m))) +vint64m8_t vwsub_vx(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16mf4))) +vuint16mf4_t vwaddu_wx(vuint16mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16mf4_m))) +vuint16mf4_t vwaddu_wx(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16mf2))) +vuint16mf2_t vwaddu_wx(vuint16mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16mf2_m))) +vuint16mf2_t vwaddu_wx(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m1))) +vuint16m1_t vwaddu_wx(vuint16m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m1_m))) +vuint16m1_t vwaddu_wx(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m2))) +vuint16m2_t vwaddu_wx(vuint16m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m2_m))) +vuint16m2_t vwaddu_wx(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m4))) +vuint16m4_t vwaddu_wx(vuint16m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m4_m))) +vuint16m4_t vwaddu_wx(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m8))) +vuint16m8_t vwaddu_wx(vuint16m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u16m8_m))) +vuint16m8_t vwaddu_wx(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32mf2))) +vuint32mf2_t vwaddu_wx(vuint32mf2_t op0, uint16_t op1, size_t op2); + 
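+
+// The _wx ("wide" operand + scalar) overloads above differ from the _vx
+// forms: op0 already has the widened type (2*SEW) and only the scalar
+// operand is widened before the add. Sketch with hypothetical w and vl:
+//
+//   vuint16m2_t r = vwaddu_wx(w, (uint8_t)3, vl);  // r[i] = w[i] + 3
+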
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32mf2_m))) +vuint32mf2_t vwaddu_wx(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m1))) +vuint32m1_t vwaddu_wx(vuint32m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m1_m))) +vuint32m1_t vwaddu_wx(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m2))) +vuint32m2_t vwaddu_wx(vuint32m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m2_m))) +vuint32m2_t vwaddu_wx(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m4))) +vuint32m4_t vwaddu_wx(vuint32m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m4_m))) +vuint32m4_t vwaddu_wx(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m8))) +vuint32m8_t vwaddu_wx(vuint32m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u32m8_m))) +vuint32m8_t vwaddu_wx(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m1))) +vuint64m1_t vwaddu_wx(vuint64m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m1_m))) +vuint64m1_t vwaddu_wx(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m2))) +vuint64m2_t vwaddu_wx(vuint64m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m2_m))) +vuint64m2_t vwaddu_wx(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m4))) +vuint64m4_t vwaddu_wx(vuint64m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m4_m))) +vuint64m4_t vwaddu_wx(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m8))) +vuint64m8_t vwaddu_wx(vuint64m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwaddu_wx_u64m8_m))) +vuint64m8_t vwaddu_wx(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16mf4))) +vuint16mf4_t vwsubu_wv(vuint16mf4_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16mf4_m))) +vuint16mf4_t vwsubu_wv(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16mf2))) +vuint16mf2_t vwsubu_wv(vuint16mf2_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16mf2_m))) 
+vuint16mf2_t vwsubu_wv(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m1))) +vuint16m1_t vwsubu_wv(vuint16m1_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m1_m))) +vuint16m1_t vwsubu_wv(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m2))) +vuint16m2_t vwsubu_wv(vuint16m2_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m2_m))) +vuint16m2_t vwsubu_wv(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m4))) +vuint16m4_t vwsubu_wv(vuint16m4_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m4_m))) +vuint16m4_t vwsubu_wv(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m8))) +vuint16m8_t vwsubu_wv(vuint16m8_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u16m8_m))) +vuint16m8_t vwsubu_wv(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32mf2))) +vuint32mf2_t vwsubu_wv(vuint32mf2_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32mf2_m))) +vuint32mf2_t vwsubu_wv(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m1))) +vuint32m1_t vwsubu_wv(vuint32m1_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m1_m))) +vuint32m1_t vwsubu_wv(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m2))) +vuint32m2_t vwsubu_wv(vuint32m2_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m2_m))) +vuint32m2_t vwsubu_wv(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m4))) +vuint32m4_t vwsubu_wv(vuint32m4_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m4_m))) +vuint32m4_t vwsubu_wv(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m8))) +vuint32m8_t vwsubu_wv(vuint32m8_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u32m8_m))) +vuint32m8_t vwsubu_wv(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m1))) +vuint64m1_t vwsubu_wv(vuint64m1_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m1_m))) +vuint64m1_t vwsubu_wv(vbool64_t op0, vuint64m1_t 
op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m2))) +vuint64m2_t vwsubu_wv(vuint64m2_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m2_m))) +vuint64m2_t vwsubu_wv(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m4))) +vuint64m4_t vwsubu_wv(vuint64m4_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m4_m))) +vuint64m4_t vwsubu_wv(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m8))) +vuint64m8_t vwsubu_wv(vuint64m8_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wv_u64m8_m))) +vuint64m8_t vwsubu_wv(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16mf4))) +vuint16mf4_t vwsubu_wx(vuint16mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16mf4_m))) +vuint16mf4_t vwsubu_wx(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16mf2))) +vuint16mf2_t vwsubu_wx(vuint16mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16mf2_m))) +vuint16mf2_t vwsubu_wx(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m1))) +vuint16m1_t vwsubu_wx(vuint16m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m1_m))) +vuint16m1_t vwsubu_wx(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m2))) +vuint16m2_t vwsubu_wx(vuint16m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m2_m))) +vuint16m2_t vwsubu_wx(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m4))) +vuint16m4_t vwsubu_wx(vuint16m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m4_m))) +vuint16m4_t vwsubu_wx(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m8))) +vuint16m8_t vwsubu_wx(vuint16m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u16m8_m))) +vuint16m8_t vwsubu_wx(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32mf2))) +vuint32mf2_t vwsubu_wx(vuint32mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32mf2_m))) +vuint32mf2_t vwsubu_wx(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m1))) +vuint32m1_t vwsubu_wx(vuint32m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m1_m))) +vuint32m1_t vwsubu_wx(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m2))) +vuint32m2_t vwsubu_wx(vuint32m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m2_m))) +vuint32m2_t vwsubu_wx(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m4))) +vuint32m4_t vwsubu_wx(vuint32m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m4_m))) +vuint32m4_t vwsubu_wx(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m8))) +vuint32m8_t vwsubu_wx(vuint32m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u32m8_m))) +vuint32m8_t vwsubu_wx(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m1))) +vuint64m1_t vwsubu_wx(vuint64m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m1_m))) +vuint64m1_t vwsubu_wx(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m2))) +vuint64m2_t vwsubu_wx(vuint64m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m2_m))) +vuint64m2_t vwsubu_wx(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m4))) +vuint64m4_t vwsubu_wx(vuint64m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m4_m))) +vuint64m4_t vwsubu_wx(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m8))) +vuint64m8_t vwsubu_wx(vuint64m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsubu_wx_u64m8_m))) +vuint64m8_t vwsubu_wx(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16mf4))) +vint16mf4_t vwadd_wv(vint16mf4_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16mf4_m))) +vint16mf4_t vwadd_wv(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16mf2))) +vint16mf2_t vwadd_wv(vint16mf2_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16mf2_m))) +vint16mf2_t vwadd_wv(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m1))) +vint16m1_t vwadd_wv(vint16m1_t op0, 
vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m1_m))) +vint16m1_t vwadd_wv(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m2))) +vint16m2_t vwadd_wv(vint16m2_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m2_m))) +vint16m2_t vwadd_wv(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m4))) +vint16m4_t vwadd_wv(vint16m4_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m4_m))) +vint16m4_t vwadd_wv(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m8))) +vint16m8_t vwadd_wv(vint16m8_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i16m8_m))) +vint16m8_t vwadd_wv(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32mf2))) +vint32mf2_t vwadd_wv(vint32mf2_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32mf2_m))) +vint32mf2_t vwadd_wv(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m1))) +vint32m1_t vwadd_wv(vint32m1_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m1_m))) +vint32m1_t vwadd_wv(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m2))) +vint32m2_t vwadd_wv(vint32m2_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m2_m))) +vint32m2_t vwadd_wv(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m4))) +vint32m4_t vwadd_wv(vint32m4_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m4_m))) +vint32m4_t vwadd_wv(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m8))) +vint32m8_t vwadd_wv(vint32m8_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i32m8_m))) +vint32m8_t vwadd_wv(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m1))) +vint64m1_t vwadd_wv(vint64m1_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m1_m))) +vint64m1_t vwadd_wv(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m2))) +vint64m2_t vwadd_wv(vint64m2_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m2_m))) +vint64m2_t vwadd_wv(vbool32_t op0, 
vint64m2_t op1, vint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m4))) +vint64m4_t vwadd_wv(vint64m4_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m4_m))) +vint64m4_t vwadd_wv(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m8))) +vint64m8_t vwadd_wv(vint64m8_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wv_i64m8_m))) +vint64m8_t vwadd_wv(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16mf4))) +vint16mf4_t vwadd_wx(vint16mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16mf4_m))) +vint16mf4_t vwadd_wx(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16mf2))) +vint16mf2_t vwadd_wx(vint16mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16mf2_m))) +vint16mf2_t vwadd_wx(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m1))) +vint16m1_t vwadd_wx(vint16m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m1_m))) +vint16m1_t vwadd_wx(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m2))) +vint16m2_t vwadd_wx(vint16m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m2_m))) +vint16m2_t vwadd_wx(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m4))) +vint16m4_t vwadd_wx(vint16m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m4_m))) +vint16m4_t vwadd_wx(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m8))) +vint16m8_t vwadd_wx(vint16m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i16m8_m))) +vint16m8_t vwadd_wx(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32mf2))) +vint32mf2_t vwadd_wx(vint32mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32mf2_m))) +vint32mf2_t vwadd_wx(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m1))) +vint32m1_t vwadd_wx(vint32m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m1_m))) +vint32m1_t vwadd_wx(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m2))) +vint32m2_t vwadd_wx(vint32m2_t op0, int16_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m2_m))) +vint32m2_t vwadd_wx(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m4))) +vint32m4_t vwadd_wx(vint32m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m4_m))) +vint32m4_t vwadd_wx(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m8))) +vint32m8_t vwadd_wx(vint32m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i32m8_m))) +vint32m8_t vwadd_wx(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m1))) +vint64m1_t vwadd_wx(vint64m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m1_m))) +vint64m1_t vwadd_wx(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m2))) +vint64m2_t vwadd_wx(vint64m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m2_m))) +vint64m2_t vwadd_wx(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m4))) +vint64m4_t vwadd_wx(vint64m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m4_m))) +vint64m4_t vwadd_wx(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m8))) +vint64m8_t vwadd_wx(vint64m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwadd_wx_i64m8_m))) +vint64m8_t vwadd_wx(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16mf4))) +vint16mf4_t vwsub_wv(vint16mf4_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16mf4_m))) +vint16mf4_t vwsub_wv(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16mf2))) +vint16mf2_t vwsub_wv(vint16mf2_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16mf2_m))) +vint16mf2_t vwsub_wv(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m1))) +vint16m1_t vwsub_wv(vint16m1_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m1_m))) +vint16m1_t vwsub_wv(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m2))) +vint16m2_t vwsub_wv(vint16m2_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m2_m))) +vint16m2_t vwsub_wv(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint8m1_t op3, size_t op4); + 
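+
+// Masked overloads (the *_m builtins) in this header share one shape:
+//   result f(vboolN_t mask, result maskedoff, <operands...>, size_t vl);
+// elements whose mask bit is clear are taken from 'maskedoff' rather than
+// computed. Sketch with hypothetical m, prev, a, b, vl (types as in the
+// i16m2 overload above):
+//
+//   vint16m2_t r = vwsub_wv(m, prev, a, b, vl);
+//   // r[i] = m[i] ? a[i] - (int16_t)b[i] : prev[i]
+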
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m4))) +vint16m4_t vwsub_wv(vint16m4_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m4_m))) +vint16m4_t vwsub_wv(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m8))) +vint16m8_t vwsub_wv(vint16m8_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i16m8_m))) +vint16m8_t vwsub_wv(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32mf2))) +vint32mf2_t vwsub_wv(vint32mf2_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32mf2_m))) +vint32mf2_t vwsub_wv(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m1))) +vint32m1_t vwsub_wv(vint32m1_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m1_m))) +vint32m1_t vwsub_wv(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m2))) +vint32m2_t vwsub_wv(vint32m2_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m2_m))) +vint32m2_t vwsub_wv(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m4))) +vint32m4_t vwsub_wv(vint32m4_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m4_m))) +vint32m4_t vwsub_wv(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m8))) +vint32m8_t vwsub_wv(vint32m8_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i32m8_m))) +vint32m8_t vwsub_wv(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m1))) +vint64m1_t vwsub_wv(vint64m1_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m1_m))) +vint64m1_t vwsub_wv(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m2))) +vint64m2_t vwsub_wv(vint64m2_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m2_m))) +vint64m2_t vwsub_wv(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m4))) +vint64m4_t vwsub_wv(vint64m4_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m4_m))) +vint64m4_t vwsub_wv(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m8))) +vint64m8_t vwsub_wv(vint64m8_t op0, vint32m4_t op1, size_t op2); + 
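+
+// LMUL note: widening ops double both SEW and LMUL, so the vint64m8_t
+// result above comes from vint32m4_t sources; m8 is already the largest
+// register group, which is why no wider form follows.
+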
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wv_i64m8_m))) +vint64m8_t vwsub_wv(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16mf4))) +vint16mf4_t vwsub_wx(vint16mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16mf4_m))) +vint16mf4_t vwsub_wx(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16mf2))) +vint16mf2_t vwsub_wx(vint16mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16mf2_m))) +vint16mf2_t vwsub_wx(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m1))) +vint16m1_t vwsub_wx(vint16m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m1_m))) +vint16m1_t vwsub_wx(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m2))) +vint16m2_t vwsub_wx(vint16m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m2_m))) +vint16m2_t vwsub_wx(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m4))) +vint16m4_t vwsub_wx(vint16m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m4_m))) +vint16m4_t vwsub_wx(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m8))) +vint16m8_t vwsub_wx(vint16m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i16m8_m))) +vint16m8_t vwsub_wx(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32mf2))) +vint32mf2_t vwsub_wx(vint32mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32mf2_m))) +vint32mf2_t vwsub_wx(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m1))) +vint32m1_t vwsub_wx(vint32m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m1_m))) +vint32m1_t vwsub_wx(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m2))) +vint32m2_t vwsub_wx(vint32m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m2_m))) +vint32m2_t vwsub_wx(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m4))) +vint32m4_t vwsub_wx(vint32m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m4_m))) +vint32m4_t vwsub_wx(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m8))) +vint32m8_t vwsub_wx(vint32m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i32m8_m))) +vint32m8_t vwsub_wx(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m1))) +vint64m1_t vwsub_wx(vint64m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m1_m))) +vint64m1_t vwsub_wx(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m2))) +vint64m2_t vwsub_wx(vint64m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m2_m))) +vint64m2_t vwsub_wx(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m4))) +vint64m4_t vwsub_wx(vint64m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m4_m))) +vint64m4_t vwsub_wx(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m8))) +vint64m8_t vwsub_wx(vint64m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwsub_wx_i64m8_m))) +vint64m8_t vwsub_wx(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m2))) +vint16m2_t vwcvt_x(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m2_m))) +vint16m2_t vwcvt_x(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m4))) +vint16m4_t vwcvt_x(vint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m4_m))) +vint16m4_t vwcvt_x(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m8))) +vint16m8_t vwcvt_x(vint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m8_m))) +vint16m8_t vwcvt_x(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m1))) +vint16m1_t vwcvt_x(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16m1_m))) +vint16m1_t vwcvt_x(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16mf2))) +vint16mf2_t vwcvt_x(vint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16mf2_m))) +vint16mf2_t vwcvt_x(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16mf4))) +vint16mf4_t vwcvt_x(vint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i16mf4_m))) +vint16mf4_t vwcvt_x(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m2))) +vint32m2_t vwcvt_x(vint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m2_m))) +vint32m2_t vwcvt_x(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m4))) +vint32m4_t vwcvt_x(vint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m4_m))) +vint32m4_t vwcvt_x(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m8))) +vint32m8_t vwcvt_x(vint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m8_m))) +vint32m8_t vwcvt_x(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m1))) +vint32m1_t vwcvt_x(vint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32m1_m))) +vint32m1_t vwcvt_x(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32mf2))) +vint32mf2_t vwcvt_x(vint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i32mf2_m))) +vint32mf2_t vwcvt_x(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m2))) +vint64m2_t vwcvt_x(vint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m2_m))) +vint64m2_t vwcvt_x(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m4))) +vint64m4_t vwcvt_x(vint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m4_m))) +vint64m4_t vwcvt_x(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m8))) +vint64m8_t vwcvt_x(vint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m8_m))) +vint64m8_t vwcvt_x(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m1))) +vint64m1_t vwcvt_x(vint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvt_x_x_v_i64m1_m))) +vint64m1_t vwcvt_x(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8m1))) +void vsuxei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8m1_m))) +void vsuxei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8mf2))) +void vsuxei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8mf2_m))) +void vsuxei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8mf4))) +void vsuxei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8mf4_m))) +void vsuxei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8mf8))) +void vsuxei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u8mf8_m))) +void vsuxei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8m1))) +vint8m1_t vadc(vint8m1_t op0, vint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8m2))) +vint8m2_t vadc(vint8m2_t op0, vint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8m4))) +vint8m4_t vadc(vint8m4_t op0, vint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8m8))) +vint8m8_t vadc(vint8m8_t op0, vint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8mf2))) +vint8mf2_t vadc(vint8mf2_t op0, vint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8mf4))) +vint8mf4_t vadc(vint8mf4_t op0, vint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i8mf8))) +vint8mf8_t vadc(vint8mf8_t op0, vint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i16m1))) +vint16m1_t vadc(vint16m1_t op0, vint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i16m2))) +vint16m2_t vadc(vint16m2_t op0, vint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i16m4))) +vint16m4_t vadc(vint16m4_t op0, vint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i16m8))) +vint16m8_t vadc(vint16m8_t op0, vint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i16mf2))) +vint16mf2_t vadc(vint16mf2_t op0, vint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i16mf4))) +vint16mf4_t vadc(vint16mf4_t op0, vint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i32m1))) +vint32m1_t vadc(vint32m1_t op0, vint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i32m2))) +vint32m2_t vadc(vint32m2_t op0, vint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i32m4))) +vint32m4_t vadc(vint32m4_t op0, vint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i32m8))) +vint32m8_t vadc(vint32m8_t op0, vint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i32mf2))) +vint32mf2_t 
vadc(vint32mf2_t op0, vint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i64m1))) +vint64m1_t vadc(vint64m1_t op0, vint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i64m2))) +vint64m2_t vadc(vint64m2_t op0, vint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i64m4))) +vint64m4_t vadc(vint64m4_t op0, vint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_i64m8))) +vint64m8_t vadc(vint64m8_t op0, vint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8m1))) +vint8m1_t vadc(vint8m1_t op0, int8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8m2))) +vint8m2_t vadc(vint8m2_t op0, int8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8m4))) +vint8m4_t vadc(vint8m4_t op0, int8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8m8))) +vint8m8_t vadc(vint8m8_t op0, int8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8mf2))) +vint8mf2_t vadc(vint8mf2_t op0, int8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8mf4))) +vint8mf4_t vadc(vint8mf4_t op0, int8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i8mf8))) +vint8mf8_t vadc(vint8mf8_t op0, int8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i16m1))) +vint16m1_t vadc(vint16m1_t op0, int16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i16m2))) +vint16m2_t vadc(vint16m2_t op0, int16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i16m4))) +vint16m4_t vadc(vint16m4_t op0, int16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i16m8))) +vint16m8_t vadc(vint16m8_t op0, int16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i16mf2))) +vint16mf2_t vadc(vint16mf2_t op0, int16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i16mf4))) +vint16mf4_t vadc(vint16mf4_t op0, int16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i32m1))) +vint32m1_t vadc(vint32m1_t op0, int32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i32m2))) +vint32m2_t vadc(vint32m2_t op0, int32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i32m4))) +vint32m4_t vadc(vint32m4_t op0, int32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i32m8))) +vint32m8_t vadc(vint32m8_t op0, int32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i32mf2))) +vint32mf2_t vadc(vint32mf2_t 
op0, int32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i64m1))) +vint64m1_t vadc(vint64m1_t op0, int64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i64m2))) +vint64m2_t vadc(vint64m2_t op0, int64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i64m4))) +vint64m4_t vadc(vint64m4_t op0, int64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_i64m8))) +vint64m8_t vadc(vint64m8_t op0, int64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8m1))) +vuint8m1_t vadc(vuint8m1_t op0, vuint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8m2))) +vuint8m2_t vadc(vuint8m2_t op0, vuint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8m4))) +vuint8m4_t vadc(vuint8m4_t op0, vuint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8m8))) +vuint8m8_t vadc(vuint8m8_t op0, vuint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8mf2))) +vuint8mf2_t vadc(vuint8mf2_t op0, vuint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8mf4))) +vuint8mf4_t vadc(vuint8mf4_t op0, vuint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u8mf8))) +vuint8mf8_t vadc(vuint8mf8_t op0, vuint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u16m1))) +vuint16m1_t vadc(vuint16m1_t op0, vuint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u16m2))) +vuint16m2_t vadc(vuint16m2_t op0, vuint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u16m4))) +vuint16m4_t vadc(vuint16m4_t op0, vuint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u16m8))) +vuint16m8_t vadc(vuint16m8_t op0, vuint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u16mf2))) +vuint16mf2_t vadc(vuint16mf2_t op0, vuint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u16mf4))) +vuint16mf4_t vadc(vuint16mf4_t op0, vuint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u32m1))) +vuint32m1_t vadc(vuint32m1_t op0, vuint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u32m2))) +vuint32m2_t vadc(vuint32m2_t op0, vuint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u32m4))) +vuint32m4_t vadc(vuint32m4_t op0, vuint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u32m8))) +vuint32m8_t vadc(vuint32m8_t op0, vuint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u32mf2))) +vuint32mf2_t vadc(vuint32mf2_t op0, vuint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u64m1))) +vuint64m1_t vadc(vuint64m1_t op0, vuint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u64m2))) +vuint64m2_t vadc(vuint64m2_t op0, vuint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u64m4))) +vuint64m4_t vadc(vuint64m4_t op0, vuint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vvm_u64m8))) +vuint64m8_t vadc(vuint64m8_t op0, vuint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8m1))) +vuint8m1_t vadc(vuint8m1_t op0, uint8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8m2))) +vuint8m2_t vadc(vuint8m2_t op0, uint8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8m4))) +vuint8m4_t vadc(vuint8m4_t op0, uint8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8m8))) +vuint8m8_t vadc(vuint8m8_t op0, uint8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8mf2))) +vuint8mf2_t vadc(vuint8mf2_t op0, uint8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8mf4))) +vuint8mf4_t vadc(vuint8mf4_t op0, uint8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u8mf8))) +vuint8mf8_t vadc(vuint8mf8_t op0, uint8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u16m1))) +vuint16m1_t vadc(vuint16m1_t op0, uint16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u16m2))) +vuint16m2_t vadc(vuint16m2_t op0, uint16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u16m4))) +vuint16m4_t vadc(vuint16m4_t op0, uint16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u16m8))) +vuint16m8_t vadc(vuint16m8_t op0, uint16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u16mf2))) +vuint16mf2_t vadc(vuint16mf2_t op0, uint16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u16mf4))) +vuint16mf4_t vadc(vuint16mf4_t op0, uint16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u32m1))) +vuint32m1_t vadc(vuint32m1_t op0, uint32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u32m2))) +vuint32m2_t vadc(vuint32m2_t op0, uint32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u32m4))) +vuint32m4_t vadc(vuint32m4_t op0, uint32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u32m8))) +vuint32m8_t vadc(vuint32m8_t op0, uint32_t 
op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u32mf2))) +vuint32mf2_t vadc(vuint32mf2_t op0, uint32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u64m1))) +vuint64m1_t vadc(vuint64m1_t op0, uint64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u64m2))) +vuint64m2_t vadc(vuint64m2_t op0, uint64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u64m4))) +vuint64m4_t vadc(vuint64m4_t op0, uint64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vadc_vxm_u64m8))) +vuint64m8_t vadc(vuint64m8_t op0, uint64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8m1_b8))) +vbool8_t vmadc(vint8m1_t op0, vint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8m2_b4))) +vbool4_t vmadc(vint8m2_t op0, vint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8m4_b2))) +vbool2_t vmadc(vint8m4_t op0, vint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8m8_b1))) +vbool1_t vmadc(vint8m8_t op0, vint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8mf2_b16))) +vbool16_t vmadc(vint8mf2_t op0, vint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8mf4_b32))) +vbool32_t vmadc(vint8mf4_t op0, vint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i8mf8_b64))) +vbool64_t vmadc(vint8mf8_t op0, vint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i16m1_b16))) +vbool16_t vmadc(vint16m1_t op0, vint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i16m2_b8))) +vbool8_t vmadc(vint16m2_t op0, vint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i16m4_b4))) +vbool4_t vmadc(vint16m4_t op0, vint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i16m8_b2))) +vbool2_t vmadc(vint16m8_t op0, vint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i16mf2_b32))) +vbool32_t vmadc(vint16mf2_t op0, vint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i16mf4_b64))) +vbool64_t vmadc(vint16mf4_t op0, vint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i32m1_b32))) +vbool32_t vmadc(vint32m1_t op0, vint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i32m2_b16))) +vbool16_t vmadc(vint32m2_t op0, vint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i32m4_b8))) +vbool8_t vmadc(vint32m4_t op0, vint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i32m8_b4))) +vbool4_t vmadc(vint32m8_t op0, vint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i32mf2_b64))) +vbool64_t vmadc(vint32mf2_t op0, vint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i64m1_b64))) +vbool64_t vmadc(vint64m1_t op0, vint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i64m2_b32))) +vbool32_t vmadc(vint64m2_t op0, vint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i64m4_b16))) +vbool16_t vmadc(vint64m4_t op0, vint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_i64m8_b8))) +vbool8_t vmadc(vint64m8_t op0, vint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8m1_b8))) +vbool8_t vmadc(vint8m1_t op0, int8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8m2_b4))) +vbool4_t vmadc(vint8m2_t op0, int8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8m4_b2))) +vbool2_t vmadc(vint8m4_t op0, int8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8m8_b1))) +vbool1_t vmadc(vint8m8_t op0, int8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8mf2_b16))) +vbool16_t vmadc(vint8mf2_t op0, int8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8mf4_b32))) +vbool32_t vmadc(vint8mf4_t op0, int8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i8mf8_b64))) +vbool64_t vmadc(vint8mf8_t op0, int8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i16m1_b16))) +vbool16_t vmadc(vint16m1_t op0, int16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i16m2_b8))) +vbool8_t vmadc(vint16m2_t op0, int16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i16m4_b4))) +vbool4_t vmadc(vint16m4_t op0, int16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i16m8_b2))) +vbool2_t vmadc(vint16m8_t op0, int16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i16mf2_b32))) +vbool32_t vmadc(vint16mf2_t op0, int16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i16mf4_b64))) +vbool64_t vmadc(vint16mf4_t op0, int16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i32m1_b32))) +vbool32_t vmadc(vint32m1_t op0, int32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i32m2_b16))) +vbool16_t vmadc(vint32m2_t op0, int32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i32m4_b8))) +vbool8_t 
vmadc(vint32m4_t op0, int32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i32m8_b4))) +vbool4_t vmadc(vint32m8_t op0, int32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i32mf2_b64))) +vbool64_t vmadc(vint32mf2_t op0, int32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i64m1_b64))) +vbool64_t vmadc(vint64m1_t op0, int64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i64m2_b32))) +vbool32_t vmadc(vint64m2_t op0, int64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i64m4_b16))) +vbool16_t vmadc(vint64m4_t op0, int64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_i64m8_b8))) +vbool8_t vmadc(vint64m8_t op0, int64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8m1_b8))) +vbool8_t vmadc(vuint8m1_t op0, vuint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8m2_b4))) +vbool4_t vmadc(vuint8m2_t op0, vuint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8m4_b2))) +vbool2_t vmadc(vuint8m4_t op0, vuint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8m8_b1))) +vbool1_t vmadc(vuint8m8_t op0, vuint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8mf2_b16))) +vbool16_t vmadc(vuint8mf2_t op0, vuint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8mf4_b32))) +vbool32_t vmadc(vuint8mf4_t op0, vuint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u8mf8_b64))) +vbool64_t vmadc(vuint8mf8_t op0, vuint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u16m1_b16))) +vbool16_t vmadc(vuint16m1_t op0, vuint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u16m2_b8))) +vbool8_t vmadc(vuint16m2_t op0, vuint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u16m4_b4))) +vbool4_t vmadc(vuint16m4_t op0, vuint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u16m8_b2))) +vbool2_t vmadc(vuint16m8_t op0, vuint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u16mf2_b32))) +vbool32_t vmadc(vuint16mf2_t op0, vuint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u16mf4_b64))) +vbool64_t vmadc(vuint16mf4_t op0, vuint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u32m1_b32))) +vbool32_t vmadc(vuint32m1_t op0, vuint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u32m2_b16))) +vbool16_t vmadc(vuint32m2_t op0, 
vuint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u32m4_b8))) +vbool8_t vmadc(vuint32m4_t op0, vuint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u32m8_b4))) +vbool4_t vmadc(vuint32m8_t op0, vuint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u32mf2_b64))) +vbool64_t vmadc(vuint32mf2_t op0, vuint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u64m1_b64))) +vbool64_t vmadc(vuint64m1_t op0, vuint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u64m2_b32))) +vbool32_t vmadc(vuint64m2_t op0, vuint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u64m4_b16))) +vbool16_t vmadc(vuint64m4_t op0, vuint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vvm_u64m8_b8))) +vbool8_t vmadc(vuint64m8_t op0, vuint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8m1_b8))) +vbool8_t vmadc(vuint8m1_t op0, uint8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8m2_b4))) +vbool4_t vmadc(vuint8m2_t op0, uint8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8m4_b2))) +vbool2_t vmadc(vuint8m4_t op0, uint8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8m8_b1))) +vbool1_t vmadc(vuint8m8_t op0, uint8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8mf2_b16))) +vbool16_t vmadc(vuint8mf2_t op0, uint8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8mf4_b32))) +vbool32_t vmadc(vuint8mf4_t op0, uint8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u8mf8_b64))) +vbool64_t vmadc(vuint8mf8_t op0, uint8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u16m1_b16))) +vbool16_t vmadc(vuint16m1_t op0, uint16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u16m2_b8))) +vbool8_t vmadc(vuint16m2_t op0, uint16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u16m4_b4))) +vbool4_t vmadc(vuint16m4_t op0, uint16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u16m8_b2))) +vbool2_t vmadc(vuint16m8_t op0, uint16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u16mf2_b32))) +vbool32_t vmadc(vuint16mf2_t op0, uint16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u16mf4_b64))) +vbool64_t vmadc(vuint16mf4_t op0, uint16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u32m1_b32))) +vbool32_t vmadc(vuint32m1_t op0, uint32_t op1, vbool32_t op2, size_t 
op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u32m2_b16))) +vbool16_t vmadc(vuint32m2_t op0, uint32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u32m4_b8))) +vbool8_t vmadc(vuint32m4_t op0, uint32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u32m8_b4))) +vbool4_t vmadc(vuint32m8_t op0, uint32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u32mf2_b64))) +vbool64_t vmadc(vuint32mf2_t op0, uint32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u64m1_b64))) +vbool64_t vmadc(vuint64m1_t op0, uint64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u64m2_b32))) +vbool32_t vmadc(vuint64m2_t op0, uint64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u64m4_b16))) +vbool16_t vmadc(vuint64m4_t op0, uint64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vxm_u64m8_b8))) +vbool8_t vmadc(vuint64m8_t op0, uint64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8m1_b8))) +vbool8_t vmadc(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8m2_b4))) +vbool4_t vmadc(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8m4_b2))) +vbool2_t vmadc(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8m8_b1))) +vbool1_t vmadc(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8mf2_b16))) +vbool16_t vmadc(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8mf4_b32))) +vbool32_t vmadc(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i8mf8_b64))) +vbool64_t vmadc(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i16m1_b16))) +vbool16_t vmadc(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i16m2_b8))) +vbool8_t vmadc(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i16m4_b4))) +vbool4_t vmadc(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i16m8_b2))) +vbool2_t vmadc(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i16mf2_b32))) +vbool32_t vmadc(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i16mf4_b64))) +vbool64_t vmadc(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i32m1_b32))) +vbool32_t vmadc(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i32m2_b16))) +vbool16_t vmadc(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i32m4_b8))) +vbool8_t vmadc(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i32m8_b4))) +vbool4_t vmadc(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i32mf2_b64))) +vbool64_t vmadc(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i64m1_b64))) +vbool64_t vmadc(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i64m2_b32))) +vbool32_t vmadc(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i64m4_b16))) +vbool16_t vmadc(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_i64m8_b8))) +vbool8_t vmadc(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8m1_b8))) +vbool8_t vmadc(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8m2_b4))) +vbool4_t vmadc(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8m4_b2))) +vbool2_t vmadc(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8m8_b1))) +vbool1_t vmadc(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8mf2_b16))) +vbool16_t vmadc(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8mf4_b32))) +vbool32_t vmadc(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i8mf8_b64))) +vbool64_t vmadc(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i16m1_b16))) +vbool16_t vmadc(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i16m2_b8))) +vbool8_t vmadc(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i16m4_b4))) +vbool4_t vmadc(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i16m8_b2))) +vbool2_t vmadc(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i16mf2_b32))) +vbool32_t vmadc(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i16mf4_b64))) +vbool64_t vmadc(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i32m1_b32))) +vbool32_t vmadc(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i32m2_b16))) +vbool16_t vmadc(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i32m4_b8))) +vbool8_t vmadc(vint32m4_t op0, 
int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i32m8_b4))) +vbool4_t vmadc(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i32mf2_b64))) +vbool64_t vmadc(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i64m1_b64))) +vbool64_t vmadc(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i64m2_b32))) +vbool32_t vmadc(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i64m4_b16))) +vbool16_t vmadc(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_i64m8_b8))) +vbool8_t vmadc(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m1))) +void vsuxei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m1_m))) +void vsuxei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m2))) +void vsuxei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m2_m))) +void vsuxei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m4))) +void vsuxei8(int16_t * op0, vuint8m2_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m4_m))) +void vsuxei8(vbool4_t op0, int16_t * op1, vuint8m2_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m8))) +void vsuxei8(int16_t * op0, vuint8m4_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16m8_m))) +void vsuxei8(vbool2_t op0, int16_t * op1, vuint8m4_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16mf2))) +void vsuxei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16mf2_m))) +void vsuxei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16mf4))) +void vsuxei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i16mf4_m))) +void vsuxei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8m1_b8))) +vbool8_t vmadc(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8m2_b4))) +vbool4_t vmadc(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8m4_b2))) +vbool2_t vmadc(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8m8_b1))) +vbool1_t vmadc(vuint8m8_t op0, 
vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8mf2_b16))) +vbool16_t vmadc(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8mf4_b32))) +vbool32_t vmadc(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u8mf8_b64))) +vbool64_t vmadc(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u16m1_b16))) +vbool16_t vmadc(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u16m2_b8))) +vbool8_t vmadc(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u16m4_b4))) +vbool4_t vmadc(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u16m8_b2))) +vbool2_t vmadc(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u16mf2_b32))) +vbool32_t vmadc(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u16mf4_b64))) +vbool64_t vmadc(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u32m1_b32))) +vbool32_t vmadc(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u32m2_b16))) +vbool16_t vmadc(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u32m4_b8))) +vbool8_t vmadc(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u32m8_b4))) +vbool4_t vmadc(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u32mf2_b64))) +vbool64_t vmadc(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u64m1_b64))) +vbool64_t vmadc(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u64m2_b32))) +vbool32_t vmadc(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u64m4_b16))) +vbool16_t vmadc(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vv_u64m8_b8))) +vbool8_t vmadc(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8m1_b8))) +vbool8_t vmadc(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8m2_b4))) +vbool4_t vmadc(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8m4_b2))) +vbool2_t vmadc(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8m8_b1))) +vbool1_t vmadc(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8mf2_b16))) +vbool16_t vmadc(vuint8mf2_t op0, uint8_t op1, size_t op2); 
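/* [Illustrative sketch; not part of the generated header or this patch.]
 * The unmasked vmadc overloads above return only the carry-out mask of an
 * addition, which makes them a cheap per-lane unsigned-overflow test.
 * Assumes an rv64gcv toolchain; the function name overflow_u32 is
 * hypothetical. */
static inline vbool32_t overflow_u32(vuint32m1_t a, vuint32m1_t b, size_t vl) {
  /* lanes where a + b wraps modulo 2^32: vmadc.vv with no carry-in */
  return vmadc(a, b, vl);
}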
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8mf4_b32))) +vbool32_t vmadc(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u8mf8_b64))) +vbool64_t vmadc(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u16m1_b16))) +vbool16_t vmadc(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u16m2_b8))) +vbool8_t vmadc(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u16m4_b4))) +vbool4_t vmadc(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u16m8_b2))) +vbool2_t vmadc(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u16mf2_b32))) +vbool32_t vmadc(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u16mf4_b64))) +vbool64_t vmadc(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u32m1_b32))) +vbool32_t vmadc(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u32m2_b16))) +vbool16_t vmadc(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u32m4_b8))) +vbool8_t vmadc(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u32m8_b4))) +vbool4_t vmadc(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u32mf2_b64))) +vbool64_t vmadc(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u64m1_b64))) +vbool64_t vmadc(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u64m2_b32))) +vbool32_t vmadc(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u64m4_b16))) +vbool16_t vmadc(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadc_vx_u64m8_b8))) +vbool8_t vmadc(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8m1))) +vint8m1_t vsbc(vint8m1_t op0, vint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8m2))) +vint8m2_t vsbc(vint8m2_t op0, vint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8m4))) +vint8m4_t vsbc(vint8m4_t op0, vint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8m8))) +vint8m8_t vsbc(vint8m8_t op0, vint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8mf2))) +vint8mf2_t vsbc(vint8mf2_t op0, vint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8mf4))) +vint8mf4_t vsbc(vint8mf4_t op0, vint8mf4_t op1, vbool32_t op2, size_t op3); + 
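/* [Illustrative sketch; not part of the generated header or this patch.]
 * The vadc/vmadc pairs declared above compose into a full add-with-carry:
 * vadc consumes a carry-in mask, vmadc produces the carry-out mask, and
 * both must read the original operands, so the carry-out is computed
 * before the sum overwrites anything. Assumes rv64gcv; the helper name
 * add_u64_with_carry is hypothetical. */
static inline vuint64m1_t add_u64_with_carry(vuint64m1_t a, vuint64m1_t b,
                                             vbool64_t carry_in,
                                             vbool64_t *carry_out, size_t vl) {
  *carry_out = vmadc(a, b, carry_in, vl);  /* vmadc.vvm: carry-out mask */
  return vadc(a, b, carry_in, vl);         /* vadc.vvm: a + b + carry_in */
}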
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i8mf8))) +vint8mf8_t vsbc(vint8mf8_t op0, vint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i16m1))) +vint16m1_t vsbc(vint16m1_t op0, vint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i16m2))) +vint16m2_t vsbc(vint16m2_t op0, vint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i16m4))) +vint16m4_t vsbc(vint16m4_t op0, vint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i16m8))) +vint16m8_t vsbc(vint16m8_t op0, vint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i16mf2))) +vint16mf2_t vsbc(vint16mf2_t op0, vint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i16mf4))) +vint16mf4_t vsbc(vint16mf4_t op0, vint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i32m1))) +vint32m1_t vsbc(vint32m1_t op0, vint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i32m2))) +vint32m2_t vsbc(vint32m2_t op0, vint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i32m4))) +vint32m4_t vsbc(vint32m4_t op0, vint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i32m8))) +vint32m8_t vsbc(vint32m8_t op0, vint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i32mf2))) +vint32mf2_t vsbc(vint32mf2_t op0, vint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i64m1))) +vint64m1_t vsbc(vint64m1_t op0, vint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i64m2))) +vint64m2_t vsbc(vint64m2_t op0, vint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i64m4))) +vint64m4_t vsbc(vint64m4_t op0, vint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_i64m8))) +vint64m8_t vsbc(vint64m8_t op0, vint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8m1))) +vint8m1_t vsbc(vint8m1_t op0, int8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8m2))) +vint8m2_t vsbc(vint8m2_t op0, int8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8m4))) +vint8m4_t vsbc(vint8m4_t op0, int8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8m8))) +vint8m8_t vsbc(vint8m8_t op0, int8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8mf2))) +vint8mf2_t vsbc(vint8mf2_t op0, int8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8mf4))) +vint8mf4_t vsbc(vint8mf4_t op0, int8_t op1, vbool32_t 
op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i8mf8))) +vint8mf8_t vsbc(vint8mf8_t op0, int8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i16m1))) +vint16m1_t vsbc(vint16m1_t op0, int16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i16m2))) +vint16m2_t vsbc(vint16m2_t op0, int16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i16m4))) +vint16m4_t vsbc(vint16m4_t op0, int16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i16m8))) +vint16m8_t vsbc(vint16m8_t op0, int16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i16mf2))) +vint16mf2_t vsbc(vint16mf2_t op0, int16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i16mf4))) +vint16mf4_t vsbc(vint16mf4_t op0, int16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i32m1))) +vint32m1_t vsbc(vint32m1_t op0, int32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i32m2))) +vint32m2_t vsbc(vint32m2_t op0, int32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i32m4))) +vint32m4_t vsbc(vint32m4_t op0, int32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i32m8))) +vint32m8_t vsbc(vint32m8_t op0, int32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i32mf2))) +vint32mf2_t vsbc(vint32mf2_t op0, int32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i64m1))) +vint64m1_t vsbc(vint64m1_t op0, int64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i64m2))) +vint64m2_t vsbc(vint64m2_t op0, int64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i64m4))) +vint64m4_t vsbc(vint64m4_t op0, int64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_i64m8))) +vint64m8_t vsbc(vint64m8_t op0, int64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8m1))) +vuint8m1_t vsbc(vuint8m1_t op0, vuint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8m2))) +vuint8m2_t vsbc(vuint8m2_t op0, vuint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8m4))) +vuint8m4_t vsbc(vuint8m4_t op0, vuint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8m8))) +vuint8m8_t vsbc(vuint8m8_t op0, vuint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8mf2))) +vuint8mf2_t vsbc(vuint8mf2_t op0, vuint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8mf4))) +vuint8mf4_t vsbc(vuint8mf4_t op0, vuint8mf4_t op1, 
vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u8mf8))) +vuint8mf8_t vsbc(vuint8mf8_t op0, vuint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u16m1))) +vuint16m1_t vsbc(vuint16m1_t op0, vuint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u16m2))) +vuint16m2_t vsbc(vuint16m2_t op0, vuint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u16m4))) +vuint16m4_t vsbc(vuint16m4_t op0, vuint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u16m8))) +vuint16m8_t vsbc(vuint16m8_t op0, vuint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u16mf2))) +vuint16mf2_t vsbc(vuint16mf2_t op0, vuint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u16mf4))) +vuint16mf4_t vsbc(vuint16mf4_t op0, vuint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u32m1))) +vuint32m1_t vsbc(vuint32m1_t op0, vuint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u32m2))) +vuint32m2_t vsbc(vuint32m2_t op0, vuint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u32m4))) +vuint32m4_t vsbc(vuint32m4_t op0, vuint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u32m8))) +vuint32m8_t vsbc(vuint32m8_t op0, vuint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u32mf2))) +vuint32mf2_t vsbc(vuint32mf2_t op0, vuint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u64m1))) +vuint64m1_t vsbc(vuint64m1_t op0, vuint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u64m2))) +vuint64m2_t vsbc(vuint64m2_t op0, vuint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u64m4))) +vuint64m4_t vsbc(vuint64m4_t op0, vuint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vvm_u64m8))) +vuint64m8_t vsbc(vuint64m8_t op0, vuint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8m1))) +vuint8m1_t vsbc(vuint8m1_t op0, uint8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8m2))) +vuint8m2_t vsbc(vuint8m2_t op0, uint8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8m4))) +vuint8m4_t vsbc(vuint8m4_t op0, uint8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8m8))) +vuint8m8_t vsbc(vuint8m8_t op0, uint8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8mf2))) +vuint8mf2_t vsbc(vuint8mf2_t op0, uint8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8mf4))) +vuint8mf4_t vsbc(vuint8mf4_t op0, uint8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u8mf8))) +vuint8mf8_t vsbc(vuint8mf8_t op0, uint8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u16m1))) +vuint16m1_t vsbc(vuint16m1_t op0, uint16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u16m2))) +vuint16m2_t vsbc(vuint16m2_t op0, uint16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u16m4))) +vuint16m4_t vsbc(vuint16m4_t op0, uint16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u16m8))) +vuint16m8_t vsbc(vuint16m8_t op0, uint16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u16mf2))) +vuint16mf2_t vsbc(vuint16mf2_t op0, uint16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u16mf4))) +vuint16mf4_t vsbc(vuint16mf4_t op0, uint16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u32m1))) +vuint32m1_t vsbc(vuint32m1_t op0, uint32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u32m2))) +vuint32m2_t vsbc(vuint32m2_t op0, uint32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u32m4))) +vuint32m4_t vsbc(vuint32m4_t op0, uint32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u32m8))) +vuint32m8_t vsbc(vuint32m8_t op0, uint32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u32mf2))) +vuint32mf2_t vsbc(vuint32mf2_t op0, uint32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u64m1))) +vuint64m1_t vsbc(vuint64m1_t op0, uint64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u64m2))) +vuint64m2_t vsbc(vuint64m2_t op0, uint64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u64m4))) +vuint64m4_t vsbc(vuint64m4_t op0, uint64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsbc_vxm_u64m8))) +vuint64m8_t vsbc(vuint64m8_t op0, uint64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8m1_b8))) +vbool8_t vmsbc(vint8m1_t op0, vint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8m2_b4))) +vbool4_t vmsbc(vint8m2_t op0, vint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8m4_b2))) +vbool2_t vmsbc(vint8m4_t op0, vint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8m8_b1))) +vbool1_t vmsbc(vint8m8_t op0, vint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8mf2_b16))) +vbool16_t vmsbc(vint8mf2_t op0, 
vint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8mf4_b32))) +vbool32_t vmsbc(vint8mf4_t op0, vint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i8mf8_b64))) +vbool64_t vmsbc(vint8mf8_t op0, vint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i16m1_b16))) +vbool16_t vmsbc(vint16m1_t op0, vint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i16m2_b8))) +vbool8_t vmsbc(vint16m2_t op0, vint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i16m4_b4))) +vbool4_t vmsbc(vint16m4_t op0, vint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i16m8_b2))) +vbool2_t vmsbc(vint16m8_t op0, vint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i16mf2_b32))) +vbool32_t vmsbc(vint16mf2_t op0, vint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i16mf4_b64))) +vbool64_t vmsbc(vint16mf4_t op0, vint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i32m1_b32))) +vbool32_t vmsbc(vint32m1_t op0, vint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i32m2_b16))) +vbool16_t vmsbc(vint32m2_t op0, vint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i32m4_b8))) +vbool8_t vmsbc(vint32m4_t op0, vint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i32m8_b4))) +vbool4_t vmsbc(vint32m8_t op0, vint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i32mf2_b64))) +vbool64_t vmsbc(vint32mf2_t op0, vint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i64m1_b64))) +vbool64_t vmsbc(vint64m1_t op0, vint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i64m2_b32))) +vbool32_t vmsbc(vint64m2_t op0, vint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i64m4_b16))) +vbool16_t vmsbc(vint64m4_t op0, vint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_i64m8_b8))) +vbool8_t vmsbc(vint64m8_t op0, vint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8m1_b8))) +vbool8_t vmsbc(vint8m1_t op0, int8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8m2_b4))) +vbool4_t vmsbc(vint8m2_t op0, int8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8m4_b2))) +vbool2_t vmsbc(vint8m4_t op0, int8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8m8_b1))) +vbool1_t vmsbc(vint8m8_t op0, int8_t op1, vbool1_t op2, size_t op3); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8mf2_b16))) +vbool16_t vmsbc(vint8mf2_t op0, int8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8mf4_b32))) +vbool32_t vmsbc(vint8mf4_t op0, int8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i8mf8_b64))) +vbool64_t vmsbc(vint8mf8_t op0, int8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i16m1_b16))) +vbool16_t vmsbc(vint16m1_t op0, int16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i16m2_b8))) +vbool8_t vmsbc(vint16m2_t op0, int16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i16m4_b4))) +vbool4_t vmsbc(vint16m4_t op0, int16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i16m8_b2))) +vbool2_t vmsbc(vint16m8_t op0, int16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i16mf2_b32))) +vbool32_t vmsbc(vint16mf2_t op0, int16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i16mf4_b64))) +vbool64_t vmsbc(vint16mf4_t op0, int16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i32m1_b32))) +vbool32_t vmsbc(vint32m1_t op0, int32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i32m2_b16))) +vbool16_t vmsbc(vint32m2_t op0, int32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i32m4_b8))) +vbool8_t vmsbc(vint32m4_t op0, int32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i32m8_b4))) +vbool4_t vmsbc(vint32m8_t op0, int32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i32mf2_b64))) +vbool64_t vmsbc(vint32mf2_t op0, int32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i64m1_b64))) +vbool64_t vmsbc(vint64m1_t op0, int64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i64m2_b32))) +vbool32_t vmsbc(vint64m2_t op0, int64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i64m4_b16))) +vbool16_t vmsbc(vint64m4_t op0, int64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_i64m8_b8))) +vbool8_t vmsbc(vint64m8_t op0, int64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8m1_b8))) +vbool8_t vmsbc(vuint8m1_t op0, vuint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8m2_b4))) +vbool4_t vmsbc(vuint8m2_t op0, vuint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8m4_b2))) +vbool2_t vmsbc(vuint8m4_t op0, vuint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8m8_b1))) +vbool1_t vmsbc(vuint8m8_t op0, vuint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8mf2_b16))) +vbool16_t vmsbc(vuint8mf2_t op0, vuint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8mf4_b32))) +vbool32_t vmsbc(vuint8mf4_t op0, vuint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u8mf8_b64))) +vbool64_t vmsbc(vuint8mf8_t op0, vuint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u16m1_b16))) +vbool16_t vmsbc(vuint16m1_t op0, vuint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u16m2_b8))) +vbool8_t vmsbc(vuint16m2_t op0, vuint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u16m4_b4))) +vbool4_t vmsbc(vuint16m4_t op0, vuint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u16m8_b2))) +vbool2_t vmsbc(vuint16m8_t op0, vuint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u16mf2_b32))) +vbool32_t vmsbc(vuint16mf2_t op0, vuint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u16mf4_b64))) +vbool64_t vmsbc(vuint16mf4_t op0, vuint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u32m1_b32))) +vbool32_t vmsbc(vuint32m1_t op0, vuint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u32m2_b16))) +vbool16_t vmsbc(vuint32m2_t op0, vuint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u32m4_b8))) +vbool8_t vmsbc(vuint32m4_t op0, vuint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u32m8_b4))) +vbool4_t vmsbc(vuint32m8_t op0, vuint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u32mf2_b64))) +vbool64_t vmsbc(vuint32mf2_t op0, vuint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u64m1_b64))) +vbool64_t vmsbc(vuint64m1_t op0, vuint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u64m2_b32))) +vbool32_t vmsbc(vuint64m2_t op0, vuint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u64m4_b16))) +vbool16_t vmsbc(vuint64m4_t op0, vuint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vvm_u64m8_b8))) +vbool8_t vmsbc(vuint64m8_t op0, vuint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8m1_b8))) +vbool8_t vmsbc(vuint8m1_t op0, uint8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8m2_b4))) +vbool4_t vmsbc(vuint8m2_t op0, uint8_t op1, vbool4_t op2, size_t op3); + 
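/* [Illustrative sketch; not part of the generated header or this patch.]
 * vsbc/vmsbc mirror vadc/vmadc for subtraction: vsbc consumes a borrow-in
 * mask, vmsbc produces the borrow-out mask, enabling multi-limb bignum
 * subtraction. Assumes rv64gcv; the helper name sub_u64_with_borrow is
 * hypothetical. */
static inline vuint64m1_t sub_u64_with_borrow(vuint64m1_t a, vuint64m1_t b,
                                              vbool64_t borrow_in,
                                              vbool64_t *borrow_out,
                                              size_t vl) {
  *borrow_out = vmsbc(a, b, borrow_in, vl); /* vmsbc.vvm: borrow-out mask */
  return vsbc(a, b, borrow_in, vl);         /* vsbc.vvm: a - b - borrow_in */
}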
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8m4_b2))) +vbool2_t vmsbc(vuint8m4_t op0, uint8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8m8_b1))) +vbool1_t vmsbc(vuint8m8_t op0, uint8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8mf2_b16))) +vbool16_t vmsbc(vuint8mf2_t op0, uint8_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8mf4_b32))) +vbool32_t vmsbc(vuint8mf4_t op0, uint8_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u8mf8_b64))) +vbool64_t vmsbc(vuint8mf8_t op0, uint8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u16m1_b16))) +vbool16_t vmsbc(vuint16m1_t op0, uint16_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u16m2_b8))) +vbool8_t vmsbc(vuint16m2_t op0, uint16_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u16m4_b4))) +vbool4_t vmsbc(vuint16m4_t op0, uint16_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u16m8_b2))) +vbool2_t vmsbc(vuint16m8_t op0, uint16_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u16mf2_b32))) +vbool32_t vmsbc(vuint16mf2_t op0, uint16_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u16mf4_b64))) +vbool64_t vmsbc(vuint16mf4_t op0, uint16_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u32m1_b32))) +vbool32_t vmsbc(vuint32m1_t op0, uint32_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u32m2_b16))) +vbool16_t vmsbc(vuint32m2_t op0, uint32_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u32m4_b8))) +vbool8_t vmsbc(vuint32m4_t op0, uint32_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u32m8_b4))) +vbool4_t vmsbc(vuint32m8_t op0, uint32_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u32mf2_b64))) +vbool64_t vmsbc(vuint32mf2_t op0, uint32_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u64m1_b64))) +vbool64_t vmsbc(vuint64m1_t op0, uint64_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u64m2_b32))) +vbool32_t vmsbc(vuint64m2_t op0, uint64_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u64m4_b16))) +vbool16_t vmsbc(vuint64m4_t op0, uint64_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vxm_u64m8_b8))) +vbool8_t vmsbc(vuint64m8_t op0, uint64_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m1))) +void vsuxei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m1_m))) +void vsuxei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m2))) +void vsuxei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m2_m))) +void vsuxei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m4))) +void vsuxei8(uint16_t * op0, vuint8m2_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m4_m))) +void vsuxei8(vbool4_t op0, uint16_t * op1, vuint8m2_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m8))) +void vsuxei8(uint16_t * op0, vuint8m4_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16m8_m))) +void vsuxei8(vbool2_t op0, uint16_t * op1, vuint8m4_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16mf2))) +void vsuxei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16mf2_m))) +void vsuxei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16mf4))) +void vsuxei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u16mf4_m))) +void vsuxei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8m1_b8))) +vbool8_t vmsbc(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8m2_b4))) +vbool4_t vmsbc(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8m4_b2))) +vbool2_t vmsbc(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8m8_b1))) +vbool1_t vmsbc(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8mf2_b16))) +vbool16_t vmsbc(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8mf4_b32))) +vbool32_t vmsbc(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i8mf8_b64))) +vbool64_t vmsbc(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i16m1_b16))) +vbool16_t vmsbc(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i16m2_b8))) +vbool8_t vmsbc(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i16m4_b4))) +vbool4_t vmsbc(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i16m8_b2))) +vbool2_t vmsbc(vint16m8_t op0, vint16m8_t op1, size_t op2); + 
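The vsuxei8 overloads a few declarations up are unordered indexed ("scatter") stores: the index operand holds unsigned 8-bit *byte* offsets from the base pointer, so a 16-bit element scattered to slot i needs an offset of 2*i, and the masked _m forms take the mask as the first operand. A small sketch, not part of the patch, under the same assumptions as before (vsetvl_e16m1, vle16_v_u16m1, vle8_v_u8mf2 and the overloaded vadd come from the same header, not this hunk):

/* Editorial sketch, not part of the patch: table[idx[i]] = vals[i]. */
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

void scatter_u16(uint16_t *table, const uint8_t *idx,
                 const uint16_t *vals, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e16m1(n);
    vuint16m1_t v = vle16_v_u16m1(vals, vl);
    vuint8mf2_t off = vle8_v_u8mf2(idx, vl);
    off = vadd(off, off, vl);    /* byte offset = 2 * element index; element
                                    indices >= 128 would need the wider
                                    vsuxei16 forms declared later in the hunk */
    vsuxei8(table, off, v, vl);  /* unordered scatter overload from the hunk */
    idx += vl; vals += vl; n -= vl;
  }
}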
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i16mf2_b32))) +vbool32_t vmsbc(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i16mf4_b64))) +vbool64_t vmsbc(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i32m1_b32))) +vbool32_t vmsbc(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i32m2_b16))) +vbool16_t vmsbc(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i32m4_b8))) +vbool8_t vmsbc(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i32m8_b4))) +vbool4_t vmsbc(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i32mf2_b64))) +vbool64_t vmsbc(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i64m1_b64))) +vbool64_t vmsbc(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i64m2_b32))) +vbool32_t vmsbc(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i64m4_b16))) +vbool16_t vmsbc(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_i64m8_b8))) +vbool8_t vmsbc(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8m1_b8))) +vbool8_t vmsbc(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8m2_b4))) +vbool4_t vmsbc(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8m4_b2))) +vbool2_t vmsbc(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8m8_b1))) +vbool1_t vmsbc(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8mf2_b16))) +vbool16_t vmsbc(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8mf4_b32))) +vbool32_t vmsbc(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i8mf8_b64))) +vbool64_t vmsbc(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i16m1_b16))) +vbool16_t vmsbc(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i16m2_b8))) +vbool8_t vmsbc(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i16m4_b4))) +vbool4_t vmsbc(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i16m8_b2))) +vbool2_t vmsbc(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i16mf2_b32))) +vbool32_t vmsbc(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i16mf4_b64))) 
+vbool64_t vmsbc(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i32m1_b32))) +vbool32_t vmsbc(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i32m2_b16))) +vbool16_t vmsbc(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i32m4_b8))) +vbool8_t vmsbc(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i32m8_b4))) +vbool4_t vmsbc(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i32mf2_b64))) +vbool64_t vmsbc(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i64m1_b64))) +vbool64_t vmsbc(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i64m2_b32))) +vbool32_t vmsbc(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i64m4_b16))) +vbool16_t vmsbc(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_i64m8_b8))) +vbool8_t vmsbc(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8m1_b8))) +vbool8_t vmsbc(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8m2_b4))) +vbool4_t vmsbc(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8m4_b2))) +vbool2_t vmsbc(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8m8_b1))) +vbool1_t vmsbc(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8mf2_b16))) +vbool16_t vmsbc(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8mf4_b32))) +vbool32_t vmsbc(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u8mf8_b64))) +vbool64_t vmsbc(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u16m1_b16))) +vbool16_t vmsbc(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u16m2_b8))) +vbool8_t vmsbc(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u16m4_b4))) +vbool4_t vmsbc(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u16m8_b2))) +vbool2_t vmsbc(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u16mf2_b32))) +vbool32_t vmsbc(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u16mf4_b64))) +vbool64_t vmsbc(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u32m1_b32))) +vbool32_t vmsbc(vuint32m1_t op0, vuint32m1_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u32m2_b16))) +vbool16_t vmsbc(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u32m4_b8))) +vbool8_t vmsbc(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u32m8_b4))) +vbool4_t vmsbc(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u32mf2_b64))) +vbool64_t vmsbc(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u64m1_b64))) +vbool64_t vmsbc(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u64m2_b32))) +vbool32_t vmsbc(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u64m4_b16))) +vbool16_t vmsbc(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vv_u64m8_b8))) +vbool8_t vmsbc(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8m1_b8))) +vbool8_t vmsbc(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8m2_b4))) +vbool4_t vmsbc(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8m4_b2))) +vbool2_t vmsbc(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8m8_b1))) +vbool1_t vmsbc(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8mf2_b16))) +vbool16_t vmsbc(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8mf4_b32))) +vbool32_t vmsbc(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u8mf8_b64))) +vbool64_t vmsbc(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u16m1_b16))) +vbool16_t vmsbc(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u16m2_b8))) +vbool8_t vmsbc(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u16m4_b4))) +vbool4_t vmsbc(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u16m8_b2))) +vbool2_t vmsbc(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u16mf2_b32))) +vbool32_t vmsbc(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u16mf4_b64))) +vbool64_t vmsbc(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u32m1_b32))) +vbool32_t vmsbc(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u32m2_b16))) +vbool16_t vmsbc(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u32m4_b8))) +vbool8_t vmsbc(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u32m8_b4))) +vbool4_t vmsbc(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u32mf2_b64))) +vbool64_t vmsbc(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u64m1_b64))) +vbool64_t vmsbc(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u64m2_b32))) +vbool32_t vmsbc(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u64m4_b16))) +vbool16_t vmsbc(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbc_vx_u64m8_b8))) +vbool8_t vmsbc(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m1))) +vint8m1_t vand(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m1_m))) +vint8m1_t vand(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m2))) +vint8m2_t vand(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m2_m))) +vint8m2_t vand(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m4))) +vint8m4_t vand(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m4_m))) +vint8m4_t vand(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m8))) +vint8m8_t vand(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8m8_m))) +vint8m8_t vand(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8mf2))) +vint8mf2_t vand(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8mf2_m))) +vint8mf2_t vand(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8mf4))) +vint8mf4_t vand(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8mf4_m))) +vint8mf4_t vand(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8mf8))) +vint8mf8_t vand(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i8mf8_m))) +vint8mf8_t vand(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m1))) +vint16m1_t vand(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m1_m))) +vint16m1_t 
vand(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m2))) +vint16m2_t vand(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m2_m))) +vint16m2_t vand(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m4))) +vint16m4_t vand(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m4_m))) +vint16m4_t vand(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m8))) +vint16m8_t vand(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16m8_m))) +vint16m8_t vand(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16mf2))) +vint16mf2_t vand(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16mf2_m))) +vint16mf2_t vand(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16mf4))) +vint16mf4_t vand(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i16mf4_m))) +vint16mf4_t vand(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m1))) +vint32m1_t vand(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m1_m))) +vint32m1_t vand(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m2))) +vint32m2_t vand(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m2_m))) +vint32m2_t vand(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m4))) +vint32m4_t vand(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m4_m))) +vint32m4_t vand(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m8))) +vint32m8_t vand(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32m8_m))) +vint32m8_t vand(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32mf2))) +vint32mf2_t vand(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i32mf2_m))) +vint32mf2_t vand(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m1))) +vint64m1_t vand(vint64m1_t op0, vint64m1_t op1, size_t op2); + 
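In the masked _m overloads above the operand order is (mask, maskedoff, op1, op2, vl): active lanes compute op1 & op2, inactive lanes are copied from maskedoff. A sketch, not part of the patch, that clears the low nibble only in negative lanes, using the masked vector-scalar vand overload declared just below and assuming the vsetvl_e8m1/vle8_v_i8m1/vse8_v_i8m1/vmslt intrinsics from the same header:

/* Editorial sketch, not part of the patch. */
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

void and_low_nibble_of_negatives(int8_t *data, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e8m1(n);
    vint8m1_t v = vle8_v_i8m1(data, vl);
    vbool8_t neg = vmslt(v, 0, vl);        /* mask of lanes with v < 0 */
    /* (mask, maskedoff, op1, op2, vl): non-negative lanes keep v as-is. */
    v = vand(neg, v, v, (int8_t)0xF0, vl);
    vse8_v_i8m1(data, v, vl);
    data += vl; n -= vl;
  }
}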
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m1_m))) +vint64m1_t vand(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m2))) +vint64m2_t vand(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m2_m))) +vint64m2_t vand(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m4))) +vint64m4_t vand(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m4_m))) +vint64m4_t vand(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m8))) +vint64m8_t vand(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_i64m8_m))) +vint64m8_t vand(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m1))) +vint8m1_t vand(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m1_m))) +vint8m1_t vand(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m2))) +vint8m2_t vand(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m2_m))) +vint8m2_t vand(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m4))) +vint8m4_t vand(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m4_m))) +vint8m4_t vand(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m8))) +vint8m8_t vand(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8m8_m))) +vint8m8_t vand(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8mf2))) +vint8mf2_t vand(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8mf2_m))) +vint8mf2_t vand(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8mf4))) +vint8mf4_t vand(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8mf4_m))) +vint8mf4_t vand(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8mf8))) +vint8mf8_t vand(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i8mf8_m))) +vint8mf8_t vand(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m1))) +vint16m1_t vand(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m1_m))) +vint16m1_t vand(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m2))) +vint16m2_t vand(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m2_m))) +vint16m2_t vand(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m4))) +vint16m4_t vand(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m4_m))) +vint16m4_t vand(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m8))) +vint16m8_t vand(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16m8_m))) +vint16m8_t vand(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16mf2))) +vint16mf2_t vand(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16mf2_m))) +vint16mf2_t vand(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16mf4))) +vint16mf4_t vand(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i16mf4_m))) +vint16mf4_t vand(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m1))) +vint32m1_t vand(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m1_m))) +vint32m1_t vand(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m2))) +vint32m2_t vand(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m2_m))) +vint32m2_t vand(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m4))) +vint32m4_t vand(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m4_m))) +vint32m4_t vand(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m8))) +vint32m8_t vand(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32m8_m))) +vint32m8_t vand(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32mf2))) +vint32mf2_t vand(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i32mf2_m))) +vint32mf2_t vand(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m1))) +vint64m1_t vand(vint64m1_t op0, int64_t op1, size_t op2); + 
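The vector-scalar (vx) overloads splat the scalar operand across all active lanes, so a single immediate-style mask can be applied without first materializing a vector. A minimal sketch, not part of the patch, rounding each element down to a multiple of 8 with the vand_vx_i64m1 overload declared above (vsetvl_e64m1/vle64_v_i64m1/vse64_v_i64m1 assumed from the same header):

/* Editorial sketch, not part of the patch. */
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

void align_down8(int64_t *data, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e64m1(n);
    vint64m1_t v = vle64_v_i64m1(data, vl);
    v = vand(v, (int64_t)~7, vl);   /* each lane: v & ~7 */
    vse64_v_i64m1(data, v, vl);
    data += vl; n -= vl;
  }
}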
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m1_m))) +vint64m1_t vand(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m2))) +vint64m2_t vand(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m2_m))) +vint64m2_t vand(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m4))) +vint64m4_t vand(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m4_m))) +vint64m4_t vand(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m8))) +vint64m8_t vand(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_i64m8_m))) +vint64m8_t vand(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m1))) +vuint8m1_t vand(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m1_m))) +vuint8m1_t vand(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m2))) +vuint8m2_t vand(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m2_m))) +vuint8m2_t vand(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m4))) +vuint8m4_t vand(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m4_m))) +vuint8m4_t vand(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m8))) +vuint8m8_t vand(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8m8_m))) +vuint8m8_t vand(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8mf2))) +vuint8mf2_t vand(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8mf2_m))) +vuint8mf2_t vand(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8mf4))) +vuint8mf4_t vand(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8mf4_m))) +vuint8mf4_t vand(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8mf8))) +vuint8mf8_t vand(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u8mf8_m))) +vuint8mf8_t vand(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m1))) +vuint16m1_t 
vand(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m1_m))) +vuint16m1_t vand(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m2))) +vuint16m2_t vand(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m2_m))) +vuint16m2_t vand(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m4))) +vuint16m4_t vand(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m4_m))) +vuint16m4_t vand(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m8))) +vuint16m8_t vand(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16m8_m))) +vuint16m8_t vand(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16mf2))) +vuint16mf2_t vand(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16mf2_m))) +vuint16mf2_t vand(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16mf4))) +vuint16mf4_t vand(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u16mf4_m))) +vuint16mf4_t vand(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m1))) +vuint32m1_t vand(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m1_m))) +vuint32m1_t vand(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m2))) +vuint32m2_t vand(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m2_m))) +vuint32m2_t vand(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m4))) +vuint32m4_t vand(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m4_m))) +vuint32m4_t vand(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m8))) +vuint32m8_t vand(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32m8_m))) +vuint32m8_t vand(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32mf2))) +vuint32mf2_t vand(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u32mf2_m))) +vuint32mf2_t vand(vbool64_t op0, 
vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m1))) +vuint64m1_t vand(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m1_m))) +vuint64m1_t vand(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m2))) +vuint64m2_t vand(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m2_m))) +vuint64m2_t vand(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m4))) +vuint64m4_t vand(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m4_m))) +vuint64m4_t vand(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m8))) +vuint64m8_t vand(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vv_u64m8_m))) +vuint64m8_t vand(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m1))) +vuint8m1_t vand(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m1_m))) +vuint8m1_t vand(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m2))) +vuint8m2_t vand(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m2_m))) +vuint8m2_t vand(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m4))) +vuint8m4_t vand(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m4_m))) +vuint8m4_t vand(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m8))) +vuint8m8_t vand(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8m8_m))) +vuint8m8_t vand(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8mf2))) +vuint8mf2_t vand(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8mf2_m))) +vuint8mf2_t vand(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8mf4))) +vuint8mf4_t vand(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8mf4_m))) +vuint8mf4_t vand(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8mf8))) +vuint8mf8_t vand(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u8mf8_m))) +vuint8mf8_t vand(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m1))) +vuint16m1_t vand(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m1_m))) +vuint16m1_t vand(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m2))) +vuint16m2_t vand(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m2_m))) +vuint16m2_t vand(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m4))) +vuint16m4_t vand(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m4_m))) +vuint16m4_t vand(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m8))) +vuint16m8_t vand(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16m8_m))) +vuint16m8_t vand(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16mf2))) +vuint16mf2_t vand(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16mf2_m))) +vuint16mf2_t vand(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16mf4))) +vuint16mf4_t vand(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u16mf4_m))) +vuint16mf4_t vand(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m1))) +vuint32m1_t vand(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m1_m))) +vuint32m1_t vand(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m2))) +vuint32m2_t vand(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m2_m))) +vuint32m2_t vand(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m4))) +vuint32m4_t vand(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m4_m))) +vuint32m4_t vand(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m8))) +vuint32m8_t vand(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32m8_m))) +vuint32m8_t vand(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32mf2))) +vuint32mf2_t vand(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u32mf2_m))) +vuint32mf2_t vand(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m1))) +vuint64m1_t vand(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m1_m))) +vuint64m1_t vand(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m2))) +vuint64m2_t vand(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m2_m))) +vuint64m2_t vand(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m4))) +vuint64m4_t vand(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m4_m))) +vuint64m4_t vand(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m8))) +vuint64m8_t vand(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vand_vx_u64m8_m))) +vuint64m8_t vand(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m1))) +vint8m1_t vxor(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m1_m))) +vint8m1_t vxor(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m2))) +vint8m2_t vxor(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m2_m))) +vint8m2_t vxor(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m4))) +vint8m4_t vxor(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m4_m))) +vint8m4_t vxor(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m8))) +vint8m8_t vxor(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8m8_m))) +vint8m8_t vxor(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8mf2))) +vint8mf2_t vxor(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8mf2_m))) +vint8mf2_t vxor(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8mf4))) +vint8mf4_t vxor(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8mf4_m))) +vint8mf4_t vxor(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8mf8))) +vint8mf8_t vxor(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i8mf8_m))) +vint8mf8_t vxor(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m1))) +vint16m1_t vxor(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m1_m))) +vint16m1_t vxor(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m2))) +vint16m2_t vxor(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m2_m))) +vint16m2_t vxor(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m4))) +vint16m4_t vxor(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m4_m))) +vint16m4_t vxor(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m8))) +vint16m8_t vxor(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16m8_m))) +vint16m8_t vxor(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16mf2))) +vint16mf2_t vxor(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16mf2_m))) +vint16mf2_t vxor(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16mf4))) +vint16mf4_t vxor(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i16mf4_m))) +vint16mf4_t vxor(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m1))) +vint32m1_t vxor(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m1_m))) +vint32m1_t vxor(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m2))) +vint32m2_t vxor(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m2_m))) +vint32m2_t vxor(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m4))) +vint32m4_t vxor(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m4_m))) +vint32m4_t vxor(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m8))) +vint32m8_t vxor(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32m8_m))) +vint32m8_t vxor(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32mf2))) +vint32mf2_t vxor(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i32mf2_m))) +vint32mf2_t vxor(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m1))) +vint64m1_t vxor(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m1_m))) +vint64m1_t vxor(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m2))) +vint64m2_t vxor(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m2_m))) +vint64m2_t vxor(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m4))) +vint64m4_t vxor(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m4_m))) +vint64m4_t vxor(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m8))) +vint64m8_t vxor(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_i64m8_m))) +vint64m8_t vxor(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m1))) +vint8m1_t vxor(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m1_m))) +vint8m1_t vxor(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m2))) +vint8m2_t vxor(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m2_m))) +vint8m2_t vxor(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m4))) +vint8m4_t vxor(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m4_m))) +vint8m4_t vxor(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m8))) +vint8m8_t vxor(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8m8_m))) +vint8m8_t vxor(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8mf2))) +vint8mf2_t vxor(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8mf2_m))) +vint8mf2_t vxor(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8mf4))) +vint8mf4_t vxor(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8mf4_m))) +vint8mf4_t vxor(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8mf8))) +vint8mf8_t vxor(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i8mf8_m))) +vint8mf8_t vxor(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m1))) +vint16m1_t vxor(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m1_m))) +vint16m1_t vxor(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m2))) +vint16m2_t vxor(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m2_m))) +vint16m2_t vxor(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m4))) +vint16m4_t vxor(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m4_m))) +vint16m4_t vxor(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m8))) +vint16m8_t vxor(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16m8_m))) +vint16m8_t vxor(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16mf2))) +vint16mf2_t vxor(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16mf2_m))) +vint16mf2_t vxor(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16mf4))) +vint16mf4_t vxor(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i16mf4_m))) +vint16mf4_t vxor(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m1))) +vint32m1_t vxor(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m1_m))) +vint32m1_t vxor(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m2))) +vint32m2_t vxor(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m2_m))) +vint32m2_t vxor(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m4))) +vint32m4_t vxor(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m4_m))) +vint32m4_t vxor(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m8))) +vint32m8_t vxor(vint32m8_t op0, int32_t op1, size_t op2); + 
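The vxor_vx overloads above also give the standard RVV bitwise-NOT idiom: the vnot pseudo-instruction is defined as an XOR with all-ones, so ~v is simply vxor(v, -1, vl). A short sketch, not part of the patch, assuming vsetvl_e32m1/vle32_v_i32m1/vse32_v_i32m1 from the same header:

/* Editorial sketch, not part of the patch: in-place bitwise NOT. */
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

void bitwise_not(int32_t *data, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e32m1(n);
    vint32m1_t v = vle32_v_i32m1(data, vl);
    v = vxor(v, -1, vl);            /* ~v via the vxor_vx_i32m1 overload */
    vse32_v_i32m1(data, v, vl);
    data += vl; n -= vl;
  }
}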
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32m8_m))) +vint32m8_t vxor(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32mf2))) +vint32mf2_t vxor(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i32mf2_m))) +vint32mf2_t vxor(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m1))) +vint64m1_t vxor(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m1_m))) +vint64m1_t vxor(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m2))) +vint64m2_t vxor(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m2_m))) +vint64m2_t vxor(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m4))) +vint64m4_t vxor(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m4_m))) +vint64m4_t vxor(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m8))) +vint64m8_t vxor(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_i64m8_m))) +vint64m8_t vxor(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m1))) +void vsuxei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m1_m))) +void vsuxei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m2))) +void vsuxei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m2_m))) +void vsuxei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m4))) +void vsuxei16(int16_t * op0, vuint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m4_m))) +void vsuxei16(vbool4_t op0, int16_t * op1, vuint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m8))) +void vsuxei16(int16_t * op0, vuint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16m8_m))) +void vsuxei16(vbool2_t op0, int16_t * op1, vuint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16mf2))) +void vsuxei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16mf2_m))) +void vsuxei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16mf4))) +void vsuxei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i16mf4_m))) +void vsuxei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m1))) +vuint8m1_t vxor(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m1_m))) +vuint8m1_t vxor(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m2))) +vuint8m2_t vxor(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m2_m))) +vuint8m2_t vxor(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m4))) +vuint8m4_t vxor(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m4_m))) +vuint8m4_t vxor(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m8))) +vuint8m8_t vxor(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8m8_m))) +vuint8m8_t vxor(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8mf2))) +vuint8mf2_t vxor(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8mf2_m))) +vuint8mf2_t vxor(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8mf4))) +vuint8mf4_t vxor(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8mf4_m))) +vuint8mf4_t vxor(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8mf8))) +vuint8mf8_t vxor(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u8mf8_m))) +vuint8mf8_t vxor(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m1))) +vuint16m1_t vxor(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m1_m))) +vuint16m1_t vxor(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m2))) +vuint16m2_t vxor(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m2_m))) +vuint16m2_t vxor(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m4))) +vuint16m4_t vxor(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m4_m))) 
+vuint16m4_t vxor(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m8))) +vuint16m8_t vxor(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16m8_m))) +vuint16m8_t vxor(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16mf2))) +vuint16mf2_t vxor(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16mf2_m))) +vuint16mf2_t vxor(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16mf4))) +vuint16mf4_t vxor(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u16mf4_m))) +vuint16mf4_t vxor(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m1))) +vuint32m1_t vxor(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m1_m))) +vuint32m1_t vxor(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m2))) +vuint32m2_t vxor(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m2_m))) +vuint32m2_t vxor(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m4))) +vuint32m4_t vxor(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m4_m))) +vuint32m4_t vxor(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m8))) +vuint32m8_t vxor(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32m8_m))) +vuint32m8_t vxor(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32mf2))) +vuint32mf2_t vxor(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u32mf2_m))) +vuint32mf2_t vxor(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m1))) +vuint64m1_t vxor(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m1_m))) +vuint64m1_t vxor(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m2))) +vuint64m2_t vxor(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m2_m))) +vuint64m2_t vxor(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m4))) +vuint64m4_t vxor(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m4_m))) +vuint64m4_t vxor(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m8))) +vuint64m8_t vxor(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vv_u64m8_m))) +vuint64m8_t vxor(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m1))) +vuint8m1_t vxor(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m1_m))) +vuint8m1_t vxor(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m2))) +vuint8m2_t vxor(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m2_m))) +vuint8m2_t vxor(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m4))) +vuint8m4_t vxor(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m4_m))) +vuint8m4_t vxor(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m8))) +vuint8m8_t vxor(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8m8_m))) +vuint8m8_t vxor(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8mf2))) +vuint8mf2_t vxor(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8mf2_m))) +vuint8mf2_t vxor(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8mf4))) +vuint8mf4_t vxor(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8mf4_m))) +vuint8mf4_t vxor(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8mf8))) +vuint8mf8_t vxor(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u8mf8_m))) +vuint8mf8_t vxor(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m1))) +vuint16m1_t vxor(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m1_m))) +vuint16m1_t vxor(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m2))) +vuint16m2_t vxor(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m2_m))) +vuint16m2_t vxor(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, 
uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m4))) +vuint16m4_t vxor(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m4_m))) +vuint16m4_t vxor(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m8))) +vuint16m8_t vxor(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16m8_m))) +vuint16m8_t vxor(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16mf2))) +vuint16mf2_t vxor(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16mf2_m))) +vuint16mf2_t vxor(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16mf4))) +vuint16mf4_t vxor(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u16mf4_m))) +vuint16mf4_t vxor(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m1))) +vuint32m1_t vxor(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m1_m))) +vuint32m1_t vxor(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m2))) +vuint32m2_t vxor(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m2_m))) +vuint32m2_t vxor(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m4))) +vuint32m4_t vxor(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m4_m))) +vuint32m4_t vxor(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m8))) +vuint32m8_t vxor(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m8_m))) +vuint32m8_t vxor(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32mf2))) +vuint32mf2_t vxor(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32mf2_m))) +vuint32mf2_t vxor(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m1))) +vuint64m1_t vxor(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m1_m))) +vuint64m1_t vxor(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m2))) +vuint64m2_t vxor(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m2_m))) +vuint64m2_t vxor(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m4))) +vuint64m4_t vxor(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m4_m))) +vuint64m4_t vxor(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m8))) +vuint64m8_t vxor(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u64m8_m))) +vuint64m8_t vxor(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m1))) +vint8m1_t vor(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m1_m))) +vint8m1_t vor(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m2))) +vint8m2_t vor(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m2_m))) +vint8m2_t vor(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m4))) +vint8m4_t vor(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m4_m))) +vint8m4_t vor(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m8))) +vint8m8_t vor(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8m8_m))) +vint8m8_t vor(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8mf2))) +vint8mf2_t vor(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8mf2_m))) +vint8mf2_t vor(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8mf4))) +vint8mf4_t vor(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8mf4_m))) +vint8mf4_t vor(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8mf8))) +vint8mf8_t vor(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i8mf8_m))) +vint8mf8_t vor(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m1))) +vint16m1_t vor(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m1_m))) +vint16m1_t vor(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m2))) +vint16m2_t vor(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m2_m))) +vint16m2_t vor(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m4))) +vint16m4_t vor(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m4_m))) +vint16m4_t vor(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m8))) +vint16m8_t vor(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16m8_m))) +vint16m8_t vor(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16mf2))) +vint16mf2_t vor(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16mf2_m))) +vint16mf2_t vor(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16mf4))) +vint16mf4_t vor(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i16mf4_m))) +vint16mf4_t vor(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m1))) +vint32m1_t vor(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m1_m))) +vint32m1_t vor(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m2))) +vint32m2_t vor(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m2_m))) +vint32m2_t vor(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m4))) +vint32m4_t vor(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m4_m))) +vint32m4_t vor(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m8))) +vint32m8_t vor(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32m8_m))) +vint32m8_t vor(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32mf2))) +vint32mf2_t vor(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i32mf2_m))) +vint32mf2_t vor(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m1))) +vint64m1_t vor(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m1_m))) +vint64m1_t vor(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m2))) +vint64m2_t vor(vint64m2_t op0, 
vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m2_m))) +vint64m2_t vor(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m4))) +vint64m4_t vor(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m4_m))) +vint64m4_t vor(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m8))) +vint64m8_t vor(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_i64m8_m))) +vint64m8_t vor(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m1))) +vint8m1_t vor(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m1_m))) +vint8m1_t vor(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m2))) +vint8m2_t vor(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m2_m))) +vint8m2_t vor(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m4))) +vint8m4_t vor(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m4_m))) +vint8m4_t vor(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m8))) +vint8m8_t vor(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8m8_m))) +vint8m8_t vor(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8mf2))) +vint8mf2_t vor(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8mf2_m))) +vint8mf2_t vor(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8mf4))) +vint8mf4_t vor(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8mf4_m))) +vint8mf4_t vor(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8mf8))) +vint8mf8_t vor(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i8mf8_m))) +vint8mf8_t vor(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m1))) +vint16m1_t vor(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m1_m))) +vint16m1_t vor(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m2))) +vint16m2_t vor(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m2_m))) +vint16m2_t vor(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m4))) +vint16m4_t vor(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m4_m))) +vint16m4_t vor(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m8))) +vint16m8_t vor(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16m8_m))) +vint16m8_t vor(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16mf2))) +vint16mf2_t vor(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16mf2_m))) +vint16mf2_t vor(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16mf4))) +vint16mf4_t vor(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i16mf4_m))) +vint16mf4_t vor(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m1))) +vint32m1_t vor(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m1_m))) +vint32m1_t vor(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m2))) +vint32m2_t vor(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m2_m))) +vint32m2_t vor(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m4))) +vint32m4_t vor(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m4_m))) +vint32m4_t vor(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m8))) +vint32m8_t vor(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32m8_m))) +vint32m8_t vor(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32mf2))) +vint32mf2_t vor(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i32mf2_m))) +vint32mf2_t vor(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m1))) +vint64m1_t vor(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m1_m))) +vint64m1_t vor(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m2))) +vint64m2_t vor(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m2_m))) +vint64m2_t vor(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m4))) +vint64m4_t vor(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m4_m))) +vint64m4_t vor(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m8))) +vint64m8_t vor(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_i64m8_m))) +vint64m8_t vor(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m1))) +vuint8m1_t vor(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m1_m))) +vuint8m1_t vor(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m2))) +vuint8m2_t vor(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m2_m))) +vuint8m2_t vor(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m4))) +vuint8m4_t vor(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m4_m))) +vuint8m4_t vor(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m8))) +vuint8m8_t vor(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8m8_m))) +vuint8m8_t vor(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8mf2))) +vuint8mf2_t vor(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8mf2_m))) +vuint8mf2_t vor(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8mf4))) +vuint8mf4_t vor(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8mf4_m))) +vuint8mf4_t vor(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8mf8))) +vuint8mf8_t vor(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u8mf8_m))) +vuint8mf8_t vor(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m1))) +vuint16m1_t vor(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m1_m))) +vuint16m1_t vor(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m2))) +vuint16m2_t vor(vuint16m2_t op0, vuint16m2_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m2_m))) +vuint16m2_t vor(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m4))) +vuint16m4_t vor(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m4_m))) +vuint16m4_t vor(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m8))) +vuint16m8_t vor(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16m8_m))) +vuint16m8_t vor(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16mf2))) +vuint16mf2_t vor(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16mf2_m))) +vuint16mf2_t vor(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16mf4))) +vuint16mf4_t vor(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u16mf4_m))) +vuint16mf4_t vor(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m1))) +vuint32m1_t vor(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m1_m))) +vuint32m1_t vor(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m2))) +vuint32m2_t vor(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m2_m))) +vuint32m2_t vor(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m4))) +vuint32m4_t vor(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m4_m))) +vuint32m4_t vor(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m8))) +vuint32m8_t vor(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32m8_m))) +vuint32m8_t vor(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32mf2))) +vuint32mf2_t vor(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u32mf2_m))) +vuint32mf2_t vor(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m1))) +vuint64m1_t vor(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m1_m))) +vuint64m1_t vor(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m2))) +vuint64m2_t vor(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m2_m))) +vuint64m2_t vor(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m4))) +vuint64m4_t vor(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m4_m))) +vuint64m4_t vor(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m8))) +vuint64m8_t vor(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vv_u64m8_m))) +vuint64m8_t vor(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m1))) +vuint8m1_t vor(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m1_m))) +vuint8m1_t vor(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m2))) +vuint8m2_t vor(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m2_m))) +vuint8m2_t vor(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m4))) +vuint8m4_t vor(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m4_m))) +vuint8m4_t vor(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m8))) +vuint8m8_t vor(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8m8_m))) +vuint8m8_t vor(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8mf2))) +vuint8mf2_t vor(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8mf2_m))) +vuint8mf2_t vor(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8mf4))) +vuint8mf4_t vor(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8mf4_m))) +vuint8mf4_t vor(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8mf8))) +vuint8mf8_t vor(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u8mf8_m))) +vuint8mf8_t vor(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m1))) +vuint16m1_t vor(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m1_m))) +vuint16m1_t vor(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + 
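For reference, the overloaded spellings above are meant to be called like ordinary C functions: the element type and LMUL are inferred from the argument types, and the active vector length is always the last operand. A minimal strip-mined sketch, assuming a clang 14 toolchain with the V extension enabled (e.g. -march=rv64gcv); or_mask_bytes is a hypothetical name, and the non-overloaded vsetvl_e8m1/vle8_v_u8m1/vse8_v_u8m1 helpers are assumed from the same header:

#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

/* Illustrative sketch only, not taken from the patch. */
void or_mask_bytes(uint8_t *dst, const uint8_t *src, uint8_t bits, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = vsetvl_e8m1(n);                  /* lanes processed this pass */
    vuint8m1_t v = vle8_v_u8m1(src, vl);  /* unit-stride load */
    v = vor(v, bits, vl);                 /* resolves to __builtin_rvv_vor_vx_u8m1 */
    vse8_v_u8m1(dst, v, vl);              /* unit-stride store */
  }
}

The vxor overloads earlier in this hunk follow the same calling pattern.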
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m2))) +vuint16m2_t vor(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m2_m))) +vuint16m2_t vor(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m4))) +vuint16m4_t vor(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m4_m))) +vuint16m4_t vor(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m8))) +vuint16m8_t vor(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16m8_m))) +vuint16m8_t vor(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16mf2))) +vuint16mf2_t vor(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16mf2_m))) +vuint16mf2_t vor(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16mf4))) +vuint16mf4_t vor(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u16mf4_m))) +vuint16mf4_t vor(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m1))) +vuint32m1_t vor(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m1_m))) +vuint32m1_t vor(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m2))) +vuint32m2_t vor(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m2_m))) +vuint32m2_t vor(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m4))) +vuint32m4_t vor(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m4_m))) +vuint32m4_t vor(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m8))) +vuint32m8_t vor(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32m8_m))) +vuint32m8_t vor(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32mf2))) +vuint32mf2_t vor(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u32mf2_m))) +vuint32mf2_t vor(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m1))) +vuint64m1_t vor(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m1_m))) +vuint64m1_t vor(vbool64_t op0, vuint64m1_t 
op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m2))) +vuint64m2_t vor(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m2_m))) +vuint64m2_t vor(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m4))) +vuint64m4_t vor(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m4_m))) +vuint64m4_t vor(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m8))) +vuint64m8_t vor(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vor_vx_u64m8_m))) +vuint64m8_t vor(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m1))) +vint8m1_t vsll(vint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m1_m))) +vint8m1_t vsll(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m2))) +vint8m2_t vsll(vint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m2_m))) +vint8m2_t vsll(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m4))) +vint8m4_t vsll(vint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m4_m))) +vint8m4_t vsll(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m8))) +vint8m8_t vsll(vint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8m8_m))) +vint8m8_t vsll(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8mf2))) +vint8mf2_t vsll(vint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8mf2_m))) +vint8mf2_t vsll(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8mf4))) +vint8mf4_t vsll(vint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8mf4_m))) +vint8mf4_t vsll(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8mf8))) +vint8mf8_t vsll(vint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i8mf8_m))) +vint8mf8_t vsll(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m1))) +vint16m1_t vsll(vint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m1_m))) +vint16m1_t 
vsll(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m2))) +vint16m2_t vsll(vint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m2_m))) +vint16m2_t vsll(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m4))) +vint16m4_t vsll(vint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m4_m))) +vint16m4_t vsll(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m8))) +vint16m8_t vsll(vint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16m8_m))) +vint16m8_t vsll(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16mf2))) +vint16mf2_t vsll(vint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16mf2_m))) +vint16mf2_t vsll(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16mf4))) +vint16mf4_t vsll(vint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i16mf4_m))) +vint16mf4_t vsll(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m1))) +vint32m1_t vsll(vint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m1_m))) +vint32m1_t vsll(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m2))) +vint32m2_t vsll(vint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m2_m))) +vint32m2_t vsll(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m4))) +vint32m4_t vsll(vint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m4_m))) +vint32m4_t vsll(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m8))) +vint32m8_t vsll(vint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32m8_m))) +vint32m8_t vsll(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32mf2))) +vint32mf2_t vsll(vint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i32mf2_m))) +vint32mf2_t vsll(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m1))) +vint64m1_t vsll(vint64m1_t op0, vuint64m1_t op1, 
size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m1_m))) +vint64m1_t vsll(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m2))) +vint64m2_t vsll(vint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m2_m))) +vint64m2_t vsll(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m4))) +vint64m4_t vsll(vint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m4_m))) +vint64m4_t vsll(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m8))) +vint64m8_t vsll(vint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_i64m8_m))) +vint64m8_t vsll(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m1))) +vint8m1_t vsll(vint8m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m1_m))) +vint8m1_t vsll(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m2))) +vint8m2_t vsll(vint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m2_m))) +vint8m2_t vsll(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m4))) +vint8m4_t vsll(vint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m4_m))) +vint8m4_t vsll(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m8))) +vint8m8_t vsll(vint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8m8_m))) +vint8m8_t vsll(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8mf2))) +vint8mf2_t vsll(vint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8mf2_m))) +vint8mf2_t vsll(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8mf4))) +vint8mf4_t vsll(vint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8mf4_m))) +vint8mf4_t vsll(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8mf8))) +vint8mf8_t vsll(vint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i8mf8_m))) +vint8mf8_t vsll(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m1))) +vint16m1_t vsll(vint16m1_t op0, size_t op1, size_t op2); + 
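The _m variants above take two extra leading operands, a mask and a maskedoff vector; lanes where the mask is clear are taken from maskedoff rather than computed. A sketch under the same clang 14 assumptions, shifting only the negative lanes (shl_neg is a hypothetical name; vmslt_vx_i8m1_b8 is assumed from the non-overloaded part of the header):

#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

/* Illustrative sketch only, not taken from the patch. */
void shl_neg(int8_t *dst, const int8_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = vsetvl_e8m1(n);
    vint8m1_t v = vle8_v_i8m1(src, vl);
    vbool8_t neg = vmslt_vx_i8m1_b8(v, 0, vl);  /* mask: v < 0 */
    /* Masked form (mask, maskedoff, op1, scalar, vl): inactive lanes
       keep the original v passed as maskedoff. */
    v = vsll(neg, v, v, (size_t)1, vl);         /* __builtin_rvv_vsll_vx_i8m1_m */
    vse8_v_i8m1(dst, v, vl);
  }
}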
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m1_m))) +vint16m1_t vsll(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m2))) +vint16m2_t vsll(vint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m2_m))) +vint16m2_t vsll(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m4))) +vint16m4_t vsll(vint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m4_m))) +vint16m4_t vsll(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m8))) +vint16m8_t vsll(vint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16m8_m))) +vint16m8_t vsll(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16mf2))) +vint16mf2_t vsll(vint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16mf2_m))) +vint16mf2_t vsll(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16mf4))) +vint16mf4_t vsll(vint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i16mf4_m))) +vint16mf4_t vsll(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m1))) +vint32m1_t vsll(vint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m1_m))) +vint32m1_t vsll(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m2))) +vint32m2_t vsll(vint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m2_m))) +vint32m2_t vsll(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m4))) +vint32m4_t vsll(vint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m4_m))) +vint32m4_t vsll(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m8))) +vint32m8_t vsll(vint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32m8_m))) +vint32m8_t vsll(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32mf2))) +vint32mf2_t vsll(vint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i32mf2_m))) +vint32mf2_t vsll(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m1))) +vint64m1_t vsll(vint64m1_t op0, size_t op1, size_t op2); + 
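Because every type/LMUL combination aliases its own builtin, one spelling covers the whole family and the compiler picks the alias from the argument types. A two-function sketch under the same assumptions (shl1_w and shl1_d are hypothetical names):

#include <stddef.h>
#include <riscv_vector.h>

/* Illustrative sketch only: same spelling, different resolution. */
vint32m1_t shl1_w(vint32m1_t v, size_t vl) { return vsll(v, (size_t)1, vl); } /* vsll_vx_i32m1 */
vint64m2_t shl1_d(vint64m2_t v, size_t vl) { return vsll(v, (size_t)1, vl); } /* vsll_vx_i64m2 */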
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m1_m))) +vint64m1_t vsll(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m2))) +vint64m2_t vsll(vint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m2_m))) +vint64m2_t vsll(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m4))) +vint64m4_t vsll(vint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m4_m))) +vint64m4_t vsll(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m8))) +vint64m8_t vsll(vint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_i64m8_m))) +vint64m8_t vsll(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m1))) +vuint8m1_t vsll(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m1_m))) +vuint8m1_t vsll(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m2))) +vuint8m2_t vsll(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m2_m))) +vuint8m2_t vsll(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m4))) +vuint8m4_t vsll(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m4_m))) +vuint8m4_t vsll(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m8))) +vuint8m8_t vsll(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8m8_m))) +vuint8m8_t vsll(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8mf2))) +vuint8mf2_t vsll(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8mf2_m))) +vuint8mf2_t vsll(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8mf4))) +vuint8mf4_t vsll(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8mf4_m))) +vuint8mf4_t vsll(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8mf8))) +vuint8mf8_t vsll(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u8mf8_m))) +vuint8mf8_t vsll(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m1))) +vuint16m1_t 
vsll(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m1_m))) +vuint16m1_t vsll(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m2))) +vuint16m2_t vsll(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m2_m))) +vuint16m2_t vsll(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m4))) +vuint16m4_t vsll(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m4_m))) +vuint16m4_t vsll(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m8))) +vuint16m8_t vsll(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16m8_m))) +vuint16m8_t vsll(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16mf2))) +vuint16mf2_t vsll(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16mf2_m))) +vuint16mf2_t vsll(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16mf4))) +vuint16mf4_t vsll(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u16mf4_m))) +vuint16mf4_t vsll(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m1))) +vuint32m1_t vsll(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m1_m))) +vuint32m1_t vsll(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m2))) +vuint32m2_t vsll(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m2_m))) +vuint32m2_t vsll(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m4))) +vuint32m4_t vsll(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m4_m))) +vuint32m4_t vsll(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m8))) +vuint32m8_t vsll(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32m8_m))) +vuint32m8_t vsll(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32mf2))) +vuint32mf2_t vsll(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u32mf2_m))) +vuint32mf2_t vsll(vbool64_t op0, 
vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m1))) +vuint64m1_t vsll(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m1_m))) +vuint64m1_t vsll(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m2))) +vuint64m2_t vsll(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m2_m))) +vuint64m2_t vsll(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m4))) +vuint64m4_t vsll(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m4_m))) +vuint64m4_t vsll(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m8))) +vuint64m8_t vsll(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vv_u64m8_m))) +vuint64m8_t vsll(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m1))) +vuint8m1_t vsll(vuint8m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m1_m))) +vuint8m1_t vsll(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m2))) +vuint8m2_t vsll(vuint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m2_m))) +vuint8m2_t vsll(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m4))) +vuint8m4_t vsll(vuint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m4_m))) +vuint8m4_t vsll(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m8))) +vuint8m8_t vsll(vuint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8m8_m))) +vuint8m8_t vsll(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8mf2))) +vuint8mf2_t vsll(vuint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8mf2_m))) +vuint8mf2_t vsll(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8mf4))) +vuint8mf4_t vsll(vuint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8mf4_m))) +vuint8mf4_t vsll(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8mf8))) +vuint8mf8_t vsll(vuint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u8mf8_m))) +vuint8mf8_t vsll(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m1))) +vuint16m1_t vsll(vuint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m1_m))) +vuint16m1_t vsll(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m2))) +vuint16m2_t vsll(vuint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m2_m))) +vuint16m2_t vsll(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m4))) +vuint16m4_t vsll(vuint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m4_m))) +vuint16m4_t vsll(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m8))) +vuint16m8_t vsll(vuint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16m8_m))) +vuint16m8_t vsll(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16mf2))) +vuint16mf2_t vsll(vuint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16mf2_m))) +vuint16mf2_t vsll(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16mf4))) +vuint16mf4_t vsll(vuint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u16mf4_m))) +vuint16mf4_t vsll(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m1))) +vuint32m1_t vsll(vuint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m1_m))) +vuint32m1_t vsll(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m2))) +vuint32m2_t vsll(vuint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m2_m))) +vuint32m2_t vsll(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m4))) +vuint32m4_t vsll(vuint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m4_m))) +vuint32m4_t vsll(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m8))) +vuint32m8_t vsll(vuint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32m8_m))) +vuint32m8_t vsll(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32mf2))) +vuint32mf2_t vsll(vuint32mf2_t op0, 
size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u32mf2_m))) +vuint32mf2_t vsll(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m1))) +vuint64m1_t vsll(vuint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m1_m))) +vuint64m1_t vsll(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m2))) +vuint64m2_t vsll(vuint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m2_m))) +vuint64m2_t vsll(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m4))) +vuint64m4_t vsll(vuint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m4_m))) +vuint64m4_t vsll(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m8))) +vuint64m8_t vsll(vuint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsll_vx_u64m8_m))) +vuint64m8_t vsll(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m1))) +void vsuxei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m1_m))) +void vsuxei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m2))) +void vsuxei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m2_m))) +void vsuxei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m4))) +void vsuxei16(uint16_t * op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m4_m))) +void vsuxei16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m8))) +void vsuxei16(uint16_t * op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16m8_m))) +void vsuxei16(vbool2_t op0, uint16_t * op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16mf2))) +void vsuxei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16mf2_m))) +void vsuxei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16mf4))) +void vsuxei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u16mf4_m))) +void 
vsuxei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m1))) +vuint8m1_t vsrl(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m1_m))) +vuint8m1_t vsrl(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m2))) +vuint8m2_t vsrl(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m2_m))) +vuint8m2_t vsrl(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m4))) +vuint8m4_t vsrl(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m4_m))) +vuint8m4_t vsrl(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m8))) +vuint8m8_t vsrl(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8m8_m))) +vuint8m8_t vsrl(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8mf2))) +vuint8mf2_t vsrl(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8mf2_m))) +vuint8mf2_t vsrl(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8mf4))) +vuint8mf4_t vsrl(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8mf4_m))) +vuint8mf4_t vsrl(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8mf8))) +vuint8mf8_t vsrl(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u8mf8_m))) +vuint8mf8_t vsrl(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m1))) +vuint16m1_t vsrl(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m1_m))) +vuint16m1_t vsrl(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m2))) +vuint16m2_t vsrl(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m2_m))) +vuint16m2_t vsrl(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m4))) +vuint16m4_t vsrl(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m4_m))) +vuint16m4_t vsrl(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m8))) +vuint16m8_t vsrl(vuint16m8_t op0, vuint16m8_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16m8_m))) +vuint16m8_t vsrl(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16mf2))) +vuint16mf2_t vsrl(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16mf2_m))) +vuint16mf2_t vsrl(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16mf4))) +vuint16mf4_t vsrl(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u16mf4_m))) +vuint16mf4_t vsrl(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m1))) +vuint32m1_t vsrl(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m1_m))) +vuint32m1_t vsrl(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m2))) +vuint32m2_t vsrl(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m2_m))) +vuint32m2_t vsrl(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m4))) +vuint32m4_t vsrl(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m4_m))) +vuint32m4_t vsrl(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m8))) +vuint32m8_t vsrl(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32m8_m))) +vuint32m8_t vsrl(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32mf2))) +vuint32mf2_t vsrl(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u32mf2_m))) +vuint32mf2_t vsrl(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m1))) +vuint64m1_t vsrl(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m1_m))) +vuint64m1_t vsrl(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m2))) +vuint64m2_t vsrl(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m2_m))) +vuint64m2_t vsrl(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m4))) +vuint64m4_t vsrl(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m4_m))) +vuint64m4_t vsrl(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, 
vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m8))) +vuint64m8_t vsrl(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vv_u64m8_m))) +vuint64m8_t vsrl(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m1))) +vuint8m1_t vsrl(vuint8m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m1_m))) +vuint8m1_t vsrl(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m2))) +vuint8m2_t vsrl(vuint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m2_m))) +vuint8m2_t vsrl(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m4))) +vuint8m4_t vsrl(vuint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m4_m))) +vuint8m4_t vsrl(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m8))) +vuint8m8_t vsrl(vuint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8m8_m))) +vuint8m8_t vsrl(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8mf2))) +vuint8mf2_t vsrl(vuint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8mf2_m))) +vuint8mf2_t vsrl(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8mf4))) +vuint8mf4_t vsrl(vuint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8mf4_m))) +vuint8mf4_t vsrl(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8mf8))) +vuint8mf8_t vsrl(vuint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u8mf8_m))) +vuint8mf8_t vsrl(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m1))) +vuint16m1_t vsrl(vuint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m1_m))) +vuint16m1_t vsrl(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m2))) +vuint16m2_t vsrl(vuint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m2_m))) +vuint16m2_t vsrl(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m4))) +vuint16m4_t vsrl(vuint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m4_m))) +vuint16m4_t vsrl(vbool4_t op0, vuint16m4_t op1, 
vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m8))) +vuint16m8_t vsrl(vuint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16m8_m))) +vuint16m8_t vsrl(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16mf2))) +vuint16mf2_t vsrl(vuint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16mf2_m))) +vuint16mf2_t vsrl(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16mf4))) +vuint16mf4_t vsrl(vuint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u16mf4_m))) +vuint16mf4_t vsrl(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m1))) +vuint32m1_t vsrl(vuint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m1_m))) +vuint32m1_t vsrl(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m2))) +vuint32m2_t vsrl(vuint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m2_m))) +vuint32m2_t vsrl(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m4))) +vuint32m4_t vsrl(vuint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m4_m))) +vuint32m4_t vsrl(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m8))) +vuint32m8_t vsrl(vuint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32m8_m))) +vuint32m8_t vsrl(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32mf2))) +vuint32mf2_t vsrl(vuint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u32mf2_m))) +vuint32mf2_t vsrl(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m1))) +vuint64m1_t vsrl(vuint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m1_m))) +vuint64m1_t vsrl(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m2))) +vuint64m2_t vsrl(vuint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m2_m))) +vuint64m2_t vsrl(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m4))) +vuint64m4_t vsrl(vuint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m4_m))) 
+vuint64m4_t vsrl(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m8))) +vuint64m8_t vsrl(vuint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsrl_vx_u64m8_m))) +vuint64m8_t vsrl(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m1))) +vint8m1_t vsra(vint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m1_m))) +vint8m1_t vsra(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m2))) +vint8m2_t vsra(vint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m2_m))) +vint8m2_t vsra(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m4))) +vint8m4_t vsra(vint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m4_m))) +vint8m4_t vsra(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m8))) +vint8m8_t vsra(vint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8m8_m))) +vint8m8_t vsra(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8mf2))) +vint8mf2_t vsra(vint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8mf2_m))) +vint8mf2_t vsra(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8mf4))) +vint8mf4_t vsra(vint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8mf4_m))) +vint8mf4_t vsra(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8mf8))) +vint8mf8_t vsra(vint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i8mf8_m))) +vint8mf8_t vsra(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m1))) +vint16m1_t vsra(vint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m1_m))) +vint16m1_t vsra(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m2))) +vint16m2_t vsra(vint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m2_m))) +vint16m2_t vsra(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m4))) +vint16m4_t vsra(vint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m4_m))) +vint16m4_t vsra(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m8))) +vint16m8_t vsra(vint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16m8_m))) +vint16m8_t vsra(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16mf2))) +vint16mf2_t vsra(vint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16mf2_m))) +vint16mf2_t vsra(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16mf4))) +vint16mf4_t vsra(vint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i16mf4_m))) +vint16mf4_t vsra(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m1))) +vint32m1_t vsra(vint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m1_m))) +vint32m1_t vsra(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m2))) +vint32m2_t vsra(vint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m2_m))) +vint32m2_t vsra(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m4))) +vint32m4_t vsra(vint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m4_m))) +vint32m4_t vsra(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m8))) +vint32m8_t vsra(vint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32m8_m))) +vint32m8_t vsra(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32mf2))) +vint32mf2_t vsra(vint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i32mf2_m))) +vint32mf2_t vsra(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m1))) +vint64m1_t vsra(vint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m1_m))) +vint64m1_t vsra(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m2))) +vint64m2_t vsra(vint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m2_m))) +vint64m2_t vsra(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m4))) +vint64m4_t vsra(vint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m4_m))) +vint64m4_t vsra(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m8))) +vint64m8_t vsra(vint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vv_i64m8_m))) +vint64m8_t vsra(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m1))) +vint8m1_t vsra(vint8m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m1_m))) +vint8m1_t vsra(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m2))) +vint8m2_t vsra(vint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m2_m))) +vint8m2_t vsra(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m4))) +vint8m4_t vsra(vint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m4_m))) +vint8m4_t vsra(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m8))) +vint8m8_t vsra(vint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8m8_m))) +vint8m8_t vsra(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8mf2))) +vint8mf2_t vsra(vint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8mf2_m))) +vint8mf2_t vsra(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8mf4))) +vint8mf4_t vsra(vint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8mf4_m))) +vint8mf4_t vsra(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8mf8))) +vint8mf8_t vsra(vint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i8mf8_m))) +vint8mf8_t vsra(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m1))) +vint16m1_t vsra(vint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m1_m))) +vint16m1_t vsra(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m2))) +vint16m2_t vsra(vint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m2_m))) +vint16m2_t vsra(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m4))) +vint16m4_t vsra(vint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m4_m))) +vint16m4_t vsra(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m8))) +vint16m8_t vsra(vint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16m8_m))) +vint16m8_t vsra(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16mf2))) +vint16mf2_t vsra(vint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16mf2_m))) +vint16mf2_t vsra(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16mf4))) +vint16mf4_t vsra(vint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i16mf4_m))) +vint16mf4_t vsra(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m1))) +vint32m1_t vsra(vint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m1_m))) +vint32m1_t vsra(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m2))) +vint32m2_t vsra(vint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m2_m))) +vint32m2_t vsra(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m4))) +vint32m4_t vsra(vint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m4_m))) +vint32m4_t vsra(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m8))) +vint32m8_t vsra(vint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32m8_m))) +vint32m8_t vsra(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32mf2))) +vint32mf2_t vsra(vint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i32mf2_m))) +vint32mf2_t vsra(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m1))) +vint64m1_t vsra(vint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m1_m))) +vint64m1_t vsra(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m2))) +vint64m2_t vsra(vint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m2_m))) +vint64m2_t vsra(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m4))) +vint64m4_t vsra(vint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m4_m))) +vint64m4_t vsra(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m8))) +vint64m8_t vsra(vint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsra_vx_i64m8_m))) +vint64m8_t vsra(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8m1))) +vuint8m1_t vnsrl(vuint16m2_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8m1_m))) +vuint8m1_t vnsrl(vbool8_t op0, vuint8m1_t op1, vuint16m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8m2))) +vuint8m2_t vnsrl(vuint16m4_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8m2_m))) +vuint8m2_t vnsrl(vbool4_t op0, vuint8m2_t op1, vuint16m4_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8m4))) +vuint8m4_t vnsrl(vuint16m8_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8m4_m))) +vuint8m4_t vnsrl(vbool2_t op0, vuint8m4_t op1, vuint16m8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8mf2))) +vuint8mf2_t vnsrl(vuint16m1_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8mf2_m))) +vuint8mf2_t vnsrl(vbool16_t op0, vuint8mf2_t op1, vuint16m1_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8mf4))) +vuint8mf4_t vnsrl(vuint16mf2_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8mf4_m))) +vuint8mf4_t vnsrl(vbool32_t op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8mf8))) +vuint8mf8_t vnsrl(vuint16mf4_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u8mf8_m))) +vuint8mf8_t vnsrl(vbool64_t op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16m1))) +vuint16m1_t vnsrl(vuint32m2_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16m1_m))) +vuint16m1_t vnsrl(vbool16_t op0, vuint16m1_t op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16m2))) +vuint16m2_t vnsrl(vuint32m4_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16m2_m))) +vuint16m2_t vnsrl(vbool8_t op0, vuint16m2_t op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16m4))) +vuint16m4_t vnsrl(vuint32m8_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16m4_m))) +vuint16m4_t vnsrl(vbool4_t op0, vuint16m4_t op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16mf2))) +vuint16mf2_t vnsrl(vuint32m1_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16mf2_m))) +vuint16mf2_t vnsrl(vbool32_t op0, vuint16mf2_t op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16mf4))) +vuint16mf4_t vnsrl(vuint32mf2_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u16mf4_m))) +vuint16mf4_t vnsrl(vbool64_t op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32m1))) +vuint32m1_t vnsrl(vuint64m2_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32m1_m))) +vuint32m1_t vnsrl(vbool32_t op0, vuint32m1_t op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32m2))) +vuint32m2_t vnsrl(vuint64m4_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32m2_m))) +vuint32m2_t vnsrl(vbool16_t op0, vuint32m2_t op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32m4))) +vuint32m4_t vnsrl(vuint64m8_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32m4_m))) +vuint32m4_t vnsrl(vbool8_t op0, vuint32m4_t op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32mf2))) +vuint32mf2_t vnsrl(vuint64m1_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wv_u32mf2_m))) +vuint32mf2_t vnsrl(vbool64_t op0, vuint32mf2_t op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8m1))) +vuint8m1_t vnsrl(vuint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8m1_m))) +vuint8m1_t vnsrl(vbool8_t op0, vuint8m1_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8m2))) +vuint8m2_t vnsrl(vuint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8m2_m))) +vuint8m2_t vnsrl(vbool4_t op0, vuint8m2_t op1, vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8m4))) +vuint8m4_t vnsrl(vuint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8m4_m))) +vuint8m4_t vnsrl(vbool2_t op0, vuint8m4_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8mf2))) +vuint8mf2_t vnsrl(vuint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8mf2_m))) +vuint8mf2_t vnsrl(vbool16_t op0, vuint8mf2_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8mf4))) +vuint8mf4_t vnsrl(vuint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8mf4_m))) +vuint8mf4_t vnsrl(vbool32_t op0, vuint8mf4_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8mf8))) +vuint8mf8_t vnsrl(vuint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u8mf8_m))) +vuint8mf8_t vnsrl(vbool64_t op0, vuint8mf8_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16m1))) +vuint16m1_t vnsrl(vuint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16m1_m))) +vuint16m1_t vnsrl(vbool16_t op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16m2))) +vuint16m2_t vnsrl(vuint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16m2_m))) +vuint16m2_t vnsrl(vbool8_t op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16m4))) +vuint16m4_t vnsrl(vuint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16m4_m))) +vuint16m4_t vnsrl(vbool4_t op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16mf2))) +vuint16mf2_t vnsrl(vuint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16mf2_m))) +vuint16mf2_t vnsrl(vbool32_t op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16mf4))) +vuint16mf4_t vnsrl(vuint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u16mf4_m))) +vuint16mf4_t vnsrl(vbool64_t op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32m1))) +vuint32m1_t vnsrl(vuint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32m1_m))) +vuint32m1_t vnsrl(vbool32_t op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32m2))) +vuint32m2_t vnsrl(vuint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32m2_m))) +vuint32m2_t vnsrl(vbool16_t op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32m4))) +vuint32m4_t vnsrl(vuint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32m4_m))) +vuint32m4_t vnsrl(vbool8_t op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32mf2))) +vuint32mf2_t vnsrl(vuint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsrl_wx_u32mf2_m))) +vuint32mf2_t 
vnsrl(vbool64_t op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8m1))) +vint8m1_t vnsra(vint16m2_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8m1_m))) +vint8m1_t vnsra(vbool8_t op0, vint8m1_t op1, vint16m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8m2))) +vint8m2_t vnsra(vint16m4_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8m2_m))) +vint8m2_t vnsra(vbool4_t op0, vint8m2_t op1, vint16m4_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8m4))) +vint8m4_t vnsra(vint16m8_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8m4_m))) +vint8m4_t vnsra(vbool2_t op0, vint8m4_t op1, vint16m8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8mf2))) +vint8mf2_t vnsra(vint16m1_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8mf2_m))) +vint8mf2_t vnsra(vbool16_t op0, vint8mf2_t op1, vint16m1_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8mf4))) +vint8mf4_t vnsra(vint16mf2_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8mf4_m))) +vint8mf4_t vnsra(vbool32_t op0, vint8mf4_t op1, vint16mf2_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8mf8))) +vint8mf8_t vnsra(vint16mf4_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i8mf8_m))) +vint8mf8_t vnsra(vbool64_t op0, vint8mf8_t op1, vint16mf4_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16m1))) +vint16m1_t vnsra(vint32m2_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16m1_m))) +vint16m1_t vnsra(vbool16_t op0, vint16m1_t op1, vint32m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16m2))) +vint16m2_t vnsra(vint32m4_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16m2_m))) +vint16m2_t vnsra(vbool8_t op0, vint16m2_t op1, vint32m4_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16m4))) +vint16m4_t vnsra(vint32m8_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16m4_m))) +vint16m4_t vnsra(vbool4_t op0, vint16m4_t op1, vint32m8_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16mf2))) +vint16mf2_t vnsra(vint32m1_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16mf2_m))) +vint16mf2_t vnsra(vbool32_t op0, vint16mf2_t op1, vint32m1_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16mf4))) +vint16mf4_t vnsra(vint32mf2_t op0, vuint16mf4_t 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i16mf4_m))) +vint16mf4_t vnsra(vbool64_t op0, vint16mf4_t op1, vint32mf2_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32m1))) +vint32m1_t vnsra(vint64m2_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32m1_m))) +vint32m1_t vnsra(vbool32_t op0, vint32m1_t op1, vint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32m2))) +vint32m2_t vnsra(vint64m4_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32m2_m))) +vint32m2_t vnsra(vbool16_t op0, vint32m2_t op1, vint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32m4))) +vint32m4_t vnsra(vint64m8_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32m4_m))) +vint32m4_t vnsra(vbool8_t op0, vint32m4_t op1, vint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32mf2))) +vint32mf2_t vnsra(vint64m1_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wv_i32mf2_m))) +vint32mf2_t vnsra(vbool64_t op0, vint32mf2_t op1, vint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8m1))) +vint8m1_t vnsra(vint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8m1_m))) +vint8m1_t vnsra(vbool8_t op0, vint8m1_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8m2))) +vint8m2_t vnsra(vint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8m2_m))) +vint8m2_t vnsra(vbool4_t op0, vint8m2_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8m4))) +vint8m4_t vnsra(vint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8m4_m))) +vint8m4_t vnsra(vbool2_t op0, vint8m4_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8mf2))) +vint8mf2_t vnsra(vint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8mf2_m))) +vint8mf2_t vnsra(vbool16_t op0, vint8mf2_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8mf4))) +vint8mf4_t vnsra(vint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8mf4_m))) +vint8mf4_t vnsra(vbool32_t op0, vint8mf4_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8mf8))) +vint8mf8_t vnsra(vint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i8mf8_m))) +vint8mf8_t vnsra(vbool64_t op0, vint8mf8_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16m1))) +vint16m1_t vnsra(vint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16m1_m))) +vint16m1_t vnsra(vbool16_t op0, vint16m1_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16m2))) +vint16m2_t vnsra(vint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16m2_m))) +vint16m2_t vnsra(vbool8_t op0, vint16m2_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16m4))) +vint16m4_t vnsra(vint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16m4_m))) +vint16m4_t vnsra(vbool4_t op0, vint16m4_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16mf2))) +vint16mf2_t vnsra(vint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16mf2_m))) +vint16mf2_t vnsra(vbool32_t op0, vint16mf2_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16mf4))) +vint16mf4_t vnsra(vint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i16mf4_m))) +vint16mf4_t vnsra(vbool64_t op0, vint16mf4_t op1, vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32m1))) +vint32m1_t vnsra(vint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32m1_m))) +vint32m1_t vnsra(vbool32_t op0, vint32m1_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32m2))) +vint32m2_t vnsra(vint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32m2_m))) +vint32m2_t vnsra(vbool16_t op0, vint32m2_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32m4))) +vint32m4_t vnsra(vint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32m4_m))) +vint32m4_t vnsra(vbool8_t op0, vint32m4_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32mf2))) +vint32mf2_t vnsra(vint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnsra_wx_i32mf2_m))) +vint32mf2_t vnsra(vbool64_t op0, vint32mf2_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8m1))) +vuint8m1_t vncvt_x(vuint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8m1_m))) +vuint8m1_t vncvt_x(vbool8_t op0, vuint8m1_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8m2))) +vuint8m2_t vncvt_x(vuint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8m2_m))) +vuint8m2_t vncvt_x(vbool4_t op0, vuint8m2_t op1, vuint16m4_t op2, size_t op3); + 
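For orientation, a minimal usage sketch of the overloaded narrowing forms declared above; this is illustrative only and not part of the generated header. It assumes a clang 14 toolchain with the V extension enabled (e.g. -march=rv64gcv), and the function names and the idea that vl comes from a prior vsetvl call are mine, not the header's. vnsra() narrows widened signed lanes with an arithmetic right shift; vncvt_x() simply truncates.

#include <riscv_vector.h>
#include <stddef.h>

/* Narrow int16 lanes to int8 with an arithmetic right shift (vnsra.wx),
 * using the overloaded vint8m1_t vnsra(vint16m2_t, size_t, size_t) form. */
vint8m1_t narrow_shift(vint16m2_t wide, size_t shift, size_t vl) {
  return vnsra(wide, shift, vl);
}

/* Truncating narrow of uint16 lanes to uint8 (vncvt.x.x.w). */
vuint8m1_t narrow_trunc(vuint16m2_t wide, size_t vl) {
  return vncvt_x(wide, vl);
}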
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8m4))) +vuint8m4_t vncvt_x(vuint16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8m4_m))) +vuint8m4_t vncvt_x(vbool2_t op0, vuint8m4_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8mf2))) +vuint8mf2_t vncvt_x(vuint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8mf2_m))) +vuint8mf2_t vncvt_x(vbool16_t op0, vuint8mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8mf4))) +vuint8mf4_t vncvt_x(vuint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8mf4_m))) +vuint8mf4_t vncvt_x(vbool32_t op0, vuint8mf4_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8mf8))) +vuint8mf8_t vncvt_x(vuint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u8mf8_m))) +vuint8mf8_t vncvt_x(vbool64_t op0, vuint8mf8_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16m1))) +vuint16m1_t vncvt_x(vuint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16m1_m))) +vuint16m1_t vncvt_x(vbool16_t op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16m2))) +vuint16m2_t vncvt_x(vuint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16m2_m))) +vuint16m2_t vncvt_x(vbool8_t op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16m4))) +vuint16m4_t vncvt_x(vuint32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16m4_m))) +vuint16m4_t vncvt_x(vbool4_t op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16mf2))) +vuint16mf2_t vncvt_x(vuint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16mf2_m))) +vuint16mf2_t vncvt_x(vbool32_t op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16mf4))) +vuint16mf4_t vncvt_x(vuint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u16mf4_m))) +vuint16mf4_t vncvt_x(vbool64_t op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32m1))) +vuint32m1_t vncvt_x(vuint64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32m1_m))) +vuint32m1_t vncvt_x(vbool32_t op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32m2))) +vuint32m2_t vncvt_x(vuint64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32m2_m))) +vuint32m2_t vncvt_x(vbool16_t op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32m4))) +vuint32m4_t vncvt_x(vuint64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32m4_m))) +vuint32m4_t vncvt_x(vbool8_t op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32mf2))) +vuint32mf2_t vncvt_x(vuint64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_u32mf2_m))) +vuint32mf2_t vncvt_x(vbool64_t op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m1_b8))) +vbool8_t vmseq(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m1_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m2_b4))) +vbool4_t vmseq(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m2_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m4_b2))) +vbool2_t vmseq(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m4_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m8_b1))) +vbool1_t vmseq(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8m8_b1_m))) +vbool1_t vmseq(vbool1_t op0, vbool1_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8mf2_b16))) +vbool16_t vmseq(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8mf2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8mf4_b32))) +vbool32_t vmseq(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8mf4_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8mf8_b64))) +vbool64_t vmseq(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i8mf8_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m1_b16))) +vbool16_t vmseq(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m1_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m2_b8))) +vbool8_t vmseq(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m2_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t 
op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m4_b4))) +vbool4_t vmseq(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m4_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m8_b2))) +vbool2_t vmseq(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16m8_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16mf2_b32))) +vbool32_t vmseq(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16mf2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16mf4_b64))) +vbool64_t vmseq(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i16mf4_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m1_b32))) +vbool32_t vmseq(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m1_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m2_b16))) +vbool16_t vmseq(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m4_b8))) +vbool8_t vmseq(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m4_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m8_b4))) +vbool4_t vmseq(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32m8_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32mf2_b64))) +vbool64_t vmseq(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i32mf2_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m1_b64))) +vbool64_t vmseq(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m1_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m2_b32))) +vbool32_t vmseq(vint64m2_t op0, 
vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m4_b16))) +vbool16_t vmseq(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m4_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m8_b8))) +vbool8_t vmseq(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_i64m8_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m1))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m1_m))) +void vsse8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m2))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m2_m))) +void vsse8(vbool4_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m4))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m4_m))) +void vsse8(vbool2_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m8))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8m8_m))) +void vsse8(vbool1_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8mf2))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8mf2_m))) +void vsse8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8mf4))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8mf4_m))) +void vsse8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8mf8))) +void vsse8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse8_v_u8mf8_m))) +void vsse8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16m1))) +void vsuxei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16m1_m))) +void vsuxei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, size_t op4); + 
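The compare and store families above compose naturally: a vmseq() result lands in a mask register and can gate a masked strided store. A minimal sketch under the same toolchain assumptions as before (illustrative only; the function name and the stride/lane interpretation in the comments are mine):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Store only the lanes of v that equal the corresponding lane of ref,
 * writing one byte every `stride` bytes via the masked vsse8 overload. */
void store_equal_lanes(uint8_t *dst, ptrdiff_t stride,
                       vuint8m1_t v, vuint8m1_t ref, size_t vl) {
  vbool8_t eq = vmseq(v, ref, vl);  /* vmseq.vv: per-lane equality mask */
  vsse8(eq, dst, stride, v, vl);    /* masked strided store of matching lanes */
}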
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16m2))) +void vsuxei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16m2_m))) +void vsuxei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16m4))) +void vsuxei32(int16_t * op0, vuint32m8_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16m4_m))) +void vsuxei32(vbool4_t op0, int16_t * op1, vuint32m8_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16mf2))) +void vsuxei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16mf2_m))) +void vsuxei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16mf4))) +void vsuxei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i16mf4_m))) +void vsuxei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m1_b8))) +vbool8_t vmseq(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m1_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m2_b4))) +vbool4_t vmseq(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m2_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m4_b2))) +vbool2_t vmseq(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m4_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m8_b1))) +vbool1_t vmseq(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8m8_b1_m))) +vbool1_t vmseq(vbool1_t op0, vbool1_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8mf2_b16))) +vbool16_t vmseq(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8mf2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8mf4_b32))) +vbool32_t vmseq(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8mf4_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8mf8_b64))) +vbool64_t vmseq(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i8mf8_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m1_b16))) +vbool16_t vmseq(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m1_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m2_b8))) +vbool8_t vmseq(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m2_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m4_b4))) +vbool4_t vmseq(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m4_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m8_b2))) +vbool2_t vmseq(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16m8_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16mf2_b32))) +vbool32_t vmseq(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16mf2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16mf4_b64))) +vbool64_t vmseq(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i16mf4_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m1_b32))) +vbool32_t vmseq(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m1_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m2_b16))) +vbool16_t vmseq(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m4_b8))) +vbool8_t vmseq(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m4_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m8_b4))) +vbool4_t vmseq(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32m8_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32mf2_b64))) 
+vbool64_t vmseq(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i32mf2_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m1_b64))) +vbool64_t vmseq(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m1_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m2_b32))) +vbool32_t vmseq(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m4_b16))) +vbool16_t vmseq(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m4_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m8_b8))) +vbool8_t vmseq(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_i64m8_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m1_b8))) +vbool8_t vmseq(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m1_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m2_b4))) +vbool4_t vmseq(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m2_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m4_b2))) +vbool2_t vmseq(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m4_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m8_b1))) +vbool1_t vmseq(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8m8_b1_m))) +vbool1_t vmseq(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8mf2_b16))) +vbool16_t vmseq(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8mf2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8mf4_b32))) +vbool32_t vmseq(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8mf4_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, vuint8mf4_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8mf8_b64))) +vbool64_t vmseq(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u8mf8_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m1_b16))) +vbool16_t vmseq(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m1_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m2_b8))) +vbool8_t vmseq(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m2_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m4_b4))) +vbool4_t vmseq(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m4_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m8_b2))) +vbool2_t vmseq(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16m8_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16mf2_b32))) +vbool32_t vmseq(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16mf2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16mf4_b64))) +vbool64_t vmseq(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u16mf4_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m1_b32))) +vbool32_t vmseq(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m1_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m2_b16))) +vbool16_t vmseq(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m4_b8))) +vbool8_t vmseq(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m4_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m8_b4))) +vbool4_t vmseq(vuint32m8_t op0, 
vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32m8_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32mf2_b64))) +vbool64_t vmseq(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u32mf2_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m1_b64))) +vbool64_t vmseq(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m1_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m2_b32))) +vbool32_t vmseq(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m4_b16))) +vbool16_t vmseq(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m4_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m8_b8))) +vbool8_t vmseq(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vv_u64m8_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m1_b8))) +vbool8_t vmseq(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m1_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m2_b4))) +vbool4_t vmseq(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m2_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m4_b2))) +vbool2_t vmseq(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m4_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m8_b1))) +vbool1_t vmseq(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8m8_b1_m))) +vbool1_t vmseq(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8mf2_b16))) +vbool16_t vmseq(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8mf2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + 
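In the masked vmseq() overloads above, the first two operands are, by the usual convention of these generated intrinsics, the governing mask and the maskedoff source. Assuming that convention holds here, passing the same mask for both gives a compact mask-refinement idiom; this is an illustrative sketch, not header content, and the function name is mine:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Keep only the lanes already set in m whose element equals key.
 * Inactive lanes inherit the maskedoff bit, which is m itself and thus 0,
 * so the result is m AND (v == key). */
vbool8_t refine_mask(vbool8_t m, vuint8m1_t v, uint8_t key, size_t vl) {
  return vmseq(m, m, v, key, vl);   /* masked vmseq.vx */
}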
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8mf4_b32))) +vbool32_t vmseq(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8mf4_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8mf8_b64))) +vbool64_t vmseq(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u8mf8_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m1_b16))) +vbool16_t vmseq(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m1_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m2_b8))) +vbool8_t vmseq(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m2_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m4_b4))) +vbool4_t vmseq(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m4_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m8_b2))) +vbool2_t vmseq(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16m8_b2_m))) +vbool2_t vmseq(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16mf2_b32))) +vbool32_t vmseq(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16mf2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16mf4_b64))) +vbool64_t vmseq(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u16mf4_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m1_b32))) +vbool32_t vmseq(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m1_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m2_b16))) +vbool16_t vmseq(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m2_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m4_b8))) +vbool8_t vmseq(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m4_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m8_b4))) +vbool4_t vmseq(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32m8_b4_m))) +vbool4_t vmseq(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32mf2_b64))) +vbool64_t vmseq(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u32mf2_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m1_b64))) +vbool64_t vmseq(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m1_b64_m))) +vbool64_t vmseq(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m2_b32))) +vbool32_t vmseq(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m2_b32_m))) +vbool32_t vmseq(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m4_b16))) +vbool16_t vmseq(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m4_b16_m))) +vbool16_t vmseq(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m8_b8))) +vbool8_t vmseq(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmseq_vx_u64m8_b8_m))) +vbool8_t vmseq(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m1_b8))) +vbool8_t vmsne(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m1_b8_m))) +vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m2_b4))) +vbool4_t vmsne(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m2_b4_m))) +vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m4_b2))) +vbool2_t vmsne(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m4_b2_m))) +vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m8_b1))) +vbool1_t vmsne(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8m8_b1_m))) +vbool1_t vmsne(vbool1_t op0, vbool1_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8mf2_b16))) +vbool16_t vmsne(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8mf2_b16_m))) +vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8mf4_b32))) +vbool32_t vmsne(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8mf4_b32_m))) +vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8mf8_b64))) +vbool64_t vmsne(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i8mf8_b64_m))) +vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m1_b16))) +vbool16_t vmsne(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m1_b16_m))) +vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m2_b8))) +vbool8_t vmsne(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m2_b8_m))) +vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m4_b4))) +vbool4_t vmsne(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m4_b4_m))) +vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m8_b2))) +vbool2_t vmsne(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16m8_b2_m))) +vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16mf2_b32))) +vbool32_t vmsne(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16mf2_b32_m))) +vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16mf4_b64))) +vbool64_t vmsne(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i16mf4_b64_m))) +vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m1_b32))) +vbool32_t vmsne(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m1_b32_m))) +vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m2_b16))) +vbool16_t vmsne(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m2_b16_m))) +vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m4_b8))) +vbool8_t vmsne(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m4_b8_m))) +vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m8_b4))) +vbool4_t vmsne(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32m8_b4_m))) +vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32mf2_b64))) +vbool64_t vmsne(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i32mf2_b64_m))) +vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m1_b64))) +vbool64_t vmsne(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m1_b64_m))) +vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m2_b32))) +vbool32_t vmsne(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m2_b32_m))) +vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m4_b16))) +vbool16_t vmsne(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m4_b16_m))) +vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m8_b8))) +vbool8_t vmsne(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_i64m8_b8_m))) +vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m1_b8))) +vbool8_t vmsne(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m1_b8_m))) +vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m2_b4))) +vbool4_t vmsne(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m2_b4_m))) +vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m4_b2))) +vbool2_t vmsne(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m4_b2_m))) +vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m8_b1))) +vbool1_t vmsne(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8m8_b1_m))) +vbool1_t vmsne(vbool1_t op0, vbool1_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8mf2_b16))) +vbool16_t vmsne(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8mf2_b16_m))) +vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8mf4_b32))) +vbool32_t vmsne(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8mf4_b32_m))) +vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8mf8_b64))) +vbool64_t vmsne(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i8mf8_b64_m))) +vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m1_b16))) +vbool16_t vmsne(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m1_b16_m))) +vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m2_b8))) +vbool8_t vmsne(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m2_b8_m))) +vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m4_b4))) +vbool4_t vmsne(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m4_b4_m))) +vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m8_b2))) +vbool2_t vmsne(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16m8_b2_m))) +vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16mf2_b32))) +vbool32_t vmsne(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16mf2_b32_m))) +vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16mf4_b64))) +vbool64_t vmsne(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i16mf4_b64_m))) +vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m1_b32))) +vbool32_t vmsne(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m1_b32_m))) +vbool32_t vmsne(vbool32_t op0, 
vbool32_t op1, vint32m1_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m2_b16)))
+vbool16_t vmsne(vint32m2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m2_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint32m2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m4_b8)))
+vbool8_t vmsne(vint32m4_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m4_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint32m4_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m8_b4)))
+vbool4_t vmsne(vint32m8_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32m8_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vint32m8_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32mf2_b64)))
+vbool64_t vmsne(vint32mf2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i32mf2_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m1_b64)))
+vbool64_t vmsne(vint64m1_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m1_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vint64m1_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m2_b32)))
+vbool32_t vmsne(vint64m2_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m2_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vint64m2_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m4_b16)))
+vbool16_t vmsne(vint64m4_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m4_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vint64m4_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m8_b8)))
+vbool8_t vmsne(vint64m8_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_i64m8_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vint64m8_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m1_b8)))
+vbool8_t vmsne(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m1_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m2_b4)))
+vbool4_t vmsne(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m2_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m4_b2)))
+vbool2_t vmsne(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m4_b2_m)))
+vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m8_b1)))
+vbool1_t vmsne(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8m8_b1_m)))
+vbool1_t vmsne(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8mf2_b16)))
+vbool16_t vmsne(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8mf2_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8mf4_b32)))
+vbool32_t vmsne(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8mf4_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8mf8_b64)))
+vbool64_t vmsne(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u8mf8_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m1_b16)))
+vbool16_t vmsne(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m1_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m2_b8)))
+vbool8_t vmsne(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m2_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m4_b4)))
+vbool4_t vmsne(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m4_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m8_b2)))
+vbool2_t vmsne(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16m8_b2_m)))
+vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16mf2_b32)))
+vbool32_t vmsne(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16mf2_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16mf4_b64)))
+vbool64_t vmsne(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u16mf4_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m1_b32)))
+vbool32_t vmsne(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m1_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m2_b16)))
+vbool16_t vmsne(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m2_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m4_b8)))
+vbool8_t vmsne(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m4_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m8_b4)))
+vbool4_t vmsne(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32m8_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32mf2_b64)))
+vbool64_t vmsne(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u32mf2_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m1_b64)))
+vbool64_t vmsne(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m1_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m2_b32)))
+vbool32_t vmsne(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m2_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m4_b16)))
+vbool16_t vmsne(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m4_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m8_b8)))
+vbool8_t vmsne(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vv_u64m8_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m1_b8)))
+vbool8_t vmsne(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m1_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m2_b4)))
+vbool4_t vmsne(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m2_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m4_b2)))
+vbool2_t vmsne(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m4_b2_m)))
+vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m8_b1)))
+vbool1_t vmsne(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8m8_b1_m)))
+vbool1_t vmsne(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8mf2_b16)))
+vbool16_t vmsne(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8mf2_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8mf4_b32)))
+vbool32_t vmsne(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8mf4_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8mf8_b64)))
+vbool64_t vmsne(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u8mf8_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m1_b16)))
+vbool16_t vmsne(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m1_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m2_b8)))
+vbool8_t vmsne(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m2_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m4_b4)))
+vbool4_t vmsne(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m4_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m8_b2)))
+vbool2_t vmsne(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16m8_b2_m)))
+vbool2_t vmsne(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16mf2_b32)))
+vbool32_t vmsne(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16mf2_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16mf4_b64)))
+vbool64_t vmsne(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u16mf4_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m1_b32)))
+vbool32_t vmsne(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m1_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m2_b16)))
+vbool16_t vmsne(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m2_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m4_b8)))
+vbool8_t vmsne(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m4_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m8_b4)))
+vbool4_t vmsne(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32m8_b4_m)))
+vbool4_t vmsne(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32mf2_b64)))
+vbool64_t vmsne(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u32mf2_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m1_b64)))
+vbool64_t vmsne(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m1_b64_m)))
+vbool64_t vmsne(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m2_b32)))
+vbool32_t vmsne(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m2_b32_m)))
+vbool32_t vmsne(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m4_b16)))
+vbool16_t vmsne(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m4_b16_m)))
+vbool16_t vmsne(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m8_b8)))
+vbool8_t vmsne(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsne_vx_u64m8_b8_m)))
+vbool8_t vmsne(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m1_b8)))
+vbool8_t vmsltu(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m1_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m2_b4)))
+vbool4_t vmsltu(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m2_b4_m)))
+vbool4_t vmsltu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m4_b2)))
+vbool2_t vmsltu(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m4_b2_m)))
+vbool2_t vmsltu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m8_b1)))
+vbool1_t vmsltu(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8m8_b1_m)))
+vbool1_t vmsltu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8mf2_b16)))
+vbool16_t vmsltu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8mf2_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8mf4_b32)))
+vbool32_t vmsltu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8mf4_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8mf8_b64)))
+vbool64_t vmsltu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u8mf8_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m1_b16)))
+vbool16_t vmsltu(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m1_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m2_b8)))
+vbool8_t vmsltu(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m2_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m4_b4)))
+vbool4_t vmsltu(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m4_b4_m)))
+vbool4_t vmsltu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m8_b2)))
+vbool2_t vmsltu(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16m8_b2_m)))
+vbool2_t vmsltu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16mf2_b32)))
+vbool32_t vmsltu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16mf2_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16mf4_b64)))
+vbool64_t vmsltu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u16mf4_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m1_b32)))
+vbool32_t vmsltu(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m1_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m2_b16)))
+vbool16_t vmsltu(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m2_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m4_b8)))
+vbool8_t vmsltu(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m4_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m8_b4)))
+vbool4_t vmsltu(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32m8_b4_m)))
+vbool4_t vmsltu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32mf2_b64)))
+vbool64_t vmsltu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u32mf2_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m1_b64)))
+vbool64_t vmsltu(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m1_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m2_b32)))
+vbool32_t vmsltu(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m2_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m4_b16)))
+vbool16_t vmsltu(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m4_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m8_b8)))
+vbool8_t vmsltu(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vv_u64m8_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m1_b8)))
+vbool8_t vmsltu(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m1_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m2_b4)))
+vbool4_t vmsltu(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m2_b4_m)))
+vbool4_t vmsltu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m4_b2)))
+vbool2_t vmsltu(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m4_b2_m)))
+vbool2_t vmsltu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m8_b1)))
+vbool1_t vmsltu(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8m8_b1_m)))
+vbool1_t vmsltu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8mf2_b16)))
+vbool16_t vmsltu(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8mf2_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8mf4_b32)))
+vbool32_t vmsltu(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8mf4_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8mf8_b64)))
+vbool64_t vmsltu(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u8mf8_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m1_b16)))
+vbool16_t vmsltu(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m1_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m2_b8)))
+vbool8_t vmsltu(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m2_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m4_b4)))
+vbool4_t vmsltu(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m4_b4_m)))
+vbool4_t vmsltu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m8_b2)))
+vbool2_t vmsltu(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16m8_b2_m)))
+vbool2_t vmsltu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16mf2_b32)))
+vbool32_t vmsltu(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16mf2_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16mf4_b64)))
+vbool64_t vmsltu(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u16mf4_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m1_b32)))
+vbool32_t vmsltu(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m1_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m2_b16)))
+vbool16_t vmsltu(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m2_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m4_b8)))
+vbool8_t vmsltu(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m4_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m8_b4)))
+vbool4_t vmsltu(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32m8_b4_m)))
+vbool4_t vmsltu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32mf2_b64)))
+vbool64_t vmsltu(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u32mf2_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m1_b64)))
+vbool64_t vmsltu(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m1_b64_m)))
+vbool64_t vmsltu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m2_b32)))
+vbool32_t vmsltu(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m2_b32_m)))
+vbool32_t vmsltu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m4_b16)))
+vbool16_t vmsltu(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m4_b16_m)))
+vbool16_t vmsltu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m8_b8)))
+vbool8_t vmsltu(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsltu_vx_u64m8_b8_m)))
+vbool8_t vmsltu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m1_b8)))
+vbool8_t vmslt(vint8m1_t op0, vint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m1_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m2_b4)))
+vbool4_t vmslt(vint8m2_t op0, vint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m2_b4_m)))
+vbool4_t vmslt(vbool4_t op0, vbool4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m4_b2)))
+vbool2_t vmslt(vint8m4_t op0, vint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m4_b2_m)))
+vbool2_t vmslt(vbool2_t op0, vbool2_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m8_b1)))
+vbool1_t vmslt(vint8m8_t op0, vint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8m8_b1_m)))
+vbool1_t vmslt(vbool1_t op0, vbool1_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8mf2_b16)))
+vbool16_t vmslt(vint8mf2_t op0, vint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8mf2_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8mf4_b32)))
+vbool32_t vmslt(vint8mf4_t op0, vint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8mf4_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8mf8_b64)))
+vbool64_t vmslt(vint8mf8_t op0, vint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i8mf8_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m1_b16)))
+vbool16_t vmslt(vint16m1_t op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m1_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m2_b8)))
+vbool8_t vmslt(vint16m2_t op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m2_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m4_b4)))
+vbool4_t vmslt(vint16m4_t op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m4_b4_m)))
+vbool4_t vmslt(vbool4_t op0, vbool4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m8_b2)))
+vbool2_t vmslt(vint16m8_t op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16m8_b2_m)))
+vbool2_t vmslt(vbool2_t op0, vbool2_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16mf2_b32)))
+vbool32_t vmslt(vint16mf2_t op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16mf2_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16mf4_b64)))
+vbool64_t vmslt(vint16mf4_t op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i16mf4_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m1_b32)))
+vbool32_t vmslt(vint32m1_t op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m1_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m2_b16)))
+vbool16_t vmslt(vint32m2_t op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m2_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m4_b8)))
+vbool8_t vmslt(vint32m4_t op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m4_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m8_b4)))
+vbool4_t vmslt(vint32m8_t op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32m8_b4_m)))
+vbool4_t vmslt(vbool4_t op0, vbool4_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32mf2_b64)))
+vbool64_t vmslt(vint32mf2_t op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i32mf2_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m1_b64)))
+vbool64_t vmslt(vint64m1_t op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m1_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m2_b32)))
+vbool32_t vmslt(vint64m2_t op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m2_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m4_b16)))
+vbool16_t vmslt(vint64m4_t op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m4_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m8_b8)))
+vbool8_t vmslt(vint64m8_t op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vv_i64m8_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16m1)))
+void vsuxei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16m1_m)))
+void vsuxei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16m2)))
+void vsuxei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16m2_m)))
+void vsuxei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16m4)))
+void vsuxei32(uint16_t * op0, vuint32m8_t op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16m4_m)))
+void vsuxei32(vbool4_t op0, uint16_t * op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16mf2)))
+void vsuxei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16mf2_m)))
+void vsuxei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16mf4)))
+void vsuxei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u16mf4_m)))
+void vsuxei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m1_b8)))
+vbool8_t vmslt(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m1_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m2_b4)))
+vbool4_t vmslt(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m2_b4_m)))
+vbool4_t vmslt(vbool4_t op0, vbool4_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m4_b2)))
+vbool2_t vmslt(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m4_b2_m)))
+vbool2_t vmslt(vbool2_t op0, vbool2_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m8_b1)))
+vbool1_t vmslt(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8m8_b1_m)))
+vbool1_t vmslt(vbool1_t op0, vbool1_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8mf2_b16)))
+vbool16_t vmslt(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8mf2_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8mf4_b32)))
+vbool32_t vmslt(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8mf4_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8mf8_b64)))
+vbool64_t vmslt(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i8mf8_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m1_b16)))
+vbool16_t vmslt(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m1_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m2_b8)))
+vbool8_t vmslt(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m2_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m4_b4)))
+vbool4_t vmslt(vint16m4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m4_b4_m)))
+vbool4_t vmslt(vbool4_t op0, vbool4_t op1, vint16m4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m8_b2)))
+vbool2_t vmslt(vint16m8_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16m8_b2_m)))
+vbool2_t vmslt(vbool2_t op0, vbool2_t op1, vint16m8_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16mf2_b32)))
+vbool32_t vmslt(vint16mf2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16mf2_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16mf4_b64)))
+vbool64_t vmslt(vint16mf4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i16mf4_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m1_b32)))
+vbool32_t vmslt(vint32m1_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m1_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint32m1_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m2_b16)))
+vbool16_t vmslt(vint32m2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m2_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint32m2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m4_b8)))
+vbool8_t vmslt(vint32m4_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m4_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint32m4_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m8_b4)))
+vbool4_t vmslt(vint32m8_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32m8_b4_m)))
+vbool4_t vmslt(vbool4_t op0, vbool4_t op1, vint32m8_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32mf2_b64)))
+vbool64_t vmslt(vint32mf2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i32mf2_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m1_b64)))
+vbool64_t vmslt(vint64m1_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m1_b64_m)))
+vbool64_t vmslt(vbool64_t op0, vbool64_t op1, vint64m1_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m2_b32)))
+vbool32_t vmslt(vint64m2_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m2_b32_m)))
+vbool32_t vmslt(vbool32_t op0, vbool32_t op1, vint64m2_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m4_b16)))
+vbool16_t vmslt(vint64m4_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m4_b16_m)))
+vbool16_t vmslt(vbool16_t op0, vbool16_t op1, vint64m4_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m8_b8)))
+vbool8_t vmslt(vint64m8_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmslt_vx_i64m8_b8_m)))
+vbool8_t vmslt(vbool8_t op0, vbool8_t op1, vint64m8_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m1_b8)))
+vbool8_t vmsleu(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m1_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m2_b4)))
+vbool4_t vmsleu(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m2_b4_m)))
+vbool4_t vmsleu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m4_b2)))
+vbool2_t vmsleu(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m4_b2_m)))
+vbool2_t vmsleu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m8_b1)))
+vbool1_t vmsleu(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8m8_b1_m)))
+vbool1_t vmsleu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8mf2_b16)))
+vbool16_t vmsleu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8mf2_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8mf4_b32)))
+vbool32_t vmsleu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8mf4_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8mf8_b64)))
+vbool64_t vmsleu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u8mf8_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m1_b16)))
+vbool16_t vmsleu(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m1_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m2_b8)))
+vbool8_t vmsleu(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m2_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m4_b4)))
+vbool4_t vmsleu(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m4_b4_m)))
+vbool4_t vmsleu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m8_b2)))
+vbool2_t vmsleu(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16m8_b2_m)))
+vbool2_t vmsleu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16mf2_b32)))
+vbool32_t vmsleu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16mf2_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16mf4_b64)))
+vbool64_t vmsleu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u16mf4_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m1_b32)))
+vbool32_t vmsleu(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m1_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m2_b16)))
+vbool16_t vmsleu(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m2_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m4_b8)))
+vbool8_t vmsleu(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m4_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m8_b4)))
+vbool4_t vmsleu(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32m8_b4_m)))
+vbool4_t vmsleu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32mf2_b64)))
+vbool64_t vmsleu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u32mf2_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m1_b64)))
+vbool64_t vmsleu(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m1_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m2_b32)))
+vbool32_t vmsleu(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m2_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m4_b16)))
+vbool16_t vmsleu(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m4_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m8_b8)))
+vbool8_t vmsleu(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vv_u64m8_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m1_b8)))
+vbool8_t vmsleu(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m1_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m2_b4)))
+vbool4_t vmsleu(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m2_b4_m)))
+vbool4_t vmsleu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m4_b2)))
+vbool2_t vmsleu(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m4_b2_m)))
+vbool2_t vmsleu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m8_b1)))
+vbool1_t vmsleu(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8m8_b1_m)))
+vbool1_t vmsleu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8mf2_b16)))
+vbool16_t vmsleu(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8mf2_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8mf4_b32)))
+vbool32_t vmsleu(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8mf4_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8mf8_b64)))
+vbool64_t vmsleu(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u8mf8_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m1_b16)))
+vbool16_t vmsleu(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m1_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m2_b8)))
+vbool8_t vmsleu(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m2_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m4_b4)))
+vbool4_t vmsleu(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m4_b4_m)))
+vbool4_t vmsleu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m8_b2)))
+vbool2_t vmsleu(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16m8_b2_m)))
+vbool2_t vmsleu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16mf2_b32)))
+vbool32_t vmsleu(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16mf2_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16mf4_b64)))
+vbool64_t vmsleu(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u16mf4_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m1_b32)))
+vbool32_t vmsleu(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m1_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m2_b16)))
+vbool16_t vmsleu(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m2_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m4_b8)))
+vbool8_t vmsleu(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m4_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m8_b4)))
+vbool4_t vmsleu(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32m8_b4_m)))
+vbool4_t vmsleu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32mf2_b64)))
+vbool64_t vmsleu(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u32mf2_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m1_b64)))
+vbool64_t vmsleu(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m1_b64_m)))
+vbool64_t vmsleu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m2_b32)))
+vbool32_t vmsleu(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m2_b32_m)))
+vbool32_t vmsleu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m4_b16)))
+vbool16_t vmsleu(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m4_b16_m)))
+vbool16_t vmsleu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m8_b8)))
+vbool8_t vmsleu(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsleu_vx_u64m8_b8_m)))
+vbool8_t vmsleu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m1_b8)))
+vbool8_t vmsle(vint8m1_t op0, vint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m1_b8_m)))
+vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m2_b4)))
+vbool4_t vmsle(vint8m2_t op0, vint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m2_b4_m)))
+vbool4_t vmsle(vbool4_t op0, vbool4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m4_b2)))
+vbool2_t vmsle(vint8m4_t op0, vint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m4_b2_m)))
+vbool2_t vmsle(vbool2_t op0, vbool2_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m8_b1)))
+vbool1_t vmsle(vint8m8_t op0, vint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8m8_b1_m)))
+vbool1_t vmsle(vbool1_t op0, vbool1_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8mf2_b16)))
+vbool16_t vmsle(vint8mf2_t op0, vint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8mf2_b16_m)))
+vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8mf4_b32)))
+vbool32_t vmsle(vint8mf4_t op0, vint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8mf4_b32_m)))
+vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8mf8_b64)))
+vbool64_t vmsle(vint8mf8_t op0, vint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i8mf8_b64_m)))
+vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m1_b16)))
+vbool16_t vmsle(vint16m1_t op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m1_b16_m)))
+vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m2_b8)))
+vbool8_t vmsle(vint16m2_t op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m2_b8_m)))
+vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m4_b4)))
+vbool4_t vmsle(vint16m4_t op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m4_b4_m)))
+vbool4_t vmsle(vbool4_t op0, vbool4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m8_b2)))
+vbool2_t vmsle(vint16m8_t op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16m8_b2_m)))
+vbool2_t vmsle(vbool2_t op0, vbool2_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16mf2_b32)))
+vbool32_t vmsle(vint16mf2_t op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16mf2_b32_m)))
+vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16mf4_b64)))
+vbool64_t vmsle(vint16mf4_t op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i16mf4_b64_m)))
+vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m1_b32)))
+vbool32_t vmsle(vint32m1_t op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m1_b32_m)))
+vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m2_b16)))
+vbool16_t vmsle(vint32m2_t op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m2_b16_m)))
+vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m4_b8)))
+vbool8_t vmsle(vint32m4_t op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m4_b8_m)))
+vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m8_b4)))
+vbool4_t vmsle(vint32m8_t op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32m8_b4_m)))
+vbool4_t vmsle(vbool4_t op0, vbool4_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32mf2_b64)))
+vbool64_t vmsle(vint32mf2_t op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i32mf2_b64_m)))
+vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m1_b64)))
+vbool64_t vmsle(vint64m1_t op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m1_b64_m)))
+vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m2_b32)))
+vbool32_t vmsle(vint64m2_t op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m2_b32_m)))
+vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m4_b16)))
+vbool16_t vmsle(vint64m4_t op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m4_b16_m)))
+vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m8_b8)))
+vbool8_t vmsle(vint64m8_t op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vv_i64m8_b8_m)))
+vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m1_b8)))
+vbool8_t vmsle(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m1_b8_m)))
+vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m2_b4)))
+vbool4_t vmsle(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m2_b4_m)))
+vbool4_t vmsle(vbool4_t op0, vbool4_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m4_b2)))
+vbool2_t vmsle(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m4_b2_m)))
+vbool2_t vmsle(vbool2_t op0, vbool2_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m8_b1)))
+vbool1_t vmsle(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8m8_b1_m)))
+vbool1_t vmsle(vbool1_t op0, vbool1_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8mf2_b16)))
+vbool16_t vmsle(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8mf2_b16_m)))
+vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8mf4_b32)))
+vbool32_t vmsle(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8mf4_b32_m)))
+vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8mf8_b64)))
+vbool64_t vmsle(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i8mf8_b64_m)))
+vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m1_b16)))
+vbool16_t vmsle(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m1_b16_m)))
+vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m2_b8)))
+vbool8_t vmsle(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m2_b8_m)))
+vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m4_b4)))
+vbool4_t vmsle(vint16m4_t op0, int16_t
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m4_b4_m))) +vbool4_t vmsle(vbool4_t op0, vbool4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m8_b2))) +vbool2_t vmsle(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16m8_b2_m))) +vbool2_t vmsle(vbool2_t op0, vbool2_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16mf2_b32))) +vbool32_t vmsle(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16mf2_b32_m))) +vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16mf4_b64))) +vbool64_t vmsle(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i16mf4_b64_m))) +vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m1_b32))) +vbool32_t vmsle(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m1_b32_m))) +vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m2_b16))) +vbool16_t vmsle(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m2_b16_m))) +vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m4_b8))) +vbool8_t vmsle(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m4_b8_m))) +vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m8_b4))) +vbool4_t vmsle(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32m8_b4_m))) +vbool4_t vmsle(vbool4_t op0, vbool4_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32mf2_b64))) +vbool64_t vmsle(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i32mf2_b64_m))) +vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m1_b64))) +vbool64_t vmsle(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m1_b64_m))) +vbool64_t vmsle(vbool64_t op0, vbool64_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m2_b32))) +vbool32_t vmsle(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m2_b32_m))) +vbool32_t vmsle(vbool32_t op0, vbool32_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m4_b16))) +vbool16_t vmsle(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m4_b16_m))) +vbool16_t vmsle(vbool16_t op0, vbool16_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m8_b8))) +vbool8_t vmsle(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsle_vx_i64m8_b8_m))) +vbool8_t vmsle(vbool8_t op0, vbool8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m1_b8))) +vbool8_t vmsgtu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m1_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m2_b4))) +vbool4_t vmsgtu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m2_b4_m))) +vbool4_t vmsgtu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m4_b2))) +vbool2_t vmsgtu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m4_b2_m))) +vbool2_t vmsgtu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m8_b1))) +vbool1_t vmsgtu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8m8_b1_m))) +vbool1_t vmsgtu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8mf2_b16))) +vbool16_t vmsgtu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8mf2_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8mf4_b32))) +vbool32_t vmsgtu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8mf4_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8mf8_b64))) +vbool64_t vmsgtu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u8mf8_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m1_b16))) +vbool16_t vmsgtu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m1_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m2_b8))) +vbool8_t vmsgtu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m2_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m4_b4))) +vbool4_t vmsgtu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m4_b4_m))) +vbool4_t vmsgtu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m8_b2))) +vbool2_t vmsgtu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16m8_b2_m))) +vbool2_t vmsgtu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16mf2_b32))) +vbool32_t vmsgtu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16mf2_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16mf4_b64))) +vbool64_t vmsgtu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u16mf4_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m1_b32))) +vbool32_t vmsgtu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m1_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m2_b16))) +vbool16_t vmsgtu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m2_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m4_b8))) +vbool8_t vmsgtu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m4_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m8_b4))) +vbool4_t vmsgtu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32m8_b4_m))) +vbool4_t vmsgtu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32mf2_b64))) +vbool64_t vmsgtu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u32mf2_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m1_b64))) +vbool64_t vmsgtu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m1_b64_m))) +vbool64_t vmsgtu(vbool64_t 
op0, vbool64_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m2_b32))) +vbool32_t vmsgtu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m2_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m4_b16))) +vbool16_t vmsgtu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m4_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m8_b8))) +vbool8_t vmsgtu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vv_u64m8_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m1_b8))) +vbool8_t vmsgtu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m1_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m2_b4))) +vbool4_t vmsgtu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m2_b4_m))) +vbool4_t vmsgtu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m4_b2))) +vbool2_t vmsgtu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m4_b2_m))) +vbool2_t vmsgtu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m8_b1))) +vbool1_t vmsgtu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8m8_b1_m))) +vbool1_t vmsgtu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8mf2_b16))) +vbool16_t vmsgtu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8mf2_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8mf4_b32))) +vbool32_t vmsgtu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8mf4_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8mf8_b64))) +vbool64_t vmsgtu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u8mf8_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m1_b16))) +vbool16_t vmsgtu(vuint16m1_t op0, 
uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m1_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m2_b8))) +vbool8_t vmsgtu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m2_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m4_b4))) +vbool4_t vmsgtu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m4_b4_m))) +vbool4_t vmsgtu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m8_b2))) +vbool2_t vmsgtu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16m8_b2_m))) +vbool2_t vmsgtu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16mf2_b32))) +vbool32_t vmsgtu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16mf2_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16mf4_b64))) +vbool64_t vmsgtu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u16mf4_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m1_b32))) +vbool32_t vmsgtu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m1_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m2_b16))) +vbool16_t vmsgtu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m2_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m4_b8))) +vbool8_t vmsgtu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m4_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m8_b4))) +vbool4_t vmsgtu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32m8_b4_m))) +vbool4_t vmsgtu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32mf2_b64))) +vbool64_t vmsgtu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u32mf2_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, 
vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m1_b64))) +vbool64_t vmsgtu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m1_b64_m))) +vbool64_t vmsgtu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m2_b32))) +vbool32_t vmsgtu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m2_b32_m))) +vbool32_t vmsgtu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m4_b16))) +vbool16_t vmsgtu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m4_b16_m))) +vbool16_t vmsgtu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m8_b8))) +vbool8_t vmsgtu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgtu_vx_u64m8_b8_m))) +vbool8_t vmsgtu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m1_b8))) +vbool8_t vmsgt(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m1_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m2_b4))) +vbool4_t vmsgt(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m2_b4_m))) +vbool4_t vmsgt(vbool4_t op0, vbool4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m4_b2))) +vbool2_t vmsgt(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m4_b2_m))) +vbool2_t vmsgt(vbool2_t op0, vbool2_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m8_b1))) +vbool1_t vmsgt(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8m8_b1_m))) +vbool1_t vmsgt(vbool1_t op0, vbool1_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8mf2_b16))) +vbool16_t vmsgt(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8mf2_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8mf4_b32))) +vbool32_t vmsgt(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8mf4_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8mf8_b64))) +vbool64_t vmsgt(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i8mf8_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m1_b16))) +vbool16_t vmsgt(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m1_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m2_b8))) +vbool8_t vmsgt(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m2_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m4_b4))) +vbool4_t vmsgt(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m4_b4_m))) +vbool4_t vmsgt(vbool4_t op0, vbool4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m8_b2))) +vbool2_t vmsgt(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16m8_b2_m))) +vbool2_t vmsgt(vbool2_t op0, vbool2_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16mf2_b32))) +vbool32_t vmsgt(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16mf2_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16mf4_b64))) +vbool64_t vmsgt(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i16mf4_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m1_b32))) +vbool32_t vmsgt(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m1_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m2_b16))) +vbool16_t vmsgt(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m2_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m4_b8))) +vbool8_t vmsgt(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m4_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m8_b4))) +vbool4_t vmsgt(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32m8_b4_m))) +vbool4_t vmsgt(vbool4_t op0, vbool4_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32mf2_b64))) +vbool64_t vmsgt(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i32mf2_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m1_b64))) +vbool64_t vmsgt(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m1_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m2_b32))) +vbool32_t vmsgt(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m2_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m4_b16))) +vbool16_t vmsgt(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m4_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m8_b8))) +vbool8_t vmsgt(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vv_i64m8_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m1_b8))) +vbool8_t vmsgt(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m1_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m2_b4))) +vbool4_t vmsgt(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m2_b4_m))) +vbool4_t vmsgt(vbool4_t op0, vbool4_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m4_b2))) +vbool2_t vmsgt(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m4_b2_m))) +vbool2_t vmsgt(vbool2_t op0, vbool2_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m8_b1))) +vbool1_t vmsgt(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8m8_b1_m))) +vbool1_t vmsgt(vbool1_t op0, vbool1_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8mf2_b16))) +vbool16_t vmsgt(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8mf2_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8mf4_b32))) +vbool32_t vmsgt(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8mf4_b32_m))) +vbool32_t vmsgt(vbool32_t op0, 
vbool32_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8mf8_b64))) +vbool64_t vmsgt(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i8mf8_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m1_b16))) +vbool16_t vmsgt(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m1_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m2_b8))) +vbool8_t vmsgt(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m2_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m4_b4))) +vbool4_t vmsgt(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m4_b4_m))) +vbool4_t vmsgt(vbool4_t op0, vbool4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m8_b2))) +vbool2_t vmsgt(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16m8_b2_m))) +vbool2_t vmsgt(vbool2_t op0, vbool2_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16mf2_b32))) +vbool32_t vmsgt(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16mf2_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16mf4_b64))) +vbool64_t vmsgt(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i16mf4_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m1_b32))) +vbool32_t vmsgt(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m1_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m2_b16))) +vbool16_t vmsgt(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m2_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m4_b8))) +vbool8_t vmsgt(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m4_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m8_b4))) +vbool4_t vmsgt(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32m8_b4_m))) +vbool4_t vmsgt(vbool4_t op0, vbool4_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32mf2_b64))) +vbool64_t vmsgt(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i32mf2_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m1_b64))) +vbool64_t vmsgt(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m1_b64_m))) +vbool64_t vmsgt(vbool64_t op0, vbool64_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m2_b32))) +vbool32_t vmsgt(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m2_b32_m))) +vbool32_t vmsgt(vbool32_t op0, vbool32_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m4_b16))) +vbool16_t vmsgt(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m4_b16_m))) +vbool16_t vmsgt(vbool16_t op0, vbool16_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m8_b8))) +vbool8_t vmsgt(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgt_vx_i64m8_b8_m))) +vbool8_t vmsgt(vbool8_t op0, vbool8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m1_b8))) +vbool8_t vmsgeu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m1_b8_m))) +vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m2_b4))) +vbool4_t vmsgeu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m2_b4_m))) +vbool4_t vmsgeu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m4_b2))) +vbool2_t vmsgeu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m4_b2_m))) +vbool2_t vmsgeu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m8_b1))) +vbool1_t vmsgeu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8m8_b1_m))) +vbool1_t vmsgeu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8mf2_b16))) +vbool16_t vmsgeu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8mf2_b16_m))) +vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8mf4_b32))) +vbool32_t vmsgeu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8mf4_b32_m))) +vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8mf8_b64))) +vbool64_t vmsgeu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u8mf8_b64_m))) +vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m1_b16))) +vbool16_t vmsgeu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m1_b16_m))) +vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m2_b8))) +vbool8_t vmsgeu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m2_b8_m))) +vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m4_b4))) +vbool4_t vmsgeu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m4_b4_m))) +vbool4_t vmsgeu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m8_b2))) +vbool2_t vmsgeu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16m8_b2_m))) +vbool2_t vmsgeu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16mf2_b32))) +vbool32_t vmsgeu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16mf2_b32_m))) +vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16mf4_b64))) +vbool64_t vmsgeu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u16mf4_b64_m))) +vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m1_b32))) +vbool32_t vmsgeu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m1_b32_m))) +vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m2_b16))) +vbool16_t vmsgeu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m2_b16_m))) +vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m4_b8))) +vbool8_t vmsgeu(vuint32m4_t 
op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m4_b8_m)))
+vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m8_b4)))
+vbool4_t vmsgeu(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32m8_b4_m)))
+vbool4_t vmsgeu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32mf2_b64)))
+vbool64_t vmsgeu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u32mf2_b64_m)))
+vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m1_b64)))
+vbool64_t vmsgeu(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m1_b64_m)))
+vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m2_b32)))
+vbool32_t vmsgeu(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m2_b32_m)))
+vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m4_b16)))
+vbool16_t vmsgeu(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m4_b16_m)))
+vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m8_b8)))
+vbool8_t vmsgeu(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vv_u64m8_b8_m)))
+vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16m1)))
+void vsuxei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16m1_m)))
+void vsuxei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16m2)))
+void vsuxei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16m2_m)))
+void vsuxei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16mf2)))
+void vsuxei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16mf2_m)))
+void vsuxei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16mf4)))
+void vsuxei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i16mf4_m)))
+void vsuxei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, size_t op4);
+
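[Editorial note, not part of the commit.] The eight vsuxei64 declarations just above are 64-bit-indexed scatter stores for int16 data (vsuxei = unordered indexed store); the header generator emits them here between the vmsgeu vector-vector and vector-scalar groups. A short sketch of how the overloaded form is called, assuming the same clang 14/rv64gcv setup; the helper name is invented, and per the RVV spec the index vector holds byte offsets:

    #include <riscv_vector.h>

    // Hypothetical helper: scatter vl int16 values to base + byte_offsets[i].
    void scatter_i16(int16_t *base, vuint64m4_t byte_offsets,
                     vint16m1_t values, size_t vl) {
      // Resolves to vsuxei64_v_i16m1: EEW=64 indices, EEW=16 data; the
      // masked overload additionally takes a vbool16_t mask as op0.
      vsuxei64(base, byte_offsets, values, vl);
    }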
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m1_b8)))
+vbool8_t vmsgeu(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m1_b8_m)))
+vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m2_b4)))
+vbool4_t vmsgeu(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m2_b4_m)))
+vbool4_t vmsgeu(vbool4_t op0, vbool4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m4_b2)))
+vbool2_t vmsgeu(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m4_b2_m)))
+vbool2_t vmsgeu(vbool2_t op0, vbool2_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m8_b1)))
+vbool1_t vmsgeu(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8m8_b1_m)))
+vbool1_t vmsgeu(vbool1_t op0, vbool1_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8mf2_b16)))
+vbool16_t vmsgeu(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8mf2_b16_m)))
+vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8mf4_b32)))
+vbool32_t vmsgeu(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8mf4_b32_m)))
+vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8mf8_b64)))
+vbool64_t vmsgeu(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u8mf8_b64_m)))
+vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m1_b16)))
+vbool16_t vmsgeu(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m1_b16_m)))
+vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m2_b8)))
+vbool8_t vmsgeu(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m2_b8_m)))
+vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m4_b4)))
+vbool4_t vmsgeu(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m4_b4_m)))
+vbool4_t vmsgeu(vbool4_t op0, vbool4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m8_b2)))
+vbool2_t vmsgeu(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16m8_b2_m)))
+vbool2_t vmsgeu(vbool2_t op0, vbool2_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16mf2_b32)))
+vbool32_t vmsgeu(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16mf2_b32_m)))
+vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16mf4_b64)))
+vbool64_t vmsgeu(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u16mf4_b64_m)))
+vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m1_b32)))
+vbool32_t vmsgeu(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m1_b32_m)))
+vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m2_b16)))
+vbool16_t vmsgeu(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m2_b16_m)))
+vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m4_b8)))
+vbool8_t vmsgeu(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m4_b8_m)))
+vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m8_b4)))
+vbool4_t vmsgeu(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32m8_b4_m)))
+vbool4_t vmsgeu(vbool4_t op0, vbool4_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32mf2_b64)))
+vbool64_t vmsgeu(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u32mf2_b64_m)))
+vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m1_b64)))
+vbool64_t vmsgeu(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m1_b64_m)))
+vbool64_t vmsgeu(vbool64_t op0, vbool64_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m2_b32)))
+vbool32_t vmsgeu(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m2_b32_m)))
+vbool32_t vmsgeu(vbool32_t op0, vbool32_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m4_b16)))
+vbool16_t vmsgeu(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m4_b16_m)))
+vbool16_t vmsgeu(vbool16_t op0, vbool16_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m8_b8)))
+vbool8_t vmsgeu(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsgeu_vx_u64m8_b8_m)))
+vbool8_t vmsgeu(vbool8_t op0, vbool8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m1_b8)))
+vbool8_t vmsge(vint8m1_t op0, vint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m1_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m2_b4)))
+vbool4_t vmsge(vint8m2_t op0, vint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m2_b4_m)))
+vbool4_t vmsge(vbool4_t op0, vbool4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m4_b2)))
+vbool2_t vmsge(vint8m4_t op0, vint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m4_b2_m)))
+vbool2_t vmsge(vbool2_t op0, vbool2_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m8_b1)))
+vbool1_t vmsge(vint8m8_t op0, vint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8m8_b1_m)))
+vbool1_t vmsge(vbool1_t op0, vbool1_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8mf2_b16)))
+vbool16_t vmsge(vint8mf2_t op0, vint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8mf2_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8mf4_b32)))
+vbool32_t vmsge(vint8mf4_t op0, vint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8mf4_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8mf8_b64)))
+vbool64_t vmsge(vint8mf8_t op0, vint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i8mf8_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m1_b16)))
+vbool16_t vmsge(vint16m1_t op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m1_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m2_b8)))
+vbool8_t vmsge(vint16m2_t op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m2_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m4_b4)))
+vbool4_t vmsge(vint16m4_t op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m4_b4_m)))
+vbool4_t vmsge(vbool4_t op0, vbool4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m8_b2)))
+vbool2_t vmsge(vint16m8_t op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16m8_b2_m)))
+vbool2_t vmsge(vbool2_t op0, vbool2_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16mf2_b32)))
+vbool32_t vmsge(vint16mf2_t op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16mf2_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16mf4_b64)))
+vbool64_t vmsge(vint16mf4_t op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i16mf4_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m1_b32)))
+vbool32_t vmsge(vint32m1_t op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m1_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m2_b16)))
+vbool16_t vmsge(vint32m2_t op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m2_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m4_b8)))
+vbool8_t vmsge(vint32m4_t op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m4_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m8_b4)))
+vbool4_t vmsge(vint32m8_t op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32m8_b4_m)))
+vbool4_t vmsge(vbool4_t op0, vbool4_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32mf2_b64)))
+vbool64_t vmsge(vint32mf2_t op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i32mf2_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m1_b64)))
+vbool64_t vmsge(vint64m1_t op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m1_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m2_b32)))
+vbool32_t vmsge(vint64m2_t op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m2_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m4_b16)))
+vbool16_t vmsge(vint64m4_t op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m4_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m8_b8)))
+vbool8_t vmsge(vint64m8_t op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vv_i64m8_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m1_b8)))
+vbool8_t vmsge(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m1_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m2_b4)))
+vbool4_t vmsge(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m2_b4_m)))
+vbool4_t vmsge(vbool4_t op0, vbool4_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m4_b2)))
+vbool2_t vmsge(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m4_b2_m)))
+vbool2_t vmsge(vbool2_t op0, vbool2_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m8_b1)))
+vbool1_t vmsge(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8m8_b1_m)))
+vbool1_t vmsge(vbool1_t op0, vbool1_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8mf2_b16)))
+vbool16_t vmsge(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8mf2_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8mf4_b32)))
+vbool32_t vmsge(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8mf4_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8mf8_b64)))
+vbool64_t vmsge(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i8mf8_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m1_b16)))
+vbool16_t vmsge(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m1_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m2_b8)))
+vbool8_t vmsge(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m2_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m4_b4)))
+vbool4_t vmsge(vint16m4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m4_b4_m)))
+vbool4_t vmsge(vbool4_t op0, vbool4_t op1, vint16m4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m8_b2)))
+vbool2_t vmsge(vint16m8_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16m8_b2_m)))
+vbool2_t vmsge(vbool2_t op0, vbool2_t op1, vint16m8_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16mf2_b32)))
+vbool32_t vmsge(vint16mf2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16mf2_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint16mf2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16mf4_b64)))
+vbool64_t vmsge(vint16mf4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i16mf4_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint16mf4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m1_b32)))
+vbool32_t vmsge(vint32m1_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m1_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint32m1_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m2_b16)))
+vbool16_t vmsge(vint32m2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m2_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint32m2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m4_b8)))
+vbool8_t vmsge(vint32m4_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m4_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint32m4_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m8_b4)))
+vbool4_t vmsge(vint32m8_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32m8_b4_m)))
+vbool4_t vmsge(vbool4_t op0, vbool4_t op1, vint32m8_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32mf2_b64)))
+vbool64_t vmsge(vint32mf2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i32mf2_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint32mf2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m1_b64)))
+vbool64_t vmsge(vint64m1_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m1_b64_m)))
+vbool64_t vmsge(vbool64_t op0, vbool64_t op1, vint64m1_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m2_b32)))
+vbool32_t vmsge(vint64m2_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m2_b32_m)))
+vbool32_t vmsge(vbool32_t op0, vbool32_t op1, vint64m2_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m4_b16)))
+vbool16_t vmsge(vint64m4_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m4_b16_m)))
+vbool16_t vmsge(vbool16_t op0, vbool16_t op1, vint64m4_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m8_b8)))
+vbool8_t vmsge(vint64m8_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsge_vx_i64m8_b8_m)))
+vbool8_t vmsge(vbool8_t op0, vbool8_t op1, vint64m8_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m1)))
+vuint8m1_t vminu(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m1_m)))
+vuint8m1_t vminu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m2)))
+vuint8m2_t vminu(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m2_m)))
+vuint8m2_t vminu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m4)))
+vuint8m4_t vminu(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m4_m)))
+vuint8m4_t vminu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m8)))
+vuint8m8_t vminu(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8m8_m)))
+vuint8m8_t vminu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8mf2)))
+vuint8mf2_t vminu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8mf2_m)))
+vuint8mf2_t vminu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8mf4)))
+vuint8mf4_t vminu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8mf4_m)))
+vuint8mf4_t vminu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8mf8)))
+vuint8mf8_t vminu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u8mf8_m)))
+vuint8mf8_t vminu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m1)))
+vuint16m1_t vminu(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m1_m)))
+vuint16m1_t vminu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m2)))
+vuint16m2_t vminu(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m2_m)))
+vuint16m2_t vminu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m4)))
+vuint16m4_t vminu(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m4_m)))
+vuint16m4_t vminu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m8)))
+vuint16m8_t vminu(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16m8_m)))
+vuint16m8_t vminu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16mf2)))
+vuint16mf2_t vminu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16mf2_m)))
+vuint16mf2_t vminu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16mf4)))
+vuint16mf4_t vminu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u16mf4_m)))
+vuint16mf4_t vminu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m1)))
+vuint32m1_t vminu(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m1_m)))
+vuint32m1_t vminu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m2)))
+vuint32m2_t vminu(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m2_m)))
+vuint32m2_t vminu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m4)))
+vuint32m4_t vminu(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m4_m)))
+vuint32m4_t vminu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m8)))
+vuint32m8_t vminu(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32m8_m)))
+vuint32m8_t vminu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32mf2)))
+vuint32mf2_t vminu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u32mf2_m)))
+vuint32mf2_t vminu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m1)))
+vuint64m1_t vminu(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m1_m)))
+vuint64m1_t vminu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m2)))
+vuint64m2_t vminu(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m2_m)))
+vuint64m2_t vminu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m4)))
+vuint64m4_t vminu(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m4_m)))
+vuint64m4_t vminu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m8)))
+vuint64m8_t vminu(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vv_u64m8_m)))
+vuint64m8_t vminu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m1)))
+vuint8m1_t vminu(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m1_m)))
+vuint8m1_t vminu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m2)))
+vuint8m2_t vminu(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m2_m)))
+vuint8m2_t vminu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m4)))
+vuint8m4_t vminu(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m4_m)))
+vuint8m4_t vminu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m8)))
+vuint8m8_t vminu(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8m8_m)))
+vuint8m8_t vminu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8mf2)))
+vuint8mf2_t vminu(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8mf2_m)))
+vuint8mf2_t vminu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8mf4)))
+vuint8mf4_t vminu(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8mf4_m)))
+vuint8mf4_t vminu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8mf8)))
+vuint8mf8_t vminu(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u8mf8_m)))
+vuint8mf8_t vminu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m1)))
+vuint16m1_t vminu(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m1_m)))
+vuint16m1_t vminu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m2)))
+vuint16m2_t vminu(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m2_m)))
+vuint16m2_t vminu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m4)))
+vuint16m4_t vminu(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m4_m)))
+vuint16m4_t vminu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m8)))
+vuint16m8_t vminu(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16m8_m)))
+vuint16m8_t vminu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16mf2)))
+vuint16mf2_t vminu(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16mf2_m)))
+vuint16mf2_t vminu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16mf4)))
+vuint16mf4_t vminu(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u16mf4_m)))
+vuint16mf4_t vminu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m1)))
+vuint32m1_t vminu(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m1_m)))
+vuint32m1_t vminu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m2)))
+vuint32m2_t vminu(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m2_m)))
+vuint32m2_t vminu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m4)))
+vuint32m4_t vminu(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m4_m)))
+vuint32m4_t vminu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m8)))
+vuint32m8_t vminu(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32m8_m)))
+vuint32m8_t vminu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32mf2)))
+vuint32mf2_t vminu(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u32mf2_m)))
+vuint32mf2_t vminu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m1)))
+vuint64m1_t vminu(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m1_m)))
+vuint64m1_t vminu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m2)))
+vuint64m2_t vminu(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m2_m)))
+vuint64m2_t vminu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m4)))
+vuint64m4_t vminu(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m4_m)))
+vuint64m4_t vminu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m8)))
+vuint64m8_t vminu(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vminu_vx_u64m8_m)))
+vuint64m8_t vminu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m1)))
+vint8m1_t vmin(vint8m1_t op0, vint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m1_m)))
+vint8m1_t vmin(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m2)))
+vint8m2_t vmin(vint8m2_t op0, vint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m2_m)))
+vint8m2_t vmin(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m4)))
+vint8m4_t vmin(vint8m4_t op0, vint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m4_m)))
+vint8m4_t vmin(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m8)))
+vint8m8_t vmin(vint8m8_t op0, vint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8m8_m)))
+vint8m8_t vmin(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8mf2)))
+vint8mf2_t vmin(vint8mf2_t op0, vint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8mf2_m)))
+vint8mf2_t vmin(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8mf4)))
+vint8mf4_t vmin(vint8mf4_t op0, vint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8mf4_m)))
+vint8mf4_t vmin(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8mf8)))
+vint8mf8_t vmin(vint8mf8_t op0, vint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i8mf8_m)))
+vint8mf8_t vmin(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m1)))
+vint16m1_t vmin(vint16m1_t op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m1_m)))
+vint16m1_t vmin(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m2)))
+vint16m2_t vmin(vint16m2_t op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m2_m)))
+vint16m2_t vmin(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m4)))
+vint16m4_t vmin(vint16m4_t op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m4_m)))
+vint16m4_t vmin(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m8)))
+vint16m8_t vmin(vint16m8_t op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16m8_m)))
+vint16m8_t vmin(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16mf2)))
+vint16mf2_t vmin(vint16mf2_t op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16mf2_m)))
+vint16mf2_t vmin(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16mf4)))
+vint16mf4_t vmin(vint16mf4_t op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i16mf4_m)))
+vint16mf4_t vmin(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m1)))
+vint32m1_t vmin(vint32m1_t op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m1_m)))
+vint32m1_t vmin(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m2)))
+vint32m2_t vmin(vint32m2_t op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m2_m)))
+vint32m2_t vmin(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m4)))
+vint32m4_t vmin(vint32m4_t op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m4_m)))
+vint32m4_t vmin(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m8)))
+vint32m8_t vmin(vint32m8_t op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32m8_m)))
+vint32m8_t vmin(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32mf2)))
+vint32mf2_t vmin(vint32mf2_t op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i32mf2_m)))
+vint32mf2_t vmin(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m1)))
+vint64m1_t vmin(vint64m1_t op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m1_m)))
+vint64m1_t vmin(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m2)))
+vint64m2_t vmin(vint64m2_t op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m2_m)))
+vint64m2_t vmin(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m4)))
+vint64m4_t vmin(vint64m4_t op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m4_m)))
+vint64m4_t vmin(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m8)))
+vint64m8_t vmin(vint64m8_t op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vv_i64m8_m)))
+vint64m8_t vmin(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m1)))
+vint8m1_t vmin(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m1_m)))
+vint8m1_t vmin(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m2)))
+vint8m2_t vmin(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m2_m)))
+vint8m2_t vmin(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m4)))
+vint8m4_t vmin(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m4_m)))
+vint8m4_t vmin(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m8)))
+vint8m8_t vmin(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8m8_m)))
+vint8m8_t vmin(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8mf2)))
+vint8mf2_t vmin(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8mf2_m)))
+vint8mf2_t vmin(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8mf4)))
+vint8mf4_t vmin(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8mf4_m)))
+vint8mf4_t vmin(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8mf8)))
+vint8mf8_t vmin(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i8mf8_m)))
+vint8mf8_t vmin(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m1)))
+vint16m1_t vmin(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m1_m)))
+vint16m1_t vmin(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m2)))
+vint16m2_t vmin(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m2_m)))
+vint16m2_t vmin(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m4)))
+vint16m4_t vmin(vint16m4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m4_m)))
+vint16m4_t vmin(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m8)))
+vint16m8_t vmin(vint16m8_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16m8_m)))
+vint16m8_t vmin(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16mf2)))
+vint16mf2_t vmin(vint16mf2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16mf2_m)))
+vint16mf2_t vmin(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16mf4)))
+vint16mf4_t vmin(vint16mf4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i16mf4_m)))
+vint16mf4_t vmin(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m1)))
+vint32m1_t vmin(vint32m1_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m1_m)))
+vint32m1_t vmin(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m2)))
+vint32m2_t vmin(vint32m2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m2_m)))
+vint32m2_t vmin(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m4)))
+vint32m4_t vmin(vint32m4_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m4_m)))
+vint32m4_t vmin(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m8)))
+vint32m8_t vmin(vint32m8_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32m8_m)))
+vint32m8_t vmin(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32mf2)))
+vint32mf2_t vmin(vint32mf2_t op0, int32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i32mf2_m)))
+vint32mf2_t vmin(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m1)))
+vint64m1_t vmin(vint64m1_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m1_m)))
+vint64m1_t vmin(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m2)))
+vint64m2_t vmin(vint64m2_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m2_m)))
+vint64m2_t vmin(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m4)))
+vint64m4_t vmin(vint64m4_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m4_m)))
+vint64m4_t vmin(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m8)))
+vint64m8_t vmin(vint64m8_t op0, int64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmin_vx_i64m8_m)))
+vint64m8_t vmin(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m1)))
+vuint8m1_t vmaxu(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m1_m)))
+vuint8m1_t vmaxu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m2)))
+vuint8m2_t vmaxu(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m2_m)))
+vuint8m2_t vmaxu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m4)))
+vuint8m4_t vmaxu(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m4_m)))
+vuint8m4_t vmaxu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m8)))
+vuint8m8_t vmaxu(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8m8_m)))
+vuint8m8_t vmaxu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8mf2)))
+vuint8mf2_t vmaxu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8mf2_m)))
+vuint8mf2_t vmaxu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8mf4)))
+vuint8mf4_t vmaxu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8mf4_m)))
+vuint8mf4_t vmaxu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8mf8)))
+vuint8mf8_t vmaxu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u8mf8_m)))
+vuint8mf8_t vmaxu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m1)))
+vuint16m1_t vmaxu(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m1_m)))
+vuint16m1_t vmaxu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m2)))
+vuint16m2_t vmaxu(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m2_m)))
+vuint16m2_t vmaxu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m4)))
+vuint16m4_t vmaxu(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m4_m)))
+vuint16m4_t vmaxu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m8)))
+vuint16m8_t vmaxu(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16m8_m)))
+vuint16m8_t vmaxu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16mf2)))
+vuint16mf2_t vmaxu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16mf2_m)))
+vuint16mf2_t vmaxu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16mf4)))
+vuint16mf4_t vmaxu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u16mf4_m)))
+vuint16mf4_t vmaxu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m1)))
+vuint32m1_t vmaxu(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m1_m)))
+vuint32m1_t vmaxu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m2)))
+vuint32m2_t vmaxu(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m2_m)))
+vuint32m2_t vmaxu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m4)))
+vuint32m4_t vmaxu(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m4_m)))
+vuint32m4_t vmaxu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m8)))
+vuint32m8_t vmaxu(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32m8_m)))
+vuint32m8_t vmaxu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32mf2)))
+vuint32mf2_t vmaxu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u32mf2_m)))
+vuint32mf2_t vmaxu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m1)))
+vuint64m1_t vmaxu(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m1_m)))
+vuint64m1_t vmaxu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m2)))
+vuint64m2_t vmaxu(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m2_m)))
+vuint64m2_t vmaxu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m4)))
+vuint64m4_t vmaxu(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m4_m)))
+vuint64m4_t vmaxu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m8)))
+vuint64m8_t vmaxu(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vv_u64m8_m)))
+vuint64m8_t vmaxu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m1)))
+vuint8m1_t vmaxu(vuint8m1_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m1_m)))
+vuint8m1_t vmaxu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m2)))
+vuint8m2_t vmaxu(vuint8m2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m2_m)))
+vuint8m2_t vmaxu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m4)))
+vuint8m4_t vmaxu(vuint8m4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m4_m)))
+vuint8m4_t vmaxu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m8)))
+vuint8m8_t vmaxu(vuint8m8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8m8_m)))
+vuint8m8_t vmaxu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8mf2)))
+vuint8mf2_t vmaxu(vuint8mf2_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8mf2_m)))
+vuint8mf2_t vmaxu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8mf4)))
+vuint8mf4_t vmaxu(vuint8mf4_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8mf4_m)))
+vuint8mf4_t vmaxu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8mf8)))
+vuint8mf8_t vmaxu(vuint8mf8_t op0, uint8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u8mf8_m)))
+vuint8mf8_t vmaxu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m1)))
+vuint16m1_t vmaxu(vuint16m1_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m1_m)))
+vuint16m1_t vmaxu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m2)))
+vuint16m2_t vmaxu(vuint16m2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m2_m)))
+vuint16m2_t vmaxu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m4)))
+vuint16m4_t vmaxu(vuint16m4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m4_m)))
+vuint16m4_t vmaxu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m8)))
+vuint16m8_t vmaxu(vuint16m8_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16m8_m)))
+vuint16m8_t vmaxu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16mf2)))
+vuint16mf2_t vmaxu(vuint16mf2_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16mf2_m)))
+vuint16mf2_t vmaxu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16mf4)))
+vuint16mf4_t vmaxu(vuint16mf4_t op0, uint16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u16mf4_m)))
+vuint16mf4_t vmaxu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m1)))
+vuint32m1_t vmaxu(vuint32m1_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m1_m)))
+vuint32m1_t vmaxu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m2)))
+vuint32m2_t vmaxu(vuint32m2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m2_m)))
+vuint32m2_t vmaxu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m4)))
+vuint32m4_t vmaxu(vuint32m4_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m4_m)))
+vuint32m4_t vmaxu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m8)))
+vuint32m8_t vmaxu(vuint32m8_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32m8_m)))
+vuint32m8_t vmaxu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32mf2)))
+vuint32mf2_t vmaxu(vuint32mf2_t op0, uint32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u32mf2_m)))
+vuint32mf2_t vmaxu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m1)))
+vuint64m1_t vmaxu(vuint64m1_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m1_m)))
+vuint64m1_t vmaxu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m2)))
+vuint64m2_t vmaxu(vuint64m2_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m2_m)))
+vuint64m2_t vmaxu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m4)))
+vuint64m4_t vmaxu(vuint64m4_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m4_m)))
+vuint64m4_t vmaxu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m8)))
+vuint64m8_t vmaxu(vuint64m8_t op0, uint64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmaxu_vx_u64m8_m)))
+vuint64m8_t vmaxu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m1)))
+vint8m1_t vmax(vint8m1_t op0, vint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m1_m)))
+vint8m1_t vmax(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m2)))
+vint8m2_t vmax(vint8m2_t op0, vint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m2_m)))
+vint8m2_t vmax(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m4)))
+vint8m4_t vmax(vint8m4_t op0, vint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m4_m)))
+vint8m4_t vmax(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m8)))
+vint8m8_t vmax(vint8m8_t op0, vint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8m8_m)))
+vint8m8_t vmax(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8mf2)))
+vint8mf2_t vmax(vint8mf2_t op0, vint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8mf2_m)))
+vint8mf2_t vmax(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8mf4)))
+vint8mf4_t vmax(vint8mf4_t op0, vint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8mf4_m)))
+vint8mf4_t vmax(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8mf8)))
+vint8mf8_t vmax(vint8mf8_t op0, vint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i8mf8_m)))
+vint8mf8_t vmax(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m1)))
+vint16m1_t vmax(vint16m1_t op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m1_m)))
+vint16m1_t vmax(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m2)))
+vint16m2_t vmax(vint16m2_t op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m2_m)))
+vint16m2_t vmax(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m4)))
+vint16m4_t vmax(vint16m4_t op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m4_m)))
+vint16m4_t vmax(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m8)))
+vint16m8_t vmax(vint16m8_t op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16m8_m)))
+vint16m8_t vmax(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16mf2)))
+vint16mf2_t vmax(vint16mf2_t op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16mf2_m)))
+vint16mf2_t vmax(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16mf4)))
+vint16mf4_t vmax(vint16mf4_t op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i16mf4_m)))
+vint16mf4_t vmax(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m1)))
+vint32m1_t vmax(vint32m1_t op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m1_m)))
+vint32m1_t vmax(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m2)))
+vint32m2_t vmax(vint32m2_t op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m2_m)))
+vint32m2_t vmax(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m4)))
+vint32m4_t vmax(vint32m4_t op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m4_m)))
+vint32m4_t vmax(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m8)))
+vint32m8_t vmax(vint32m8_t op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32m8_m)))
+vint32m8_t vmax(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32mf2)))
+vint32mf2_t vmax(vint32mf2_t op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i32mf2_m)))
+vint32mf2_t vmax(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m1)))
+vint64m1_t vmax(vint64m1_t op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m1_m)))
+vint64m1_t vmax(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m2)))
+vint64m2_t vmax(vint64m2_t op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m2_m)))
+vint64m2_t vmax(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m4)))
+vint64m4_t vmax(vint64m4_t op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m4_m)))
+vint64m4_t vmax(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m8)))
+vint64m8_t vmax(vint64m8_t op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vv_i64m8_m)))
+vint64m8_t vmax(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16m1)))
+void vsuxei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16m1_m)))
+void vsuxei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16m2)))
+void vsuxei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16m2_m)))
+void vsuxei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16mf2)))
+void vsuxei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16mf2_m)))
+void vsuxei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16mf4)))
+void vsuxei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u16mf4_m)))
+void vsuxei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m1)))
+vint8m1_t vmax(vint8m1_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m1_m)))
+vint8m1_t vmax(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m2)))
+vint8m2_t vmax(vint8m2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m2_m)))
+vint8m2_t vmax(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m4)))
+vint8m4_t vmax(vint8m4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m4_m)))
+vint8m4_t vmax(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m8)))
+vint8m8_t vmax(vint8m8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8m8_m)))
+vint8m8_t vmax(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8mf2)))
+vint8mf2_t vmax(vint8mf2_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8mf2_m)))
+vint8mf2_t vmax(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8mf4)))
+vint8mf4_t vmax(vint8mf4_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8mf4_m)))
+vint8mf4_t vmax(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8mf8)))
+vint8mf8_t vmax(vint8mf8_t op0, int8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i8mf8_m)))
+vint8mf8_t vmax(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m1)))
+vint16m1_t vmax(vint16m1_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m1_m)))
+vint16m1_t vmax(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m2)))
+vint16m2_t vmax(vint16m2_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m2_m)))
+vint16m2_t vmax(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m4)))
+vint16m4_t vmax(vint16m4_t op0, int16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m4_m)))
+vint16m4_t vmax(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m8))) +vint16m8_t vmax(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16m8_m))) +vint16m8_t vmax(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16mf2))) +vint16mf2_t vmax(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16mf2_m))) +vint16mf2_t vmax(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16mf4))) +vint16mf4_t vmax(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i16mf4_m))) +vint16mf4_t vmax(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m1))) +vint32m1_t vmax(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m1_m))) +vint32m1_t vmax(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m2))) +vint32m2_t vmax(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m2_m))) +vint32m2_t vmax(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m4))) +vint32m4_t vmax(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m4_m))) +vint32m4_t vmax(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m8))) +vint32m8_t vmax(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32m8_m))) +vint32m8_t vmax(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32mf2))) +vint32mf2_t vmax(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i32mf2_m))) +vint32mf2_t vmax(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m1))) +vint64m1_t vmax(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m1_m))) +vint64m1_t vmax(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m2))) +vint64m2_t vmax(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m2_m))) +vint64m2_t vmax(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m4))) +vint64m4_t vmax(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m4_m))) +vint64m4_t vmax(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m8))) +vint64m8_t vmax(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmax_vx_i64m8_m))) +vint64m8_t vmax(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m1))) +vint8m1_t vmul(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m1_m))) +vint8m1_t vmul(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m2))) +vint8m2_t vmul(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m2_m))) +vint8m2_t vmul(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m4))) +vint8m4_t vmul(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m4_m))) +vint8m4_t vmul(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m8))) +vint8m8_t vmul(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8m8_m))) +vint8m8_t vmul(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8mf2))) +vint8mf2_t vmul(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8mf2_m))) +vint8mf2_t vmul(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8mf4))) +vint8mf4_t vmul(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8mf4_m))) +vint8mf4_t vmul(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8mf8))) +vint8mf8_t vmul(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i8mf8_m))) +vint8mf8_t vmul(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m1))) +vint16m1_t vmul(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m1_m))) +vint16m1_t vmul(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m2))) +vint16m2_t vmul(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m2_m))) +vint16m2_t vmul(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m4))) +vint16m4_t vmul(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m4_m))) +vint16m4_t vmul(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m8))) +vint16m8_t vmul(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16m8_m))) +vint16m8_t vmul(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16mf2))) +vint16mf2_t vmul(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16mf2_m))) +vint16mf2_t vmul(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16mf4))) +vint16mf4_t vmul(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i16mf4_m))) +vint16mf4_t vmul(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m1))) +vint32m1_t vmul(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m1_m))) +vint32m1_t vmul(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m2))) +vint32m2_t vmul(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m2_m))) +vint32m2_t vmul(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m4))) +vint32m4_t vmul(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m4_m))) +vint32m4_t vmul(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m8))) +vint32m8_t vmul(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32m8_m))) +vint32m8_t vmul(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32mf2))) +vint32mf2_t vmul(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i32mf2_m))) +vint32mf2_t vmul(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m1))) +vint64m1_t vmul(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m1_m))) +vint64m1_t vmul(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m2))) +vint64m2_t vmul(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m2_m))) +vint64m2_t vmul(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m4))) +vint64m4_t vmul(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m4_m))) +vint64m4_t vmul(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m8))) +vint64m8_t vmul(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_i64m8_m))) +vint64m8_t vmul(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m1))) +vint8m1_t vmul(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m1_m))) +vint8m1_t vmul(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m2))) +vint8m2_t vmul(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m2_m))) +vint8m2_t vmul(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m4))) +vint8m4_t vmul(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m4_m))) +vint8m4_t vmul(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m8))) +vint8m8_t vmul(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8m8_m))) +vint8m8_t vmul(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8mf2))) +vint8mf2_t vmul(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8mf2_m))) +vint8mf2_t vmul(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8mf4))) +vint8mf4_t vmul(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8mf4_m))) +vint8mf4_t vmul(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8mf8))) +vint8mf8_t vmul(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i8mf8_m))) +vint8mf8_t vmul(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m1))) +vint16m1_t vmul(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m1_m))) +vint16m1_t vmul(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m2))) +vint16m2_t vmul(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m2_m))) +vint16m2_t vmul(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m4))) +vint16m4_t vmul(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m4_m))) +vint16m4_t vmul(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m8))) +vint16m8_t vmul(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16m8_m))) +vint16m8_t vmul(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16mf2))) +vint16mf2_t vmul(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16mf2_m))) +vint16mf2_t vmul(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16mf4))) +vint16mf4_t vmul(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i16mf4_m))) +vint16mf4_t vmul(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m1))) +vint32m1_t vmul(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m1_m))) +vint32m1_t vmul(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m2))) +vint32m2_t vmul(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m2_m))) +vint32m2_t vmul(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m4))) +vint32m4_t vmul(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m4_m))) +vint32m4_t vmul(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m8))) +vint32m8_t vmul(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32m8_m))) +vint32m8_t vmul(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32mf2))) +vint32mf2_t vmul(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i32mf2_m))) +vint32mf2_t vmul(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m1))) +vint64m1_t vmul(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m1_m))) +vint64m1_t vmul(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m2))) +vint64m2_t vmul(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m2_m))) +vint64m2_t vmul(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m4))) +vint64m4_t vmul(vint64m4_t op0, int64_t op1, size_t op2); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m4_m))) +vint64m4_t vmul(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m8))) +vint64m8_t vmul(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_i64m8_m))) +vint64m8_t vmul(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m1))) +vuint8m1_t vmul(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m1_m))) +vuint8m1_t vmul(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m2))) +vuint8m2_t vmul(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m2_m))) +vuint8m2_t vmul(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m4))) +vuint8m4_t vmul(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m4_m))) +vuint8m4_t vmul(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m8))) +vuint8m8_t vmul(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8m8_m))) +vuint8m8_t vmul(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8mf2))) +vuint8mf2_t vmul(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8mf2_m))) +vuint8mf2_t vmul(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8mf4))) +vuint8mf4_t vmul(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8mf4_m))) +vuint8mf4_t vmul(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8mf8))) +vuint8mf8_t vmul(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u8mf8_m))) +vuint8mf8_t vmul(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m1))) +vuint16m1_t vmul(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m1_m))) +vuint16m1_t vmul(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m2))) +vuint16m2_t vmul(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m2_m))) +vuint16m2_t vmul(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m4))) +vuint16m4_t vmul(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m4_m))) +vuint16m4_t vmul(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m8))) +vuint16m8_t vmul(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16m8_m))) +vuint16m8_t vmul(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16mf2))) +vuint16mf2_t vmul(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16mf2_m))) +vuint16mf2_t vmul(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16mf4))) +vuint16mf4_t vmul(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u16mf4_m))) +vuint16mf4_t vmul(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m1))) +vuint32m1_t vmul(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m1_m))) +vuint32m1_t vmul(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m2))) +vuint32m2_t vmul(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m2_m))) +vuint32m2_t vmul(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m4))) +vuint32m4_t vmul(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m4_m))) +vuint32m4_t vmul(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m8))) +vuint32m8_t vmul(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32m8_m))) +vuint32m8_t vmul(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32mf2))) +vuint32mf2_t vmul(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u32mf2_m))) +vuint32mf2_t vmul(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m1))) +vuint64m1_t vmul(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m1_m))) +vuint64m1_t vmul(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m2))) +vuint64m2_t vmul(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m2_m))) +vuint64m2_t vmul(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m4))) +vuint64m4_t vmul(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m4_m))) +vuint64m4_t vmul(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m8))) +vuint64m8_t vmul(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vv_u64m8_m))) +vuint64m8_t vmul(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m1))) +vuint8m1_t vmul(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m1_m))) +vuint8m1_t vmul(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m2))) +vuint8m2_t vmul(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m2_m))) +vuint8m2_t vmul(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m4))) +vuint8m4_t vmul(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m4_m))) +vuint8m4_t vmul(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m8))) +vuint8m8_t vmul(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8m8_m))) +vuint8m8_t vmul(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8mf2))) +vuint8mf2_t vmul(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8mf2_m))) +vuint8mf2_t vmul(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8mf4))) +vuint8mf4_t vmul(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8mf4_m))) +vuint8mf4_t vmul(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8mf8))) +vuint8mf8_t vmul(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u8mf8_m))) +vuint8mf8_t vmul(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m1))) +vuint16m1_t vmul(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m1_m))) +vuint16m1_t vmul(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m2))) +vuint16m2_t vmul(vuint16m2_t op0, 
uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m2_m))) +vuint16m2_t vmul(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m4))) +vuint16m4_t vmul(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m4_m))) +vuint16m4_t vmul(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m8))) +vuint16m8_t vmul(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16m8_m))) +vuint16m8_t vmul(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16mf2))) +vuint16mf2_t vmul(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16mf2_m))) +vuint16mf2_t vmul(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16mf4))) +vuint16mf4_t vmul(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u16mf4_m))) +vuint16mf4_t vmul(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m1))) +vuint32m1_t vmul(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m1_m))) +vuint32m1_t vmul(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m2))) +vuint32m2_t vmul(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m2_m))) +vuint32m2_t vmul(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m4))) +vuint32m4_t vmul(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m4_m))) +vuint32m4_t vmul(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m8))) +vuint32m8_t vmul(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32m8_m))) +vuint32m8_t vmul(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32mf2))) +vuint32mf2_t vmul(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u32mf2_m))) +vuint32mf2_t vmul(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m1))) +vuint64m1_t vmul(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m1_m))) +vuint64m1_t vmul(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m2))) +vuint64m2_t vmul(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m2_m))) +vuint64m2_t vmul(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m4))) +vuint64m4_t vmul(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m4_m))) +vuint64m4_t vmul(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m8))) +vuint64m8_t vmul(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmul_vx_u64m8_m))) +vuint64m8_t vmul(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m1))) +vint8m1_t vmulh(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m1_m))) +vint8m1_t vmulh(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m2))) +vint8m2_t vmulh(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m2_m))) +vint8m2_t vmulh(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m4))) +vint8m4_t vmulh(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m4_m))) +vint8m4_t vmulh(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m8))) +vint8m8_t vmulh(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8m8_m))) +vint8m8_t vmulh(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8mf2))) +vint8mf2_t vmulh(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8mf2_m))) +vint8mf2_t vmulh(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8mf4))) +vint8mf4_t vmulh(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8mf4_m))) +vint8mf4_t vmulh(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8mf8))) +vint8mf8_t vmulh(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i8mf8_m))) +vint8mf8_t vmulh(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m1))) +vint16m1_t vmulh(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m1_m))) +vint16m1_t vmulh(vbool16_t op0, vint16m1_t op1, 
vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m2))) +vint16m2_t vmulh(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m2_m))) +vint16m2_t vmulh(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m4))) +vint16m4_t vmulh(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m4_m))) +vint16m4_t vmulh(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m8))) +vint16m8_t vmulh(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16m8_m))) +vint16m8_t vmulh(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16mf2))) +vint16mf2_t vmulh(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16mf2_m))) +vint16mf2_t vmulh(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16mf4))) +vint16mf4_t vmulh(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i16mf4_m))) +vint16mf4_t vmulh(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m1))) +vint32m1_t vmulh(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m1_m))) +vint32m1_t vmulh(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m2))) +vint32m2_t vmulh(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m2_m))) +vint32m2_t vmulh(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m4))) +vint32m4_t vmulh(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m4_m))) +vint32m4_t vmulh(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m8))) +vint32m8_t vmulh(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32m8_m))) +vint32m8_t vmulh(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32mf2))) +vint32mf2_t vmulh(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i32mf2_m))) +vint32mf2_t vmulh(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m1))) +vint64m1_t vmulh(vint64m1_t op0, vint64m1_t op1, size_t op2); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m1_m))) +vint64m1_t vmulh(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m2))) +vint64m2_t vmulh(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m2_m))) +vint64m2_t vmulh(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m4))) +vint64m4_t vmulh(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m4_m))) +vint64m4_t vmulh(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m8))) +vint64m8_t vmulh(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vv_i64m8_m))) +vint64m8_t vmulh(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m1))) +vint8m1_t vmulh(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m1_m))) +vint8m1_t vmulh(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m2))) +vint8m2_t vmulh(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m2_m))) +vint8m2_t vmulh(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m4))) +vint8m4_t vmulh(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m4_m))) +vint8m4_t vmulh(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m8))) +vint8m8_t vmulh(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8m8_m))) +vint8m8_t vmulh(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8mf2))) +vint8mf2_t vmulh(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8mf2_m))) +vint8mf2_t vmulh(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8mf4))) +vint8mf4_t vmulh(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8mf4_m))) +vint8mf4_t vmulh(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8mf8))) +vint8mf8_t vmulh(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i8mf8_m))) +vint8mf8_t vmulh(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m1))) +vint16m1_t vmulh(vint16m1_t op0, int16_t 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m1_m))) +vint16m1_t vmulh(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m2))) +vint16m2_t vmulh(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m2_m))) +vint16m2_t vmulh(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m4))) +vint16m4_t vmulh(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m4_m))) +vint16m4_t vmulh(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m8))) +vint16m8_t vmulh(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16m8_m))) +vint16m8_t vmulh(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16mf2))) +vint16mf2_t vmulh(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16mf2_m))) +vint16mf2_t vmulh(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16mf4))) +vint16mf4_t vmulh(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i16mf4_m))) +vint16mf4_t vmulh(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m1))) +vint32m1_t vmulh(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m1_m))) +vint32m1_t vmulh(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m2))) +vint32m2_t vmulh(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m2_m))) +vint32m2_t vmulh(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m4))) +vint32m4_t vmulh(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m4_m))) +vint32m4_t vmulh(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m8))) +vint32m8_t vmulh(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32m8_m))) +vint32m8_t vmulh(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32mf2))) +vint32mf2_t vmulh(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i32mf2_m))) +vint32mf2_t vmulh(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m1))) +vint64m1_t vmulh(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m1_m))) +vint64m1_t vmulh(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m2))) +vint64m2_t vmulh(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m2_m))) +vint64m2_t vmulh(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m4))) +vint64m4_t vmulh(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m4_m))) +vint64m4_t vmulh(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m8))) +vint64m8_t vmulh(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulh_vx_i64m8_m))) +vint64m8_t vmulh(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m1))) +vuint8m1_t vmulhu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m1_m))) +vuint8m1_t vmulhu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m2))) +vuint8m2_t vmulhu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m2_m))) +vuint8m2_t vmulhu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m4))) +vuint8m4_t vmulhu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m4_m))) +vuint8m4_t vmulhu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m8))) +vuint8m8_t vmulhu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8m8_m))) +vuint8m8_t vmulhu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8mf2))) +vuint8mf2_t vmulhu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8mf2_m))) +vuint8mf2_t vmulhu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8mf4))) +vuint8mf4_t vmulhu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8mf4_m))) +vuint8mf4_t vmulhu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8mf8))) +vuint8mf8_t vmulhu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u8mf8_m))) +vuint8mf8_t vmulhu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m1))) +vuint16m1_t vmulhu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m1_m))) +vuint16m1_t vmulhu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m2))) +vuint16m2_t vmulhu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m2_m))) +vuint16m2_t vmulhu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m4))) +vuint16m4_t vmulhu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m4_m))) +vuint16m4_t vmulhu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m8))) +vuint16m8_t vmulhu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16m8_m))) +vuint16m8_t vmulhu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16mf2))) +vuint16mf2_t vmulhu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16mf2_m))) +vuint16mf2_t vmulhu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16mf4))) +vuint16mf4_t vmulhu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u16mf4_m))) +vuint16mf4_t vmulhu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m1))) +vuint32m1_t vmulhu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m1_m))) +vuint32m1_t vmulhu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m2))) +vuint32m2_t vmulhu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m2_m))) +vuint32m2_t vmulhu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m4))) +vuint32m4_t vmulhu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m4_m))) +vuint32m4_t vmulhu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m8))) +vuint32m8_t vmulhu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32m8_m))) +vuint32m8_t vmulhu(vbool4_t 
op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32mf2))) +vuint32mf2_t vmulhu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u32mf2_m))) +vuint32mf2_t vmulhu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m1))) +vuint64m1_t vmulhu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m1_m))) +vuint64m1_t vmulhu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m2))) +vuint64m2_t vmulhu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m2_m))) +vuint64m2_t vmulhu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m4))) +vuint64m4_t vmulhu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m4_m))) +vuint64m4_t vmulhu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m8))) +vuint64m8_t vmulhu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vv_u64m8_m))) +vuint64m8_t vmulhu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m1))) +vuint8m1_t vmulhu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m1_m))) +vuint8m1_t vmulhu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m2))) +vuint8m2_t vmulhu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m2_m))) +vuint8m2_t vmulhu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m4))) +vuint8m4_t vmulhu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m4_m))) +vuint8m4_t vmulhu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m8))) +vuint8m8_t vmulhu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8m8_m))) +vuint8m8_t vmulhu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8mf2))) +vuint8mf2_t vmulhu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8mf2_m))) +vuint8mf2_t vmulhu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8mf4))) +vuint8mf4_t 
vmulhu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8mf4_m))) +vuint8mf4_t vmulhu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8mf8))) +vuint8mf8_t vmulhu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u8mf8_m))) +vuint8mf8_t vmulhu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m1))) +vuint16m1_t vmulhu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m1_m))) +vuint16m1_t vmulhu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m2))) +vuint16m2_t vmulhu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m2_m))) +vuint16m2_t vmulhu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m4))) +vuint16m4_t vmulhu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m4_m))) +vuint16m4_t vmulhu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m8))) +vuint16m8_t vmulhu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16m8_m))) +vuint16m8_t vmulhu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16mf2))) +vuint16mf2_t vmulhu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16mf2_m))) +vuint16mf2_t vmulhu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16mf4))) +vuint16mf4_t vmulhu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u16mf4_m))) +vuint16mf4_t vmulhu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m1))) +vuint32m1_t vmulhu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m1_m))) +vuint32m1_t vmulhu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m2))) +vuint32m2_t vmulhu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m2_m))) +vuint32m2_t vmulhu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m4))) +vuint32m4_t vmulhu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m4_m))) +vuint32m4_t 
vmulhu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m8))) +vuint32m8_t vmulhu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32m8_m))) +vuint32m8_t vmulhu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32mf2))) +vuint32mf2_t vmulhu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u32mf2_m))) +vuint32mf2_t vmulhu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m1))) +vuint64m1_t vmulhu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m1_m))) +vuint64m1_t vmulhu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m2))) +vuint64m2_t vmulhu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m2_m))) +vuint64m2_t vmulhu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m4))) +vuint64m4_t vmulhu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m4_m))) +vuint64m4_t vmulhu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m8))) +vuint64m8_t vmulhu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhu_vx_u64m8_m))) +vuint64m8_t vmulhu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m1))) +vint8m1_t vmulhsu(vint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m1_m))) +vint8m1_t vmulhsu(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m2))) +vint8m2_t vmulhsu(vint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m2_m))) +vint8m2_t vmulhsu(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m4))) +vint8m4_t vmulhsu(vint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m4_m))) +vint8m4_t vmulhsu(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m8))) +vint8m8_t vmulhsu(vint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8m8_m))) +vint8m8_t vmulhsu(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8mf2))) +vint8mf2_t 
vmulhsu(vint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8mf2_m))) +vint8mf2_t vmulhsu(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8mf4))) +vint8mf4_t vmulhsu(vint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8mf4_m))) +vint8mf4_t vmulhsu(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8mf8))) +vint8mf8_t vmulhsu(vint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i8mf8_m))) +vint8mf8_t vmulhsu(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m1))) +vint16m1_t vmulhsu(vint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m1_m))) +vint16m1_t vmulhsu(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m2))) +vint16m2_t vmulhsu(vint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m2_m))) +vint16m2_t vmulhsu(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m4))) +vint16m4_t vmulhsu(vint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m4_m))) +vint16m4_t vmulhsu(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m8))) +vint16m8_t vmulhsu(vint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16m8_m))) +vint16m8_t vmulhsu(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16mf2))) +vint16mf2_t vmulhsu(vint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16mf2_m))) +vint16mf2_t vmulhsu(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16mf4))) +vint16mf4_t vmulhsu(vint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i16mf4_m))) +vint16mf4_t vmulhsu(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m1))) +vint32m1_t vmulhsu(vint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m1_m))) +vint32m1_t vmulhsu(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m2))) +vint32m2_t vmulhsu(vint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m2_m))) +vint32m2_t vmulhsu(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m4))) +vint32m4_t vmulhsu(vint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m4_m))) +vint32m4_t vmulhsu(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m8))) +vint32m8_t vmulhsu(vint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32m8_m))) +vint32m8_t vmulhsu(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32mf2))) +vint32mf2_t vmulhsu(vint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i32mf2_m))) +vint32mf2_t vmulhsu(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m1))) +vint64m1_t vmulhsu(vint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m1_m))) +vint64m1_t vmulhsu(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m2))) +vint64m2_t vmulhsu(vint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m2_m))) +vint64m2_t vmulhsu(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m4))) +vint64m4_t vmulhsu(vint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m4_m))) +vint64m4_t vmulhsu(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m8))) +vint64m8_t vmulhsu(vint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vv_i64m8_m))) +vint64m8_t vmulhsu(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m1))) +void vsuxei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m1_m))) +void vsuxei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m2))) +void vsuxei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m2_m))) +void vsuxei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m4))) +void vsuxei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m4_m))) +void vsuxei8(vbool8_t op0, int32_t * 
op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m8))) +void vsuxei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32m8_m))) +void vsuxei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32mf2))) +void vsuxei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i32mf2_m))) +void vsuxei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m1))) +vint8m1_t vmulhsu(vint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m1_m))) +vint8m1_t vmulhsu(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m2))) +vint8m2_t vmulhsu(vint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m2_m))) +vint8m2_t vmulhsu(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m4))) +vint8m4_t vmulhsu(vint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m4_m))) +vint8m4_t vmulhsu(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m8))) +vint8m8_t vmulhsu(vint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8m8_m))) +vint8m8_t vmulhsu(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8mf2))) +vint8mf2_t vmulhsu(vint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8mf2_m))) +vint8mf2_t vmulhsu(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8mf4))) +vint8mf4_t vmulhsu(vint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8mf4_m))) +vint8mf4_t vmulhsu(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8mf8))) +vint8mf8_t vmulhsu(vint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i8mf8_m))) +vint8mf8_t vmulhsu(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m1))) +vint16m1_t vmulhsu(vint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m1_m))) +vint16m1_t vmulhsu(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m2))) +vint16m2_t vmulhsu(vint16m2_t op0, uint16_t op1, size_t op2); + 
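/*
 * Editor's sketch -- illustrative only, not part of this patch. The
 * overloaded declarations in this header follow one convention that clang
 * resolves by argument type: unmasked forms take (sources..., vl), and
 * masked "_m" forms take (mask, maskedoff, sources..., vl), with
 * masked-off lanes keeping the maskedoff element. vsuxei8 above is an
 * unordered indexed (scatter) store whose index vector holds byte
 * offsets. A minimal use, as user code would write it after
 * #include <riscv_vector.h>, assuming a clang 14 RVV toolchain
 * (e.g. -march=rv64gcv) targeting the v0.10 intrinsics this header
 * implements:
 */
#include <riscv_vector.h>
#include <stdint.h>

/* Scatter: store values[i] at (char *)base + offsets[i] for each lane. */
static void scatter_i32(int32_t *base, vuint8mf4_t offsets,
                        vint32m1_t values, size_t vl) {
  vsuxei8(base, offsets, values, vl);   /* resolves to vsuxei8_v_i32m1 */
}

/* High 16 bits of a signed x unsigned 16-bit product, lane-wise. */
static vint16m1_t hi_su(vint16m1_t s, uint16_t u, size_t vl) {
  return vmulhsu(s, u, vl);             /* resolves to vmulhsu_vx_i16m1 */
}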
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m2_m))) +vint16m2_t vmulhsu(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m4))) +vint16m4_t vmulhsu(vint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m4_m))) +vint16m4_t vmulhsu(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m8))) +vint16m8_t vmulhsu(vint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16m8_m))) +vint16m8_t vmulhsu(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16mf2))) +vint16mf2_t vmulhsu(vint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16mf2_m))) +vint16mf2_t vmulhsu(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16mf4))) +vint16mf4_t vmulhsu(vint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i16mf4_m))) +vint16mf4_t vmulhsu(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m1))) +vint32m1_t vmulhsu(vint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m1_m))) +vint32m1_t vmulhsu(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m2))) +vint32m2_t vmulhsu(vint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m2_m))) +vint32m2_t vmulhsu(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m4))) +vint32m4_t vmulhsu(vint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m4_m))) +vint32m4_t vmulhsu(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m8))) +vint32m8_t vmulhsu(vint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32m8_m))) +vint32m8_t vmulhsu(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32mf2))) +vint32mf2_t vmulhsu(vint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i32mf2_m))) +vint32mf2_t vmulhsu(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m1))) +vint64m1_t vmulhsu(vint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m1_m))) +vint64m1_t vmulhsu(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, uint64_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m2))) +vint64m2_t vmulhsu(vint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m2_m))) +vint64m2_t vmulhsu(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m4))) +vint64m4_t vmulhsu(vint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m4_m))) +vint64m4_t vmulhsu(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m8))) +vint64m8_t vmulhsu(vint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmulhsu_vx_i64m8_m))) +vint64m8_t vmulhsu(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m1))) +vuint8m1_t vdivu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m1_m))) +vuint8m1_t vdivu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m2))) +vuint8m2_t vdivu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m2_m))) +vuint8m2_t vdivu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m4))) +vuint8m4_t vdivu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m4_m))) +vuint8m4_t vdivu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m8))) +vuint8m8_t vdivu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8m8_m))) +vuint8m8_t vdivu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8mf2))) +vuint8mf2_t vdivu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8mf2_m))) +vuint8mf2_t vdivu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8mf4))) +vuint8mf4_t vdivu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8mf4_m))) +vuint8mf4_t vdivu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8mf8))) +vuint8mf8_t vdivu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u8mf8_m))) +vuint8mf8_t vdivu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m1))) +vuint16m1_t vdivu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m1_m))) +vuint16m1_t vdivu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m2))) +vuint16m2_t vdivu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m2_m))) +vuint16m2_t vdivu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m4))) +vuint16m4_t vdivu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m4_m))) +vuint16m4_t vdivu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m8))) +vuint16m8_t vdivu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16m8_m))) +vuint16m8_t vdivu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16mf2))) +vuint16mf2_t vdivu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16mf2_m))) +vuint16mf2_t vdivu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16mf4))) +vuint16mf4_t vdivu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u16mf4_m))) +vuint16mf4_t vdivu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m1))) +vuint32m1_t vdivu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m1_m))) +vuint32m1_t vdivu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m2))) +vuint32m2_t vdivu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m2_m))) +vuint32m2_t vdivu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m4))) +vuint32m4_t vdivu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m4_m))) +vuint32m4_t vdivu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m8))) +vuint32m8_t vdivu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32m8_m))) +vuint32m8_t vdivu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32mf2))) +vuint32mf2_t vdivu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u32mf2_m))) +vuint32mf2_t vdivu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t 
op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m1))) +vuint64m1_t vdivu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m1_m))) +vuint64m1_t vdivu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m2))) +vuint64m2_t vdivu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m2_m))) +vuint64m2_t vdivu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m4))) +vuint64m4_t vdivu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m4_m))) +vuint64m4_t vdivu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m8))) +vuint64m8_t vdivu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vv_u64m8_m))) +vuint64m8_t vdivu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m1))) +vuint8m1_t vdivu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m1_m))) +vuint8m1_t vdivu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m2))) +vuint8m2_t vdivu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m2_m))) +vuint8m2_t vdivu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m4))) +vuint8m4_t vdivu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m4_m))) +vuint8m4_t vdivu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m8))) +vuint8m8_t vdivu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8m8_m))) +vuint8m8_t vdivu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8mf2))) +vuint8mf2_t vdivu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8mf2_m))) +vuint8mf2_t vdivu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8mf4))) +vuint8mf4_t vdivu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8mf4_m))) +vuint8mf4_t vdivu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8mf8))) +vuint8mf8_t vdivu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u8mf8_m))) +vuint8mf8_t vdivu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m1))) +vuint16m1_t vdivu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m1_m))) +vuint16m1_t vdivu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m2))) +vuint16m2_t vdivu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m2_m))) +vuint16m2_t vdivu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m4))) +vuint16m4_t vdivu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m4_m))) +vuint16m4_t vdivu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m8))) +vuint16m8_t vdivu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16m8_m))) +vuint16m8_t vdivu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16mf2))) +vuint16mf2_t vdivu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16mf2_m))) +vuint16mf2_t vdivu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16mf4))) +vuint16mf4_t vdivu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u16mf4_m))) +vuint16mf4_t vdivu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m1))) +vuint32m1_t vdivu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m1_m))) +vuint32m1_t vdivu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m2))) +vuint32m2_t vdivu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m2_m))) +vuint32m2_t vdivu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m4))) +vuint32m4_t vdivu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m4_m))) +vuint32m4_t vdivu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m8))) +vuint32m8_t vdivu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32m8_m))) +vuint32m8_t vdivu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32mf2))) +vuint32mf2_t vdivu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u32mf2_m))) +vuint32mf2_t vdivu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m1))) +vuint64m1_t vdivu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m1_m))) +vuint64m1_t vdivu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m2))) +vuint64m2_t vdivu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m2_m))) +vuint64m2_t vdivu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m4))) +vuint64m4_t vdivu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m4_m))) +vuint64m4_t vdivu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m8))) +vuint64m8_t vdivu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdivu_vx_u64m8_m))) +vuint64m8_t vdivu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m1))) +vint8m1_t vdiv(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m1_m))) +vint8m1_t vdiv(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m2))) +vint8m2_t vdiv(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m2_m))) +vint8m2_t vdiv(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m4))) +vint8m4_t vdiv(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m4_m))) +vint8m4_t vdiv(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m8))) +vint8m8_t vdiv(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8m8_m))) +vint8m8_t vdiv(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8mf2))) +vint8mf2_t vdiv(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8mf2_m))) +vint8mf2_t vdiv(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8mf4))) +vint8mf4_t vdiv(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8mf4_m))) +vint8mf4_t vdiv(vbool32_t op0, vint8mf4_t op1, 
vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8mf8))) +vint8mf8_t vdiv(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i8mf8_m))) +vint8mf8_t vdiv(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m1))) +vint16m1_t vdiv(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m1_m))) +vint16m1_t vdiv(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m2))) +vint16m2_t vdiv(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m2_m))) +vint16m2_t vdiv(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m4))) +vint16m4_t vdiv(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m4_m))) +vint16m4_t vdiv(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m8))) +vint16m8_t vdiv(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16m8_m))) +vint16m8_t vdiv(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16mf2))) +vint16mf2_t vdiv(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16mf2_m))) +vint16mf2_t vdiv(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16mf4))) +vint16mf4_t vdiv(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i16mf4_m))) +vint16mf4_t vdiv(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m1))) +vint32m1_t vdiv(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m1_m))) +vint32m1_t vdiv(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m2))) +vint32m2_t vdiv(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m2_m))) +vint32m2_t vdiv(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m4))) +vint32m4_t vdiv(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m4_m))) +vint32m4_t vdiv(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m8))) +vint32m8_t vdiv(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32m8_m))) +vint32m8_t vdiv(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32mf2))) +vint32mf2_t vdiv(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i32mf2_m))) +vint32mf2_t vdiv(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m1))) +vint64m1_t vdiv(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m1_m))) +vint64m1_t vdiv(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m2))) +vint64m2_t vdiv(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m2_m))) +vint64m2_t vdiv(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m4))) +vint64m4_t vdiv(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m4_m))) +vint64m4_t vdiv(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m8))) +vint64m8_t vdiv(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vv_i64m8_m))) +vint64m8_t vdiv(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m1))) +vint8m1_t vdiv(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m1_m))) +vint8m1_t vdiv(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m2))) +vint8m2_t vdiv(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m2_m))) +vint8m2_t vdiv(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m4))) +vint8m4_t vdiv(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m4_m))) +vint8m4_t vdiv(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m8))) +vint8m8_t vdiv(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8m8_m))) +vint8m8_t vdiv(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8mf2))) +vint8mf2_t vdiv(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8mf2_m))) +vint8mf2_t vdiv(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8mf4))) +vint8mf4_t vdiv(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8mf4_m))) +vint8mf4_t vdiv(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8mf8))) +vint8mf8_t vdiv(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i8mf8_m))) +vint8mf8_t vdiv(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m1))) +vint16m1_t vdiv(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m1_m))) +vint16m1_t vdiv(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m2))) +vint16m2_t vdiv(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m2_m))) +vint16m2_t vdiv(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m4))) +vint16m4_t vdiv(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m4_m))) +vint16m4_t vdiv(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m8))) +vint16m8_t vdiv(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16m8_m))) +vint16m8_t vdiv(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16mf2))) +vint16mf2_t vdiv(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16mf2_m))) +vint16mf2_t vdiv(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16mf4))) +vint16mf4_t vdiv(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i16mf4_m))) +vint16mf4_t vdiv(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m1))) +vint32m1_t vdiv(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m1_m))) +vint32m1_t vdiv(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m2))) +vint32m2_t vdiv(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m2_m))) +vint32m2_t vdiv(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m4))) +vint32m4_t vdiv(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m4_m))) +vint32m4_t vdiv(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m8))) +vint32m8_t vdiv(vint32m8_t op0, int32_t op1, size_t op2); + 
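/*
 * Editor's sketch -- illustrative only, not part of this patch. Vector
 * integer division by zero does not trap on RISC-V (such lanes yield all
 * ones per the V specification), but the masked "_m" divide overloads
 * declared here let callers give zero-divisor lanes an explicit fallback
 * value instead: masked-off lanes take the maskedoff operand. User code,
 * assuming a clang 14 RVV toolchain:
 */
static vint32m1_t div_or_fallback(vint32m1_t num, vint32m1_t den,
                                  vint32m1_t fallback, size_t vl) {
  vbool32_t nonzero = vmsne(den, 0, vl);        /* mask: den != 0 */
  return vdiv(nonzero, fallback, num, den, vl); /* vdiv_vv_i32m1_m */
}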
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32m8_m))) +vint32m8_t vdiv(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32mf2))) +vint32mf2_t vdiv(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i32mf2_m))) +vint32mf2_t vdiv(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m1))) +vint64m1_t vdiv(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m1_m))) +vint64m1_t vdiv(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m2))) +vint64m2_t vdiv(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m2_m))) +vint64m2_t vdiv(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m4))) +vint64m4_t vdiv(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m4_m))) +vint64m4_t vdiv(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m8))) +vint64m8_t vdiv(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vdiv_vx_i64m8_m))) +vint64m8_t vdiv(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m1))) +vuint8m1_t vremu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m1_m))) +vuint8m1_t vremu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m2))) +vuint8m2_t vremu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m2_m))) +vuint8m2_t vremu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m4))) +vuint8m4_t vremu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m4_m))) +vuint8m4_t vremu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m8))) +vuint8m8_t vremu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8m8_m))) +vuint8m8_t vremu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8mf2))) +vuint8mf2_t vremu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8mf2_m))) +vuint8mf2_t vremu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8mf4))) 
+vuint8mf4_t vremu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8mf4_m))) +vuint8mf4_t vremu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8mf8))) +vuint8mf8_t vremu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u8mf8_m))) +vuint8mf8_t vremu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m1))) +vuint16m1_t vremu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m1_m))) +vuint16m1_t vremu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m2))) +vuint16m2_t vremu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m2_m))) +vuint16m2_t vremu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m4))) +vuint16m4_t vremu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m4_m))) +vuint16m4_t vremu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m8))) +vuint16m8_t vremu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16m8_m))) +vuint16m8_t vremu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16mf2))) +vuint16mf2_t vremu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16mf2_m))) +vuint16mf2_t vremu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16mf4))) +vuint16mf4_t vremu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u16mf4_m))) +vuint16mf4_t vremu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m1))) +vuint32m1_t vremu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m1_m))) +vuint32m1_t vremu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m2))) +vuint32m2_t vremu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m2_m))) +vuint32m2_t vremu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m4))) +vuint32m4_t vremu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m4_m))) +vuint32m4_t vremu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m8))) +vuint32m8_t vremu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32m8_m))) +vuint32m8_t vremu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32mf2))) +vuint32mf2_t vremu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u32mf2_m))) +vuint32mf2_t vremu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m1))) +vuint64m1_t vremu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m1_m))) +vuint64m1_t vremu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m2))) +vuint64m2_t vremu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m2_m))) +vuint64m2_t vremu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m4))) +vuint64m4_t vremu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m4_m))) +vuint64m4_t vremu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m8))) +vuint64m8_t vremu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vv_u64m8_m))) +vuint64m8_t vremu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m1))) +vuint8m1_t vremu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m1_m))) +vuint8m1_t vremu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m2))) +vuint8m2_t vremu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m2_m))) +vuint8m2_t vremu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m4))) +vuint8m4_t vremu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m4_m))) +vuint8m4_t vremu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m8))) +vuint8m8_t vremu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8m8_m))) +vuint8m8_t vremu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8mf2))) +vuint8mf2_t vremu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8mf2_m))) +vuint8mf2_t vremu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8mf4))) +vuint8mf4_t vremu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8mf4_m))) +vuint8mf4_t vremu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8mf8))) +vuint8mf8_t vremu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u8mf8_m))) +vuint8mf8_t vremu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m1))) +vuint16m1_t vremu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m1_m))) +vuint16m1_t vremu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m2))) +vuint16m2_t vremu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m2_m))) +vuint16m2_t vremu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m4))) +vuint16m4_t vremu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m4_m))) +vuint16m4_t vremu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m8))) +vuint16m8_t vremu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16m8_m))) +vuint16m8_t vremu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16mf2))) +vuint16mf2_t vremu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16mf2_m))) +vuint16mf2_t vremu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16mf4))) +vuint16mf4_t vremu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u16mf4_m))) +vuint16mf4_t vremu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m1))) +vuint32m1_t vremu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m1_m))) +vuint32m1_t vremu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m2))) +vuint32m2_t vremu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m2_m))) +vuint32m2_t vremu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m4))) +vuint32m4_t vremu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m4_m))) +vuint32m4_t vremu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m8))) +vuint32m8_t vremu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32m8_m))) +vuint32m8_t vremu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32mf2))) +vuint32mf2_t vremu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u32mf2_m))) +vuint32mf2_t vremu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m1))) +vuint64m1_t vremu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m1_m))) +vuint64m1_t vremu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m2))) +vuint64m2_t vremu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m2_m))) +vuint64m2_t vremu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m4))) +vuint64m4_t vremu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m4_m))) +vuint64m4_t vremu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m8))) +vuint64m8_t vremu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vremu_vx_u64m8_m))) +vuint64m8_t vremu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m1))) +vint8m1_t vrem(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m1_m))) +vint8m1_t vrem(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m2))) +vint8m2_t vrem(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m2_m))) +vint8m2_t vrem(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m4))) +vint8m4_t vrem(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m4_m))) +vint8m4_t vrem(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m8))) 
+vint8m8_t vrem(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8m8_m))) +vint8m8_t vrem(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8mf2))) +vint8mf2_t vrem(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8mf2_m))) +vint8mf2_t vrem(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8mf4))) +vint8mf4_t vrem(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8mf4_m))) +vint8mf4_t vrem(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8mf8))) +vint8mf8_t vrem(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i8mf8_m))) +vint8mf8_t vrem(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m1))) +vint16m1_t vrem(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m1_m))) +vint16m1_t vrem(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m2))) +vint16m2_t vrem(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m2_m))) +vint16m2_t vrem(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m4))) +vint16m4_t vrem(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m4_m))) +vint16m4_t vrem(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m8))) +vint16m8_t vrem(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16m8_m))) +vint16m8_t vrem(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16mf2))) +vint16mf2_t vrem(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16mf2_m))) +vint16mf2_t vrem(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16mf4))) +vint16mf4_t vrem(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i16mf4_m))) +vint16mf4_t vrem(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m1))) +vint32m1_t vrem(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m1_m))) +vint32m1_t vrem(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + 
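+/* The vdiv/vremu/vrem overloads above share one calling convention: the
+ * unmasked form is vop(a, b, vl), applying the operation element-wise over
+ * the first vl elements, and the masked `_m` form is
+ * vop(mask, maskedoff, a, b, vl), which keeps maskedoff[i] wherever mask[i]
+ * is clear. A minimal usage sketch; the buffers a, b and the count n, and
+ * the e8/m1 configuration, are illustrative and not part of this header:
+ *   size_t vl = vsetvl_e8m1(n);
+ *   vint8m1_t r = vrem(vle8_v_i8m1(a, vl), vle8_v_i8m1(b, vl), vl);
+ */
+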
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m2))) +vint32m2_t vrem(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m2_m))) +vint32m2_t vrem(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m4))) +vint32m4_t vrem(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m4_m))) +vint32m4_t vrem(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m8))) +vint32m8_t vrem(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32m8_m))) +vint32m8_t vrem(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32mf2))) +vint32mf2_t vrem(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i32mf2_m))) +vint32mf2_t vrem(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m1))) +vint64m1_t vrem(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m1_m))) +vint64m1_t vrem(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m2))) +vint64m2_t vrem(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m2_m))) +vint64m2_t vrem(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m4))) +vint64m4_t vrem(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m4_m))) +vint64m4_t vrem(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m8))) +vint64m8_t vrem(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vv_i64m8_m))) +vint64m8_t vrem(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m1))) +vint8m1_t vrem(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m1_m))) +vint8m1_t vrem(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m2))) +vint8m2_t vrem(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m2_m))) +vint8m2_t vrem(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m4))) +vint8m4_t vrem(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m4_m))) +vint8m4_t vrem(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, 
int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m8))) +vint8m8_t vrem(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8m8_m))) +vint8m8_t vrem(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8mf2))) +vint8mf2_t vrem(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8mf2_m))) +vint8mf2_t vrem(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8mf4))) +vint8mf4_t vrem(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8mf4_m))) +vint8mf4_t vrem(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8mf8))) +vint8mf8_t vrem(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i8mf8_m))) +vint8mf8_t vrem(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m1))) +vint16m1_t vrem(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m1_m))) +vint16m1_t vrem(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m2))) +vint16m2_t vrem(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m2_m))) +vint16m2_t vrem(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m4))) +vint16m4_t vrem(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m4_m))) +vint16m4_t vrem(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m8))) +vint16m8_t vrem(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16m8_m))) +vint16m8_t vrem(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16mf2))) +vint16mf2_t vrem(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16mf2_m))) +vint16mf2_t vrem(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16mf4))) +vint16mf4_t vrem(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i16mf4_m))) +vint16mf4_t vrem(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m1))) +vint32m1_t vrem(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m1_m))) +vint32m1_t vrem(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m2))) +vint32m2_t vrem(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m2_m))) +vint32m2_t vrem(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m4))) +vint32m4_t vrem(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m4_m))) +vint32m4_t vrem(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m8))) +vint32m8_t vrem(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32m8_m))) +vint32m8_t vrem(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32mf2))) +vint32mf2_t vrem(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i32mf2_m))) +vint32mf2_t vrem(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m1))) +vint64m1_t vrem(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m1_m))) +vint64m1_t vrem(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m2))) +vint64m2_t vrem(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m2_m))) +vint64m2_t vrem(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m4))) +vint64m4_t vrem(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m4_m))) +vint64m4_t vrem(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m8))) +vint64m8_t vrem(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrem_vx_i64m8_m))) +vint64m8_t vrem(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16mf4))) +vint16mf4_t vwmul(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16mf4_m))) +vint16mf4_t vwmul(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16mf2))) +vint16mf2_t vwmul(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16mf2_m))) +vint16mf2_t vwmul(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m1))) +vint16m1_t vwmul(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m1_m))) +vint16m1_t vwmul(vbool16_t op0, 
vint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m2))) +vint16m2_t vwmul(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m2_m))) +vint16m2_t vwmul(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m4))) +vint16m4_t vwmul(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m4_m))) +vint16m4_t vwmul(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m8))) +vint16m8_t vwmul(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i16m8_m))) +vint16m8_t vwmul(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32mf2))) +vint32mf2_t vwmul(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32mf2_m))) +vint32mf2_t vwmul(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m1))) +vint32m1_t vwmul(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m1_m))) +vint32m1_t vwmul(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m2))) +vint32m2_t vwmul(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m2_m))) +vint32m2_t vwmul(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m4))) +vint32m4_t vwmul(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m4_m))) +vint32m4_t vwmul(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m8))) +vint32m8_t vwmul(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i32m8_m))) +vint32m8_t vwmul(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m1))) +vint64m1_t vwmul(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m1_m))) +vint64m1_t vwmul(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m2))) +vint64m2_t vwmul(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m2_m))) +vint64m2_t vwmul(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m4))) +vint64m4_t vwmul(vint32m2_t op0, vint32m2_t op1, size_t op2); + 
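+/* vwmul is the signed widening multiply: the product comes back at twice
+ * the source SEW and twice the source LMUL, so two vint32m2_t inputs yield
+ * a vint64m4_t, as in the overloads above. A minimal sketch; x, y and n
+ * are illustrative names, not part of this header:
+ *   size_t vl = vsetvl_e32m2(n);
+ *   vint64m4_t w = vwmul(vle32_v_i32m2(x, vl), vle32_v_i32m2(y, vl), vl);
+ */
+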
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m4_m))) +vint64m4_t vwmul(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m8))) +vint64m8_t vwmul(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vv_i64m8_m))) +vint64m8_t vwmul(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m1))) +void vsuxei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m1_m))) +void vsuxei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m2))) +void vsuxei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m2_m))) +void vsuxei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m4))) +void vsuxei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m4_m))) +void vsuxei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m8))) +void vsuxei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32m8_m))) +void vsuxei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32mf2))) +void vsuxei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u32mf2_m))) +void vsuxei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16mf4))) +vint16mf4_t vwmul(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16mf4_m))) +vint16mf4_t vwmul(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16mf2))) +vint16mf2_t vwmul(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16mf2_m))) +vint16mf2_t vwmul(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m1))) +vint16m1_t vwmul(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m1_m))) +vint16m1_t vwmul(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m2))) +vint16m2_t vwmul(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m2_m))) +vint16m2_t vwmul(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, int8_t op3, size_t op4); + 
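+/* The vsuxei8 overloads above are the unordered indexed store: value[i] is
+ * scattered to base + index[i], with the 8-bit index vector holding byte
+ * offsets; the vwmul `_vx` forms that follow take a scalar second operand
+ * used for every element. An indexed-store sketch; dst, offs, src and n
+ * are illustrative names, not part of this header:
+ *   size_t vl = vsetvl_e32m1(n);
+ *   vsuxei8(dst, vle8_v_u8mf4(offs, vl), vle32_v_u32m1(src, vl), vl);
+ */
+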
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m4))) +vint16m4_t vwmul(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m4_m))) +vint16m4_t vwmul(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m8))) +vint16m8_t vwmul(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i16m8_m))) +vint16m8_t vwmul(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32mf2))) +vint32mf2_t vwmul(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32mf2_m))) +vint32mf2_t vwmul(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m1))) +vint32m1_t vwmul(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m1_m))) +vint32m1_t vwmul(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m2))) +vint32m2_t vwmul(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m2_m))) +vint32m2_t vwmul(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m4))) +vint32m4_t vwmul(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m4_m))) +vint32m4_t vwmul(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m8))) +vint32m8_t vwmul(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i32m8_m))) +vint32m8_t vwmul(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m1))) +vint64m1_t vwmul(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m1_m))) +vint64m1_t vwmul(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m2))) +vint64m2_t vwmul(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m2_m))) +vint64m2_t vwmul(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m4))) +vint64m4_t vwmul(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m4_m))) +vint64m4_t vwmul(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m8))) +vint64m8_t vwmul(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmul_vx_i64m8_m))) +vint64m8_t vwmul(vbool8_t op0, vint64m8_t op1, 
vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16mf4))) +vuint16mf4_t vwmulu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16mf4_m))) +vuint16mf4_t vwmulu(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16mf2))) +vuint16mf2_t vwmulu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16mf2_m))) +vuint16mf2_t vwmulu(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m1))) +vuint16m1_t vwmulu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m1_m))) +vuint16m1_t vwmulu(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m2))) +vuint16m2_t vwmulu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m2_m))) +vuint16m2_t vwmulu(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m4))) +vuint16m4_t vwmulu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m4_m))) +vuint16m4_t vwmulu(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m8))) +vuint16m8_t vwmulu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u16m8_m))) +vuint16m8_t vwmulu(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32mf2))) +vuint32mf2_t vwmulu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32mf2_m))) +vuint32mf2_t vwmulu(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m1))) +vuint32m1_t vwmulu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m1_m))) +vuint32m1_t vwmulu(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m2))) +vuint32m2_t vwmulu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m2_m))) +vuint32m2_t vwmulu(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m4))) +vuint32m4_t vwmulu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m4_m))) +vuint32m4_t vwmulu(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m8))) +vuint32m8_t vwmulu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u32m8_m))) +vuint32m8_t vwmulu(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m1))) +vuint64m1_t vwmulu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m1_m))) +vuint64m1_t vwmulu(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m2))) +vuint64m2_t vwmulu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m2_m))) +vuint64m2_t vwmulu(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m4))) +vuint64m4_t vwmulu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m4_m))) +vuint64m4_t vwmulu(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m8))) +vuint64m8_t vwmulu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vv_u64m8_m))) +vuint64m8_t vwmulu(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16mf4))) +vuint16mf4_t vwmulu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16mf4_m))) +vuint16mf4_t vwmulu(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16mf2))) +vuint16mf2_t vwmulu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16mf2_m))) +vuint16mf2_t vwmulu(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m1))) +vuint16m1_t vwmulu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m1_m))) +vuint16m1_t vwmulu(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m2))) +vuint16m2_t vwmulu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m2_m))) +vuint16m2_t vwmulu(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m4))) +vuint16m4_t vwmulu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m4_m))) +vuint16m4_t vwmulu(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m8))) +vuint16m8_t vwmulu(vuint8m4_t op0, uint8_t op1, size_t op2); + 
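+/* vwmulu is the unsigned counterpart of vwmul, again doubling SEW and LMUL
+ * from source to result; the vwmulsu overloads further below multiply a
+ * signed first operand by an unsigned second operand into a signed result.
+ * A minimal sketch of a vector-scalar form; u and n are illustrative names,
+ * not part of this header:
+ *   size_t vl = vsetvl_e8m1(n);
+ *   vuint16m2_t p = vwmulu(vle8_v_u8m1(u, vl), (uint8_t)3, vl);
+ */
+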
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u16m8_m))) +vuint16m8_t vwmulu(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32mf2))) +vuint32mf2_t vwmulu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32mf2_m))) +vuint32mf2_t vwmulu(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m1))) +vuint32m1_t vwmulu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m1_m))) +vuint32m1_t vwmulu(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m2))) +vuint32m2_t vwmulu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m2_m))) +vuint32m2_t vwmulu(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m4))) +vuint32m4_t vwmulu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m4_m))) +vuint32m4_t vwmulu(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m8))) +vuint32m8_t vwmulu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u32m8_m))) +vuint32m8_t vwmulu(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m1))) +vuint64m1_t vwmulu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m1_m))) +vuint64m1_t vwmulu(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m2))) +vuint64m2_t vwmulu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m2_m))) +vuint64m2_t vwmulu(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m4))) +vuint64m4_t vwmulu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m4_m))) +vuint64m4_t vwmulu(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m8))) +vuint64m8_t vwmulu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulu_vx_u64m8_m))) +vuint64m8_t vwmulu(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16mf4))) +vint16mf4_t vwmulsu(vint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16mf4_m))) +vint16mf4_t vwmulsu(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, 
vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16mf2))) +vint16mf2_t vwmulsu(vint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16mf2_m))) +vint16mf2_t vwmulsu(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m1))) +vint16m1_t vwmulsu(vint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m1_m))) +vint16m1_t vwmulsu(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m2))) +vint16m2_t vwmulsu(vint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m2_m))) +vint16m2_t vwmulsu(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m4))) +vint16m4_t vwmulsu(vint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m4_m))) +vint16m4_t vwmulsu(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m8))) +vint16m8_t vwmulsu(vint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i16m8_m))) +vint16m8_t vwmulsu(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32mf2))) +vint32mf2_t vwmulsu(vint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32mf2_m))) +vint32mf2_t vwmulsu(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m1))) +vint32m1_t vwmulsu(vint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m1_m))) +vint32m1_t vwmulsu(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m2))) +vint32m2_t vwmulsu(vint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m2_m))) +vint32m2_t vwmulsu(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m4))) +vint32m4_t vwmulsu(vint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m4_m))) +vint32m4_t vwmulsu(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m8))) +vint32m8_t vwmulsu(vint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i32m8_m))) +vint32m8_t vwmulsu(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m1))) 
+vint64m1_t vwmulsu(vint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m1_m))) +vint64m1_t vwmulsu(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m2))) +vint64m2_t vwmulsu(vint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m2_m))) +vint64m2_t vwmulsu(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m4))) +vint64m4_t vwmulsu(vint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m4_m))) +vint64m4_t vwmulsu(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m8))) +vint64m8_t vwmulsu(vint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vv_i64m8_m))) +vint64m8_t vwmulsu(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16mf4))) +vint16mf4_t vwmulsu(vint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16mf4_m))) +vint16mf4_t vwmulsu(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16mf2))) +vint16mf2_t vwmulsu(vint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16mf2_m))) +vint16mf2_t vwmulsu(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m1))) +vint16m1_t vwmulsu(vint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m1_m))) +vint16m1_t vwmulsu(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m2))) +vint16m2_t vwmulsu(vint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m2_m))) +vint16m2_t vwmulsu(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m4))) +vint16m4_t vwmulsu(vint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m4_m))) +vint16m4_t vwmulsu(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m8))) +vint16m8_t vwmulsu(vint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i16m8_m))) +vint16m8_t vwmulsu(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32mf2))) +vint32mf2_t vwmulsu(vint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32mf2_m))) 
+vint32mf2_t vwmulsu(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m1))) +vint32m1_t vwmulsu(vint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m1_m))) +vint32m1_t vwmulsu(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m2))) +vint32m2_t vwmulsu(vint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m2_m))) +vint32m2_t vwmulsu(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m4))) +vint32m4_t vwmulsu(vint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m4_m))) +vint32m4_t vwmulsu(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m8))) +vint32m8_t vwmulsu(vint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i32m8_m))) +vint32m8_t vwmulsu(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m1))) +vint64m1_t vwmulsu(vint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m1_m))) +vint64m1_t vwmulsu(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m2))) +vint64m2_t vwmulsu(vint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m2_m))) +vint64m2_t vwmulsu(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m4))) +vint64m4_t vwmulsu(vint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m4_m))) +vint64m4_t vwmulsu(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m8))) +vint64m8_t vwmulsu(vint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmulsu_vx_i64m8_m))) +vint64m8_t vwmulsu(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m1))) +vint8m1_t vmacc(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m1_m))) +vint8m1_t vmacc(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m2))) +vint8m2_t vmacc(vint8m2_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m2_m))) +vint8m2_t vmacc(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m4))) +vint8m4_t vmacc(vint8m4_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m4_m))) +vint8m4_t vmacc(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m8))) +vint8m8_t vmacc(vint8m8_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8m8_m))) +vint8m8_t vmacc(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8mf2))) +vint8mf2_t vmacc(vint8mf2_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8mf2_m))) +vint8mf2_t vmacc(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8mf4))) +vint8mf4_t vmacc(vint8mf4_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8mf4_m))) +vint8mf4_t vmacc(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8mf8))) +vint8mf8_t vmacc(vint8mf8_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i8mf8_m))) +vint8mf8_t vmacc(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m1))) +vint16m1_t vmacc(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m1_m))) +vint16m1_t vmacc(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m2))) +vint16m2_t vmacc(vint16m2_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m2_m))) +vint16m2_t vmacc(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m4))) +vint16m4_t vmacc(vint16m4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m4_m))) +vint16m4_t vmacc(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m8))) +vint16m8_t vmacc(vint16m8_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16m8_m))) +vint16m8_t vmacc(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16mf2))) +vint16mf2_t vmacc(vint16mf2_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16mf2_m))) +vint16mf2_t vmacc(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16mf4))) 
+vint16mf4_t vmacc(vint16mf4_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i16mf4_m))) +vint16mf4_t vmacc(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m1))) +vint32m1_t vmacc(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m1_m))) +vint32m1_t vmacc(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m2))) +vint32m2_t vmacc(vint32m2_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m2_m))) +vint32m2_t vmacc(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m4))) +vint32m4_t vmacc(vint32m4_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m4_m))) +vint32m4_t vmacc(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m8))) +vint32m8_t vmacc(vint32m8_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32m8_m))) +vint32m8_t vmacc(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32mf2))) +vint32mf2_t vmacc(vint32mf2_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i32mf2_m))) +vint32mf2_t vmacc(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m1))) +vint64m1_t vmacc(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m1_m))) +vint64m1_t vmacc(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m2))) +vint64m2_t vmacc(vint64m2_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m2_m))) +vint64m2_t vmacc(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m4))) +vint64m4_t vmacc(vint64m4_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m4_m))) +vint64m4_t vmacc(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m8))) +vint64m8_t vmacc(vint64m8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_i64m8_m))) +vint64m8_t vmacc(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m1))) +vint8m1_t vmacc(vint8m1_t op0, int8_t op1, 
vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m1_m))) +vint8m1_t vmacc(vbool8_t op0, vint8m1_t op1, int8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m2))) +vint8m2_t vmacc(vint8m2_t op0, int8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m2_m))) +vint8m2_t vmacc(vbool4_t op0, vint8m2_t op1, int8_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m4))) +vint8m4_t vmacc(vint8m4_t op0, int8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m4_m))) +vint8m4_t vmacc(vbool2_t op0, vint8m4_t op1, int8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m8))) +vint8m8_t vmacc(vint8m8_t op0, int8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8m8_m))) +vint8m8_t vmacc(vbool1_t op0, vint8m8_t op1, int8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8mf2))) +vint8mf2_t vmacc(vint8mf2_t op0, int8_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8mf2_m))) +vint8mf2_t vmacc(vbool16_t op0, vint8mf2_t op1, int8_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8mf4))) +vint8mf4_t vmacc(vint8mf4_t op0, int8_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8mf4_m))) +vint8mf4_t vmacc(vbool32_t op0, vint8mf4_t op1, int8_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8mf8))) +vint8mf8_t vmacc(vint8mf8_t op0, int8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i8mf8_m))) +vint8mf8_t vmacc(vbool64_t op0, vint8mf8_t op1, int8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m1))) +vint16m1_t vmacc(vint16m1_t op0, int16_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m1_m))) +vint16m1_t vmacc(vbool16_t op0, vint16m1_t op1, int16_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m2))) +vint16m2_t vmacc(vint16m2_t op0, int16_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m2_m))) +vint16m2_t vmacc(vbool8_t op0, vint16m2_t op1, int16_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m4))) +vint16m4_t vmacc(vint16m4_t op0, int16_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m4_m))) +vint16m4_t vmacc(vbool4_t op0, vint16m4_t op1, int16_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m8))) +vint16m8_t vmacc(vint16m8_t op0, int16_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16m8_m))) +vint16m8_t vmacc(vbool2_t op0, vint16m8_t op1, 
int16_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16mf2))) +vint16mf2_t vmacc(vint16mf2_t op0, int16_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16mf2_m))) +vint16mf2_t vmacc(vbool32_t op0, vint16mf2_t op1, int16_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16mf4))) +vint16mf4_t vmacc(vint16mf4_t op0, int16_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i16mf4_m))) +vint16mf4_t vmacc(vbool64_t op0, vint16mf4_t op1, int16_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m1))) +vint32m1_t vmacc(vint32m1_t op0, int32_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m1_m))) +vint32m1_t vmacc(vbool32_t op0, vint32m1_t op1, int32_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m2))) +vint32m2_t vmacc(vint32m2_t op0, int32_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m2_m))) +vint32m2_t vmacc(vbool16_t op0, vint32m2_t op1, int32_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m4))) +vint32m4_t vmacc(vint32m4_t op0, int32_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m4_m))) +vint32m4_t vmacc(vbool8_t op0, vint32m4_t op1, int32_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m8))) +vint32m8_t vmacc(vint32m8_t op0, int32_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32m8_m))) +vint32m8_t vmacc(vbool4_t op0, vint32m8_t op1, int32_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32mf2))) +vint32mf2_t vmacc(vint32mf2_t op0, int32_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i32mf2_m))) +vint32mf2_t vmacc(vbool64_t op0, vint32mf2_t op1, int32_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m1))) +vint64m1_t vmacc(vint64m1_t op0, int64_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m1_m))) +vint64m1_t vmacc(vbool64_t op0, vint64m1_t op1, int64_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m2))) +vint64m2_t vmacc(vint64m2_t op0, int64_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m2_m))) +vint64m2_t vmacc(vbool32_t op0, vint64m2_t op1, int64_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m4))) +vint64m4_t vmacc(vint64m4_t op0, int64_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m4_m))) +vint64m4_t vmacc(vbool16_t op0, vint64m4_t op1, int64_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m8))) +vint64m8_t vmacc(vint64m8_t op0, int64_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_i64m8_m))) +vint64m8_t vmacc(vbool8_t op0, vint64m8_t op1, int64_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m1))) +vuint8m1_t vmacc(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m1_m))) +vuint8m1_t vmacc(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m2))) +vuint8m2_t vmacc(vuint8m2_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m2_m))) +vuint8m2_t vmacc(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m4))) +vuint8m4_t vmacc(vuint8m4_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m4_m))) +vuint8m4_t vmacc(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m8))) +vuint8m8_t vmacc(vuint8m8_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8m8_m))) +vuint8m8_t vmacc(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8mf2))) +vuint8mf2_t vmacc(vuint8mf2_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8mf2_m))) +vuint8mf2_t vmacc(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8mf4))) +vuint8mf4_t vmacc(vuint8mf4_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8mf4_m))) +vuint8mf4_t vmacc(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8mf8))) +vuint8mf8_t vmacc(vuint8mf8_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u8mf8_m))) +vuint8mf8_t vmacc(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m1))) +vuint16m1_t vmacc(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m1_m))) +vuint16m1_t vmacc(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m2))) +vuint16m2_t vmacc(vuint16m2_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m2_m))) +vuint16m2_t vmacc(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m4))) +vuint16m4_t vmacc(vuint16m4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m4_m))) +vuint16m4_t vmacc(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m8))) +vuint16m8_t vmacc(vuint16m8_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16m8_m))) +vuint16m8_t vmacc(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16mf2))) +vuint16mf2_t vmacc(vuint16mf2_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16mf2_m))) +vuint16mf2_t vmacc(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16mf4))) +vuint16mf4_t vmacc(vuint16mf4_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u16mf4_m))) +vuint16mf4_t vmacc(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m1))) +vuint32m1_t vmacc(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m1_m))) +vuint32m1_t vmacc(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m2))) +vuint32m2_t vmacc(vuint32m2_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m2_m))) +vuint32m2_t vmacc(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m4))) +vuint32m4_t vmacc(vuint32m4_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m4_m))) +vuint32m4_t vmacc(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m8))) +vuint32m8_t vmacc(vuint32m8_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32m8_m))) +vuint32m8_t vmacc(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32mf2))) +vuint32mf2_t vmacc(vuint32mf2_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u32mf2_m))) +vuint32mf2_t vmacc(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m1))) +vuint64m1_t vmacc(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m1_m))) +vuint64m1_t vmacc(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, 
vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m2))) +vuint64m2_t vmacc(vuint64m2_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m2_m))) +vuint64m2_t vmacc(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m4))) +vuint64m4_t vmacc(vuint64m4_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m4_m))) +vuint64m4_t vmacc(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m8))) +vuint64m8_t vmacc(vuint64m8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vv_u64m8_m))) +vuint64m8_t vmacc(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m1))) +vuint8m1_t vmacc(vuint8m1_t op0, uint8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m1_m))) +vuint8m1_t vmacc(vbool8_t op0, vuint8m1_t op1, uint8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m2))) +vuint8m2_t vmacc(vuint8m2_t op0, uint8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m2_m))) +vuint8m2_t vmacc(vbool4_t op0, vuint8m2_t op1, uint8_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m4))) +vuint8m4_t vmacc(vuint8m4_t op0, uint8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m4_m))) +vuint8m4_t vmacc(vbool2_t op0, vuint8m4_t op1, uint8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m8))) +vuint8m8_t vmacc(vuint8m8_t op0, uint8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8m8_m))) +vuint8m8_t vmacc(vbool1_t op0, vuint8m8_t op1, uint8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8mf2))) +vuint8mf2_t vmacc(vuint8mf2_t op0, uint8_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8mf2_m))) +vuint8mf2_t vmacc(vbool16_t op0, vuint8mf2_t op1, uint8_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8mf4))) +vuint8mf4_t vmacc(vuint8mf4_t op0, uint8_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8mf4_m))) +vuint8mf4_t vmacc(vbool32_t op0, vuint8mf4_t op1, uint8_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8mf8))) +vuint8mf8_t vmacc(vuint8mf8_t op0, uint8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u8mf8_m))) +vuint8mf8_t vmacc(vbool64_t op0, vuint8mf8_t op1, uint8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m1))) +vuint16m1_t vmacc(vuint16m1_t op0, uint16_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m1_m))) +vuint16m1_t vmacc(vbool16_t op0, vuint16m1_t op1, uint16_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m2))) +vuint16m2_t vmacc(vuint16m2_t op0, uint16_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m2_m))) +vuint16m2_t vmacc(vbool8_t op0, vuint16m2_t op1, uint16_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m4))) +vuint16m4_t vmacc(vuint16m4_t op0, uint16_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m4_m))) +vuint16m4_t vmacc(vbool4_t op0, vuint16m4_t op1, uint16_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m8))) +vuint16m8_t vmacc(vuint16m8_t op0, uint16_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16m8_m))) +vuint16m8_t vmacc(vbool2_t op0, vuint16m8_t op1, uint16_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16mf2))) +vuint16mf2_t vmacc(vuint16mf2_t op0, uint16_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16mf2_m))) +vuint16mf2_t vmacc(vbool32_t op0, vuint16mf2_t op1, uint16_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16mf4))) +vuint16mf4_t vmacc(vuint16mf4_t op0, uint16_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u16mf4_m))) +vuint16mf4_t vmacc(vbool64_t op0, vuint16mf4_t op1, uint16_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m1))) +vuint32m1_t vmacc(vuint32m1_t op0, uint32_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m1_m))) +vuint32m1_t vmacc(vbool32_t op0, vuint32m1_t op1, uint32_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m2))) +vuint32m2_t vmacc(vuint32m2_t op0, uint32_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m2_m))) +vuint32m2_t vmacc(vbool16_t op0, vuint32m2_t op1, uint32_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m4))) +vuint32m4_t vmacc(vuint32m4_t op0, uint32_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m4_m))) +vuint32m4_t vmacc(vbool8_t op0, vuint32m4_t op1, uint32_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m8))) +vuint32m8_t vmacc(vuint32m8_t op0, uint32_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32m8_m))) +vuint32m8_t vmacc(vbool4_t op0, vuint32m8_t op1, uint32_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32mf2))) +vuint32mf2_t vmacc(vuint32mf2_t op0, uint32_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u32mf2_m))) +vuint32mf2_t vmacc(vbool64_t op0, vuint32mf2_t op1, uint32_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m1))) +vuint64m1_t vmacc(vuint64m1_t op0, uint64_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m1_m))) +vuint64m1_t vmacc(vbool64_t op0, vuint64m1_t op1, uint64_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m2))) +vuint64m2_t vmacc(vuint64m2_t op0, uint64_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m2_m))) +vuint64m2_t vmacc(vbool32_t op0, vuint64m2_t op1, uint64_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m4))) +vuint64m4_t vmacc(vuint64m4_t op0, uint64_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m4_m))) +vuint64m4_t vmacc(vbool16_t op0, vuint64m4_t op1, uint64_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m8))) +vuint64m8_t vmacc(vuint64m8_t op0, uint64_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmacc_vx_u64m8_m))) +vuint64m8_t vmacc(vbool8_t op0, vuint64m8_t op1, uint64_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m1))) +vint8m1_t vnmsac(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m1_m))) +vint8m1_t vnmsac(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m2))) +vint8m2_t vnmsac(vint8m2_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m2_m))) +vint8m2_t vnmsac(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m4))) +vint8m4_t vnmsac(vint8m4_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m4_m))) +vint8m4_t vnmsac(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m8))) +vint8m8_t vnmsac(vint8m8_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8m8_m))) +vint8m8_t vnmsac(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8mf2))) +vint8mf2_t vnmsac(vint8mf2_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8mf2_m))) +vint8mf2_t vnmsac(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8mf4))) 
+vint8mf4_t vnmsac(vint8mf4_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8mf4_m))) +vint8mf4_t vnmsac(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8mf8))) +vint8mf8_t vnmsac(vint8mf8_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i8mf8_m))) +vint8mf8_t vnmsac(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m1))) +vint16m1_t vnmsac(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m1_m))) +vint16m1_t vnmsac(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m2))) +vint16m2_t vnmsac(vint16m2_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m2_m))) +vint16m2_t vnmsac(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m4))) +vint16m4_t vnmsac(vint16m4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m4_m))) +vint16m4_t vnmsac(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m8))) +vint16m8_t vnmsac(vint16m8_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16m8_m))) +vint16m8_t vnmsac(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16mf2))) +vint16mf2_t vnmsac(vint16mf2_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16mf2_m))) +vint16mf2_t vnmsac(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16mf4))) +vint16mf4_t vnmsac(vint16mf4_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i16mf4_m))) +vint16mf4_t vnmsac(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m1))) +vint32m1_t vnmsac(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m1_m))) +vint32m1_t vnmsac(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m2))) +vint32m2_t vnmsac(vint32m2_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m2_m))) +vint32m2_t vnmsac(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m4))) 
+vint32m4_t vnmsac(vint32m4_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m4_m))) +vint32m4_t vnmsac(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m8))) +vint32m8_t vnmsac(vint32m8_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32m8_m))) +vint32m8_t vnmsac(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32mf2))) +vint32mf2_t vnmsac(vint32mf2_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i32mf2_m))) +vint32mf2_t vnmsac(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m1))) +vint64m1_t vnmsac(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m1_m))) +vint64m1_t vnmsac(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m2))) +vint64m2_t vnmsac(vint64m2_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m2_m))) +vint64m2_t vnmsac(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m4))) +vint64m4_t vnmsac(vint64m4_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m4_m))) +vint64m4_t vnmsac(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m8))) +vint64m8_t vnmsac(vint64m8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_i64m8_m))) +vint64m8_t vnmsac(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m1))) +void vsuxei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m1_m))) +void vsuxei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m2))) +void vsuxei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m2_m))) +void vsuxei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m4))) +void vsuxei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m4_m))) +void vsuxei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m8))) +void vsuxei16(int32_t * op0, 
vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32m8_m))) +void vsuxei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32mf2))) +void vsuxei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i32mf2_m))) +void vsuxei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m1))) +vint8m1_t vnmsac(vint8m1_t op0, int8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m1_m))) +vint8m1_t vnmsac(vbool8_t op0, vint8m1_t op1, int8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m2))) +vint8m2_t vnmsac(vint8m2_t op0, int8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m2_m))) +vint8m2_t vnmsac(vbool4_t op0, vint8m2_t op1, int8_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m4))) +vint8m4_t vnmsac(vint8m4_t op0, int8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m4_m))) +vint8m4_t vnmsac(vbool2_t op0, vint8m4_t op1, int8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m8))) +vint8m8_t vnmsac(vint8m8_t op0, int8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8m8_m))) +vint8m8_t vnmsac(vbool1_t op0, vint8m8_t op1, int8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8mf2))) +vint8mf2_t vnmsac(vint8mf2_t op0, int8_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8mf2_m))) +vint8mf2_t vnmsac(vbool16_t op0, vint8mf2_t op1, int8_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8mf4))) +vint8mf4_t vnmsac(vint8mf4_t op0, int8_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8mf4_m))) +vint8mf4_t vnmsac(vbool32_t op0, vint8mf4_t op1, int8_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8mf8))) +vint8mf8_t vnmsac(vint8mf8_t op0, int8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i8mf8_m))) +vint8mf8_t vnmsac(vbool64_t op0, vint8mf8_t op1, int8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m1))) +vint16m1_t vnmsac(vint16m1_t op0, int16_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m1_m))) +vint16m1_t vnmsac(vbool16_t op0, vint16m1_t op1, int16_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m2))) +vint16m2_t vnmsac(vint16m2_t op0, int16_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m2_m))) +vint16m2_t vnmsac(vbool8_t op0, vint16m2_t op1, int16_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m4))) +vint16m4_t vnmsac(vint16m4_t op0, int16_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m4_m))) +vint16m4_t vnmsac(vbool4_t op0, vint16m4_t op1, int16_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m8))) +vint16m8_t vnmsac(vint16m8_t op0, int16_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16m8_m))) +vint16m8_t vnmsac(vbool2_t op0, vint16m8_t op1, int16_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16mf2))) +vint16mf2_t vnmsac(vint16mf2_t op0, int16_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16mf2_m))) +vint16mf2_t vnmsac(vbool32_t op0, vint16mf2_t op1, int16_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16mf4))) +vint16mf4_t vnmsac(vint16mf4_t op0, int16_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i16mf4_m))) +vint16mf4_t vnmsac(vbool64_t op0, vint16mf4_t op1, int16_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m1))) +vint32m1_t vnmsac(vint32m1_t op0, int32_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m1_m))) +vint32m1_t vnmsac(vbool32_t op0, vint32m1_t op1, int32_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m2))) +vint32m2_t vnmsac(vint32m2_t op0, int32_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m2_m))) +vint32m2_t vnmsac(vbool16_t op0, vint32m2_t op1, int32_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m4))) +vint32m4_t vnmsac(vint32m4_t op0, int32_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m4_m))) +vint32m4_t vnmsac(vbool8_t op0, vint32m4_t op1, int32_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m8))) +vint32m8_t vnmsac(vint32m8_t op0, int32_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32m8_m))) +vint32m8_t vnmsac(vbool4_t op0, vint32m8_t op1, int32_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32mf2))) +vint32mf2_t vnmsac(vint32mf2_t op0, int32_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i32mf2_m))) +vint32mf2_t vnmsac(vbool64_t op0, vint32mf2_t op1, int32_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m1))) +vint64m1_t vnmsac(vint64m1_t op0, int64_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m1_m))) +vint64m1_t vnmsac(vbool64_t op0, vint64m1_t op1, int64_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m2))) +vint64m2_t vnmsac(vint64m2_t op0, int64_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m2_m))) +vint64m2_t vnmsac(vbool32_t op0, vint64m2_t op1, int64_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m4))) +vint64m4_t vnmsac(vint64m4_t op0, int64_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m4_m))) +vint64m4_t vnmsac(vbool16_t op0, vint64m4_t op1, int64_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m8))) +vint64m8_t vnmsac(vint64m8_t op0, int64_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_i64m8_m))) +vint64m8_t vnmsac(vbool8_t op0, vint64m8_t op1, int64_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m1))) +vuint8m1_t vnmsac(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m1_m))) +vuint8m1_t vnmsac(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m2))) +vuint8m2_t vnmsac(vuint8m2_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m2_m))) +vuint8m2_t vnmsac(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m4))) +vuint8m4_t vnmsac(vuint8m4_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m4_m))) +vuint8m4_t vnmsac(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m8))) +vuint8m8_t vnmsac(vuint8m8_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8m8_m))) +vuint8m8_t vnmsac(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8mf2))) +vuint8mf2_t vnmsac(vuint8mf2_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8mf2_m))) +vuint8mf2_t vnmsac(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8mf4))) +vuint8mf4_t vnmsac(vuint8mf4_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8mf4_m))) +vuint8mf4_t vnmsac(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8mf8))) +vuint8mf8_t vnmsac(vuint8mf8_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u8mf8_m))) +vuint8mf8_t vnmsac(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m1))) +vuint16m1_t vnmsac(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m1_m))) +vuint16m1_t vnmsac(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m2))) +vuint16m2_t vnmsac(vuint16m2_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m2_m))) +vuint16m2_t vnmsac(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m4))) +vuint16m4_t vnmsac(vuint16m4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m4_m))) +vuint16m4_t vnmsac(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m8))) +vuint16m8_t vnmsac(vuint16m8_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16m8_m))) +vuint16m8_t vnmsac(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16mf2))) +vuint16mf2_t vnmsac(vuint16mf2_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16mf2_m))) +vuint16mf2_t vnmsac(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16mf4))) +vuint16mf4_t vnmsac(vuint16mf4_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u16mf4_m))) +vuint16mf4_t vnmsac(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m1))) +vuint32m1_t vnmsac(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m1_m))) +vuint32m1_t vnmsac(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m2))) +vuint32m2_t vnmsac(vuint32m2_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m2_m))) +vuint32m2_t vnmsac(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m4))) +vuint32m4_t vnmsac(vuint32m4_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m4_m))) +vuint32m4_t vnmsac(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m8))) +vuint32m8_t vnmsac(vuint32m8_t 
op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32m8_m))) +vuint32m8_t vnmsac(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32mf2))) +vuint32mf2_t vnmsac(vuint32mf2_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u32mf2_m))) +vuint32mf2_t vnmsac(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m1))) +vuint64m1_t vnmsac(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m1_m))) +vuint64m1_t vnmsac(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m2))) +vuint64m2_t vnmsac(vuint64m2_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m2_m))) +vuint64m2_t vnmsac(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m4))) +vuint64m4_t vnmsac(vuint64m4_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m4_m))) +vuint64m4_t vnmsac(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m8))) +vuint64m8_t vnmsac(vuint64m8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vv_u64m8_m))) +vuint64m8_t vnmsac(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m1))) +vuint8m1_t vnmsac(vuint8m1_t op0, uint8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m1_m))) +vuint8m1_t vnmsac(vbool8_t op0, vuint8m1_t op1, uint8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m2))) +vuint8m2_t vnmsac(vuint8m2_t op0, uint8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m2_m))) +vuint8m2_t vnmsac(vbool4_t op0, vuint8m2_t op1, uint8_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m4))) +vuint8m4_t vnmsac(vuint8m4_t op0, uint8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m4_m))) +vuint8m4_t vnmsac(vbool2_t op0, vuint8m4_t op1, uint8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m8))) +vuint8m8_t vnmsac(vuint8m8_t op0, uint8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8m8_m))) +vuint8m8_t vnmsac(vbool1_t op0, vuint8m8_t op1, uint8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8mf2))) +vuint8mf2_t vnmsac(vuint8mf2_t op0, 
uint8_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8mf2_m))) +vuint8mf2_t vnmsac(vbool16_t op0, vuint8mf2_t op1, uint8_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8mf4))) +vuint8mf4_t vnmsac(vuint8mf4_t op0, uint8_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8mf4_m))) +vuint8mf4_t vnmsac(vbool32_t op0, vuint8mf4_t op1, uint8_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8mf8))) +vuint8mf8_t vnmsac(vuint8mf8_t op0, uint8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u8mf8_m))) +vuint8mf8_t vnmsac(vbool64_t op0, vuint8mf8_t op1, uint8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m1))) +vuint16m1_t vnmsac(vuint16m1_t op0, uint16_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m1_m))) +vuint16m1_t vnmsac(vbool16_t op0, vuint16m1_t op1, uint16_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m2))) +vuint16m2_t vnmsac(vuint16m2_t op0, uint16_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m2_m))) +vuint16m2_t vnmsac(vbool8_t op0, vuint16m2_t op1, uint16_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m4))) +vuint16m4_t vnmsac(vuint16m4_t op0, uint16_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m4_m))) +vuint16m4_t vnmsac(vbool4_t op0, vuint16m4_t op1, uint16_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m8))) +vuint16m8_t vnmsac(vuint16m8_t op0, uint16_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16m8_m))) +vuint16m8_t vnmsac(vbool2_t op0, vuint16m8_t op1, uint16_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16mf2))) +vuint16mf2_t vnmsac(vuint16mf2_t op0, uint16_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16mf2_m))) +vuint16mf2_t vnmsac(vbool32_t op0, vuint16mf2_t op1, uint16_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16mf4))) +vuint16mf4_t vnmsac(vuint16mf4_t op0, uint16_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u16mf4_m))) +vuint16mf4_t vnmsac(vbool64_t op0, vuint16mf4_t op1, uint16_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m1))) +vuint32m1_t vnmsac(vuint32m1_t op0, uint32_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m1_m))) +vuint32m1_t vnmsac(vbool32_t op0, vuint32m1_t op1, uint32_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m2))) +vuint32m2_t vnmsac(vuint32m2_t 
op0, uint32_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m2_m))) +vuint32m2_t vnmsac(vbool16_t op0, vuint32m2_t op1, uint32_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m4))) +vuint32m4_t vnmsac(vuint32m4_t op0, uint32_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m4_m))) +vuint32m4_t vnmsac(vbool8_t op0, vuint32m4_t op1, uint32_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m8))) +vuint32m8_t vnmsac(vuint32m8_t op0, uint32_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32m8_m))) +vuint32m8_t vnmsac(vbool4_t op0, vuint32m8_t op1, uint32_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32mf2))) +vuint32mf2_t vnmsac(vuint32mf2_t op0, uint32_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u32mf2_m))) +vuint32mf2_t vnmsac(vbool64_t op0, vuint32mf2_t op1, uint32_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m1))) +vuint64m1_t vnmsac(vuint64m1_t op0, uint64_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m1_m))) +vuint64m1_t vnmsac(vbool64_t op0, vuint64m1_t op1, uint64_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m2))) +vuint64m2_t vnmsac(vuint64m2_t op0, uint64_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m2_m))) +vuint64m2_t vnmsac(vbool32_t op0, vuint64m2_t op1, uint64_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m4))) +vuint64m4_t vnmsac(vuint64m4_t op0, uint64_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m4_m))) +vuint64m4_t vnmsac(vbool16_t op0, vuint64m4_t op1, uint64_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m8))) +vuint64m8_t vnmsac(vuint64m8_t op0, uint64_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsac_vx_u64m8_m))) +vuint64m8_t vnmsac(vbool8_t op0, vuint64m8_t op1, uint64_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m1))) +vint8m1_t vmadd(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m1_m))) +vint8m1_t vmadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m2))) +vint8m2_t vmadd(vint8m2_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m2_m))) +vint8m2_t vmadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m4))) +vint8m4_t vmadd(vint8m4_t op0, vint8m4_t op1, vint8m4_t op2, 
size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m4_m))) +vint8m4_t vmadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m8))) +vint8m8_t vmadd(vint8m8_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8m8_m))) +vint8m8_t vmadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8mf2))) +vint8mf2_t vmadd(vint8mf2_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8mf2_m))) +vint8mf2_t vmadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8mf4))) +vint8mf4_t vmadd(vint8mf4_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8mf4_m))) +vint8mf4_t vmadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8mf8))) +vint8mf8_t vmadd(vint8mf8_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i8mf8_m))) +vint8mf8_t vmadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m1))) +vint16m1_t vmadd(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m1_m))) +vint16m1_t vmadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m2))) +vint16m2_t vmadd(vint16m2_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m2_m))) +vint16m2_t vmadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m4))) +vint16m4_t vmadd(vint16m4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m4_m))) +vint16m4_t vmadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m8))) +vint16m8_t vmadd(vint16m8_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16m8_m))) +vint16m8_t vmadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16mf2))) +vint16mf2_t vmadd(vint16mf2_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16mf2_m))) +vint16mf2_t vmadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16mf4))) +vint16mf4_t vmadd(vint16mf4_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i16mf4_m))) +vint16mf4_t vmadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m1))) +vint32m1_t vmadd(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m1_m))) +vint32m1_t vmadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m2))) +vint32m2_t vmadd(vint32m2_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m2_m))) +vint32m2_t vmadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m4))) +vint32m4_t vmadd(vint32m4_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m4_m))) +vint32m4_t vmadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m8))) +vint32m8_t vmadd(vint32m8_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32m8_m))) +vint32m8_t vmadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32mf2))) +vint32mf2_t vmadd(vint32mf2_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i32mf2_m))) +vint32mf2_t vmadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m1))) +vint64m1_t vmadd(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m1_m))) +vint64m1_t vmadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m2))) +vint64m2_t vmadd(vint64m2_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m2_m))) +vint64m2_t vmadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m4))) +vint64m4_t vmadd(vint64m4_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m4_m))) +vint64m4_t vmadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m8))) +vint64m8_t vmadd(vint64m8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_i64m8_m))) +vint64m8_t vmadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m1))) +vint8m1_t vmadd(vint8m1_t op0, int8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m1_m))) +vint8m1_t vmadd(vbool8_t op0, vint8m1_t op1, int8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m2))) +vint8m2_t vmadd(vint8m2_t op0, int8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m2_m))) +vint8m2_t vmadd(vbool4_t op0, vint8m2_t op1, int8_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m4))) +vint8m4_t vmadd(vint8m4_t op0, int8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m4_m))) +vint8m4_t vmadd(vbool2_t op0, vint8m4_t op1, int8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m8))) +vint8m8_t vmadd(vint8m8_t op0, int8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8m8_m))) +vint8m8_t vmadd(vbool1_t op0, vint8m8_t op1, int8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8mf2))) +vint8mf2_t vmadd(vint8mf2_t op0, int8_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8mf2_m))) +vint8mf2_t vmadd(vbool16_t op0, vint8mf2_t op1, int8_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8mf4))) +vint8mf4_t vmadd(vint8mf4_t op0, int8_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8mf4_m))) +vint8mf4_t vmadd(vbool32_t op0, vint8mf4_t op1, int8_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8mf8))) +vint8mf8_t vmadd(vint8mf8_t op0, int8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i8mf8_m))) +vint8mf8_t vmadd(vbool64_t op0, vint8mf8_t op1, int8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m1))) +vint16m1_t vmadd(vint16m1_t op0, int16_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m1_m))) +vint16m1_t vmadd(vbool16_t op0, vint16m1_t op1, int16_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m2))) +vint16m2_t vmadd(vint16m2_t op0, int16_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m2_m))) +vint16m2_t vmadd(vbool8_t op0, vint16m2_t op1, int16_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m4))) +vint16m4_t vmadd(vint16m4_t op0, int16_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m4_m))) +vint16m4_t vmadd(vbool4_t op0, vint16m4_t op1, int16_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m8))) +vint16m8_t vmadd(vint16m8_t op0, int16_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16m8_m))) +vint16m8_t vmadd(vbool2_t op0, vint16m8_t op1, int16_t op2, vint16m8_t op3, size_t op4); + 
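+// Note: vmadd uses the destination as a multiplicand, vd = (vd * op1) + op2,
+// while vnmsac above negates the product and keeps the destination as the
+// addend, vd = vd - (op1 * op2). The masked overloads take the mask as the
+// first argument and leave inactive lanes as the first vector operand. A
+// minimal illustrative sketch (n, dst and src are assumed to be supplied by
+// the caller):
+//   size_t vl = vsetvl_e16m1(n);
+//   vint16m1_t acc = vle16_v_i16m1(dst, vl);
+//   vint16m1_t addend = vle16_v_i16m1(src, vl);
+//   acc = vmadd(acc, (int16_t)3, addend, vl);  // acc = acc * 3 + addend
+//   vse16(dst, acc, vl);
+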
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16mf2))) +vint16mf2_t vmadd(vint16mf2_t op0, int16_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16mf2_m))) +vint16mf2_t vmadd(vbool32_t op0, vint16mf2_t op1, int16_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16mf4))) +vint16mf4_t vmadd(vint16mf4_t op0, int16_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i16mf4_m))) +vint16mf4_t vmadd(vbool64_t op0, vint16mf4_t op1, int16_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m1))) +vint32m1_t vmadd(vint32m1_t op0, int32_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m1_m))) +vint32m1_t vmadd(vbool32_t op0, vint32m1_t op1, int32_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m2))) +vint32m2_t vmadd(vint32m2_t op0, int32_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m2_m))) +vint32m2_t vmadd(vbool16_t op0, vint32m2_t op1, int32_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m4))) +vint32m4_t vmadd(vint32m4_t op0, int32_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m4_m))) +vint32m4_t vmadd(vbool8_t op0, vint32m4_t op1, int32_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m8))) +vint32m8_t vmadd(vint32m8_t op0, int32_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32m8_m))) +vint32m8_t vmadd(vbool4_t op0, vint32m8_t op1, int32_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32mf2))) +vint32mf2_t vmadd(vint32mf2_t op0, int32_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i32mf2_m))) +vint32mf2_t vmadd(vbool64_t op0, vint32mf2_t op1, int32_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m1))) +vint64m1_t vmadd(vint64m1_t op0, int64_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m1_m))) +vint64m1_t vmadd(vbool64_t op0, vint64m1_t op1, int64_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m2))) +vint64m2_t vmadd(vint64m2_t op0, int64_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m2_m))) +vint64m2_t vmadd(vbool32_t op0, vint64m2_t op1, int64_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m4))) +vint64m4_t vmadd(vint64m4_t op0, int64_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m4_m))) +vint64m4_t vmadd(vbool16_t op0, vint64m4_t op1, int64_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m8))) +vint64m8_t 
vmadd(vint64m8_t op0, int64_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_i64m8_m))) +vint64m8_t vmadd(vbool8_t op0, vint64m8_t op1, int64_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m1))) +vuint8m1_t vmadd(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m1_m))) +vuint8m1_t vmadd(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m2))) +vuint8m2_t vmadd(vuint8m2_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m2_m))) +vuint8m2_t vmadd(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m4))) +vuint8m4_t vmadd(vuint8m4_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m4_m))) +vuint8m4_t vmadd(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m8))) +vuint8m8_t vmadd(vuint8m8_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8m8_m))) +vuint8m8_t vmadd(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8mf2))) +vuint8mf2_t vmadd(vuint8mf2_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8mf2_m))) +vuint8mf2_t vmadd(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8mf4))) +vuint8mf4_t vmadd(vuint8mf4_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8mf4_m))) +vuint8mf4_t vmadd(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8mf8))) +vuint8mf8_t vmadd(vuint8mf8_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u8mf8_m))) +vuint8mf8_t vmadd(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m1))) +vuint16m1_t vmadd(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m1_m))) +vuint16m1_t vmadd(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m2))) +vuint16m2_t vmadd(vuint16m2_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m2_m))) +vuint16m2_t vmadd(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m4))) +vuint16m4_t vmadd(vuint16m4_t op0, vuint16m4_t op1, 
vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m4_m))) +vuint16m4_t vmadd(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m8))) +vuint16m8_t vmadd(vuint16m8_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16m8_m))) +vuint16m8_t vmadd(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16mf2))) +vuint16mf2_t vmadd(vuint16mf2_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16mf2_m))) +vuint16mf2_t vmadd(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16mf4))) +vuint16mf4_t vmadd(vuint16mf4_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u16mf4_m))) +vuint16mf4_t vmadd(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m1))) +vuint32m1_t vmadd(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m1_m))) +vuint32m1_t vmadd(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m2))) +vuint32m2_t vmadd(vuint32m2_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m2_m))) +vuint32m2_t vmadd(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m4))) +vuint32m4_t vmadd(vuint32m4_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m4_m))) +vuint32m4_t vmadd(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m8))) +vuint32m8_t vmadd(vuint32m8_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32m8_m))) +vuint32m8_t vmadd(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32mf2))) +vuint32mf2_t vmadd(vuint32mf2_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u32mf2_m))) +vuint32mf2_t vmadd(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m1))) +vuint64m1_t vmadd(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m1_m))) +vuint64m1_t vmadd(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m2))) 
+vuint64m2_t vmadd(vuint64m2_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m2_m))) +vuint64m2_t vmadd(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m4))) +vuint64m4_t vmadd(vuint64m4_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m4_m))) +vuint64m4_t vmadd(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m8))) +vuint64m8_t vmadd(vuint64m8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vv_u64m8_m))) +vuint64m8_t vmadd(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m1))) +vuint8m1_t vmadd(vuint8m1_t op0, uint8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m1_m))) +vuint8m1_t vmadd(vbool8_t op0, vuint8m1_t op1, uint8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m2))) +vuint8m2_t vmadd(vuint8m2_t op0, uint8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m2_m))) +vuint8m2_t vmadd(vbool4_t op0, vuint8m2_t op1, uint8_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m4))) +vuint8m4_t vmadd(vuint8m4_t op0, uint8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m4_m))) +vuint8m4_t vmadd(vbool2_t op0, vuint8m4_t op1, uint8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m8))) +vuint8m8_t vmadd(vuint8m8_t op0, uint8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8m8_m))) +vuint8m8_t vmadd(vbool1_t op0, vuint8m8_t op1, uint8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8mf2))) +vuint8mf2_t vmadd(vuint8mf2_t op0, uint8_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8mf2_m))) +vuint8mf2_t vmadd(vbool16_t op0, vuint8mf2_t op1, uint8_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8mf4))) +vuint8mf4_t vmadd(vuint8mf4_t op0, uint8_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8mf4_m))) +vuint8mf4_t vmadd(vbool32_t op0, vuint8mf4_t op1, uint8_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8mf8))) +vuint8mf8_t vmadd(vuint8mf8_t op0, uint8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u8mf8_m))) +vuint8mf8_t vmadd(vbool64_t op0, vuint8mf8_t op1, uint8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m1))) +vuint16m1_t vmadd(vuint16m1_t op0, uint16_t op1, vuint16m1_t op2, size_t 
op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m1_m))) +vuint16m1_t vmadd(vbool16_t op0, vuint16m1_t op1, uint16_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m2))) +vuint16m2_t vmadd(vuint16m2_t op0, uint16_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m2_m))) +vuint16m2_t vmadd(vbool8_t op0, vuint16m2_t op1, uint16_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m4))) +vuint16m4_t vmadd(vuint16m4_t op0, uint16_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m4_m))) +vuint16m4_t vmadd(vbool4_t op0, vuint16m4_t op1, uint16_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m8))) +vuint16m8_t vmadd(vuint16m8_t op0, uint16_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16m8_m))) +vuint16m8_t vmadd(vbool2_t op0, vuint16m8_t op1, uint16_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16mf2))) +vuint16mf2_t vmadd(vuint16mf2_t op0, uint16_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16mf2_m))) +vuint16mf2_t vmadd(vbool32_t op0, vuint16mf2_t op1, uint16_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16mf4))) +vuint16mf4_t vmadd(vuint16mf4_t op0, uint16_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u16mf4_m))) +vuint16mf4_t vmadd(vbool64_t op0, vuint16mf4_t op1, uint16_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m1))) +vuint32m1_t vmadd(vuint32m1_t op0, uint32_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m1_m))) +vuint32m1_t vmadd(vbool32_t op0, vuint32m1_t op1, uint32_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m2))) +vuint32m2_t vmadd(vuint32m2_t op0, uint32_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m2_m))) +vuint32m2_t vmadd(vbool16_t op0, vuint32m2_t op1, uint32_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m4))) +vuint32m4_t vmadd(vuint32m4_t op0, uint32_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m4_m))) +vuint32m4_t vmadd(vbool8_t op0, vuint32m4_t op1, uint32_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m8))) +vuint32m8_t vmadd(vuint32m8_t op0, uint32_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32m8_m))) +vuint32m8_t vmadd(vbool4_t op0, vuint32m8_t op1, uint32_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32mf2))) +vuint32mf2_t vmadd(vuint32mf2_t op0, uint32_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u32mf2_m))) +vuint32mf2_t vmadd(vbool64_t op0, vuint32mf2_t op1, uint32_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m1))) +vuint64m1_t vmadd(vuint64m1_t op0, uint64_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m1_m))) +vuint64m1_t vmadd(vbool64_t op0, vuint64m1_t op1, uint64_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m2))) +vuint64m2_t vmadd(vuint64m2_t op0, uint64_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m2_m))) +vuint64m2_t vmadd(vbool32_t op0, vuint64m2_t op1, uint64_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m4))) +vuint64m4_t vmadd(vuint64m4_t op0, uint64_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m4_m))) +vuint64m4_t vmadd(vbool16_t op0, vuint64m4_t op1, uint64_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m8))) +vuint64m8_t vmadd(vuint64m8_t op0, uint64_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmadd_vx_u64m8_m))) +vuint64m8_t vmadd(vbool8_t op0, vuint64m8_t op1, uint64_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m1))) +vint8m1_t vnmsub(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m1_m))) +vint8m1_t vnmsub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m2))) +vint8m2_t vnmsub(vint8m2_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m2_m))) +vint8m2_t vnmsub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m4))) +vint8m4_t vnmsub(vint8m4_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m4_m))) +vint8m4_t vnmsub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m8))) +vint8m8_t vnmsub(vint8m8_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8m8_m))) +vint8m8_t vnmsub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8mf2))) +vint8mf2_t vnmsub(vint8mf2_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8mf2_m))) +vint8mf2_t vnmsub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8mf4))) +vint8mf4_t vnmsub(vint8mf4_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8mf4_m))) 
+vint8mf4_t vnmsub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8mf8))) +vint8mf8_t vnmsub(vint8mf8_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i8mf8_m))) +vint8mf8_t vnmsub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m1))) +vint16m1_t vnmsub(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m1_m))) +vint16m1_t vnmsub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m2))) +vint16m2_t vnmsub(vint16m2_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m2_m))) +vint16m2_t vnmsub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m4))) +vint16m4_t vnmsub(vint16m4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m4_m))) +vint16m4_t vnmsub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m8))) +vint16m8_t vnmsub(vint16m8_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16m8_m))) +vint16m8_t vnmsub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16mf2))) +vint16mf2_t vnmsub(vint16mf2_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16mf2_m))) +vint16mf2_t vnmsub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16mf4))) +vint16mf4_t vnmsub(vint16mf4_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i16mf4_m))) +vint16mf4_t vnmsub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m1))) +vint32m1_t vnmsub(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m1_m))) +vint32m1_t vnmsub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m2))) +vint32m2_t vnmsub(vint32m2_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m2_m))) +vint32m2_t vnmsub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m4))) +vint32m4_t vnmsub(vint32m4_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m4_m))) 
+vint32m4_t vnmsub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m8))) +vint32m8_t vnmsub(vint32m8_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32m8_m))) +vint32m8_t vnmsub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32mf2))) +vint32mf2_t vnmsub(vint32mf2_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i32mf2_m))) +vint32mf2_t vnmsub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m1))) +vint64m1_t vnmsub(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m1_m))) +vint64m1_t vnmsub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m2))) +vint64m2_t vnmsub(vint64m2_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m2_m))) +vint64m2_t vnmsub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m4))) +vint64m4_t vnmsub(vint64m4_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m4_m))) +vint64m4_t vnmsub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m8))) +vint64m8_t vnmsub(vint64m8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_i64m8_m))) +vint64m8_t vnmsub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m1))) +vint8m1_t vnmsub(vint8m1_t op0, int8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m1_m))) +vint8m1_t vnmsub(vbool8_t op0, vint8m1_t op1, int8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m2))) +vint8m2_t vnmsub(vint8m2_t op0, int8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m2_m))) +vint8m2_t vnmsub(vbool4_t op0, vint8m2_t op1, int8_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m4))) +vint8m4_t vnmsub(vint8m4_t op0, int8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m4_m))) +vint8m4_t vnmsub(vbool2_t op0, vint8m4_t op1, int8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m8))) +vint8m8_t vnmsub(vint8m8_t op0, int8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8m8_m))) +vint8m8_t vnmsub(vbool1_t op0, vint8m8_t op1, int8_t op2, vint8m8_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8mf2))) +vint8mf2_t vnmsub(vint8mf2_t op0, int8_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8mf2_m))) +vint8mf2_t vnmsub(vbool16_t op0, vint8mf2_t op1, int8_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8mf4))) +vint8mf4_t vnmsub(vint8mf4_t op0, int8_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8mf4_m))) +vint8mf4_t vnmsub(vbool32_t op0, vint8mf4_t op1, int8_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8mf8))) +vint8mf8_t vnmsub(vint8mf8_t op0, int8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i8mf8_m))) +vint8mf8_t vnmsub(vbool64_t op0, vint8mf8_t op1, int8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m1))) +vint16m1_t vnmsub(vint16m1_t op0, int16_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m1_m))) +vint16m1_t vnmsub(vbool16_t op0, vint16m1_t op1, int16_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m2))) +vint16m2_t vnmsub(vint16m2_t op0, int16_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m2_m))) +vint16m2_t vnmsub(vbool8_t op0, vint16m2_t op1, int16_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m4))) +vint16m4_t vnmsub(vint16m4_t op0, int16_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m4_m))) +vint16m4_t vnmsub(vbool4_t op0, vint16m4_t op1, int16_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m8))) +vint16m8_t vnmsub(vint16m8_t op0, int16_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16m8_m))) +vint16m8_t vnmsub(vbool2_t op0, vint16m8_t op1, int16_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16mf2))) +vint16mf2_t vnmsub(vint16mf2_t op0, int16_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16mf2_m))) +vint16mf2_t vnmsub(vbool32_t op0, vint16mf2_t op1, int16_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16mf4))) +vint16mf4_t vnmsub(vint16mf4_t op0, int16_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i16mf4_m))) +vint16mf4_t vnmsub(vbool64_t op0, vint16mf4_t op1, int16_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m1))) +vint32m1_t vnmsub(vint32m1_t op0, int32_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m1_m))) +vint32m1_t vnmsub(vbool32_t op0, vint32m1_t op1, int32_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m2))) +vint32m2_t vnmsub(vint32m2_t op0, int32_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m2_m))) +vint32m2_t vnmsub(vbool16_t op0, vint32m2_t op1, int32_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m4))) +vint32m4_t vnmsub(vint32m4_t op0, int32_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m4_m))) +vint32m4_t vnmsub(vbool8_t op0, vint32m4_t op1, int32_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m8))) +vint32m8_t vnmsub(vint32m8_t op0, int32_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32m8_m))) +vint32m8_t vnmsub(vbool4_t op0, vint32m8_t op1, int32_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32mf2))) +vint32mf2_t vnmsub(vint32mf2_t op0, int32_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i32mf2_m))) +vint32mf2_t vnmsub(vbool64_t op0, vint32mf2_t op1, int32_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m1))) +vint64m1_t vnmsub(vint64m1_t op0, int64_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m1_m))) +vint64m1_t vnmsub(vbool64_t op0, vint64m1_t op1, int64_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m2))) +vint64m2_t vnmsub(vint64m2_t op0, int64_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m2_m))) +vint64m2_t vnmsub(vbool32_t op0, vint64m2_t op1, int64_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m4))) +vint64m4_t vnmsub(vint64m4_t op0, int64_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m4_m))) +vint64m4_t vnmsub(vbool16_t op0, vint64m4_t op1, int64_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m8))) +vint64m8_t vnmsub(vint64m8_t op0, int64_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_i64m8_m))) +vint64m8_t vnmsub(vbool8_t op0, vint64m8_t op1, int64_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m1))) +vuint8m1_t vnmsub(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m1_m))) +vuint8m1_t vnmsub(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m2))) +vuint8m2_t vnmsub(vuint8m2_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m2_m))) +vuint8m2_t vnmsub(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m4))) 
+vuint8m4_t vnmsub(vuint8m4_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m4_m))) +vuint8m4_t vnmsub(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m8))) +vuint8m8_t vnmsub(vuint8m8_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8m8_m))) +vuint8m8_t vnmsub(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8mf2))) +vuint8mf2_t vnmsub(vuint8mf2_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8mf2_m))) +vuint8mf2_t vnmsub(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8mf4))) +vuint8mf4_t vnmsub(vuint8mf4_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8mf4_m))) +vuint8mf4_t vnmsub(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8mf8))) +vuint8mf8_t vnmsub(vuint8mf8_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u8mf8_m))) +vuint8mf8_t vnmsub(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m1))) +vuint16m1_t vnmsub(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m1_m))) +vuint16m1_t vnmsub(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m2))) +vuint16m2_t vnmsub(vuint16m2_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m2_m))) +vuint16m2_t vnmsub(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m4))) +vuint16m4_t vnmsub(vuint16m4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m4_m))) +vuint16m4_t vnmsub(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m8))) +vuint16m8_t vnmsub(vuint16m8_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16m8_m))) +vuint16m8_t vnmsub(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16mf2))) +vuint16mf2_t vnmsub(vuint16mf2_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16mf2_m))) +vuint16mf2_t vnmsub(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16mf4))) +vuint16mf4_t vnmsub(vuint16mf4_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u16mf4_m))) +vuint16mf4_t vnmsub(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m1))) +vuint32m1_t vnmsub(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m1_m))) +vuint32m1_t vnmsub(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m2))) +vuint32m2_t vnmsub(vuint32m2_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m2_m))) +vuint32m2_t vnmsub(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m4))) +vuint32m4_t vnmsub(vuint32m4_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m4_m))) +vuint32m4_t vnmsub(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m8))) +vuint32m8_t vnmsub(vuint32m8_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32m8_m))) +vuint32m8_t vnmsub(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32mf2))) +vuint32mf2_t vnmsub(vuint32mf2_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u32mf2_m))) +vuint32mf2_t vnmsub(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m1))) +vuint64m1_t vnmsub(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m1_m))) +vuint64m1_t vnmsub(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m2))) +vuint64m2_t vnmsub(vuint64m2_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m2_m))) +vuint64m2_t vnmsub(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m4))) +vuint64m4_t vnmsub(vuint64m4_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m4_m))) +vuint64m4_t vnmsub(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m8))) +vuint64m8_t vnmsub(vuint64m8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vv_u64m8_m))) +vuint64m8_t vnmsub(vbool8_t op0, vuint64m8_t 
op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m1))) +void vsuxei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m1_m))) +void vsuxei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m2))) +void vsuxei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m2_m))) +void vsuxei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m4))) +void vsuxei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m4_m))) +void vsuxei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m8))) +void vsuxei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32m8_m))) +void vsuxei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32mf2))) +void vsuxei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u32mf2_m))) +void vsuxei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m1))) +vuint8m1_t vnmsub(vuint8m1_t op0, uint8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m1_m))) +vuint8m1_t vnmsub(vbool8_t op0, vuint8m1_t op1, uint8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m2))) +vuint8m2_t vnmsub(vuint8m2_t op0, uint8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m2_m))) +vuint8m2_t vnmsub(vbool4_t op0, vuint8m2_t op1, uint8_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m4))) +vuint8m4_t vnmsub(vuint8m4_t op0, uint8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m4_m))) +vuint8m4_t vnmsub(vbool2_t op0, vuint8m4_t op1, uint8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m8))) +vuint8m8_t vnmsub(vuint8m8_t op0, uint8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8m8_m))) +vuint8m8_t vnmsub(vbool1_t op0, vuint8m8_t op1, uint8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8mf2))) +vuint8mf2_t vnmsub(vuint8mf2_t op0, uint8_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8mf2_m))) +vuint8mf2_t vnmsub(vbool16_t op0, vuint8mf2_t op1, uint8_t op2, vuint8mf2_t op3, size_t op4); + 
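+// Note: the vsuxei16 overloads above are unordered indexed stores: op1 holds
+// unsigned 16-bit byte offsets that are added to the base pointer, so element
+// stores may complete in any order. vnmsub mirrors vmadd with a negated
+// product, vd = op2 - (vd * op1). Illustrative masked call (mask, acc, addend
+// and vl assumed in scope, with acc of type vuint8m1_t):
+//   acc = vnmsub(mask, acc, (uint8_t)5, addend, vl);  // inactive lanes keep acc
+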
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8mf4))) +vuint8mf4_t vnmsub(vuint8mf4_t op0, uint8_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8mf4_m))) +vuint8mf4_t vnmsub(vbool32_t op0, vuint8mf4_t op1, uint8_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8mf8))) +vuint8mf8_t vnmsub(vuint8mf8_t op0, uint8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u8mf8_m))) +vuint8mf8_t vnmsub(vbool64_t op0, vuint8mf8_t op1, uint8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m1))) +vuint16m1_t vnmsub(vuint16m1_t op0, uint16_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m1_m))) +vuint16m1_t vnmsub(vbool16_t op0, vuint16m1_t op1, uint16_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m2))) +vuint16m2_t vnmsub(vuint16m2_t op0, uint16_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m2_m))) +vuint16m2_t vnmsub(vbool8_t op0, vuint16m2_t op1, uint16_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m4))) +vuint16m4_t vnmsub(vuint16m4_t op0, uint16_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m4_m))) +vuint16m4_t vnmsub(vbool4_t op0, vuint16m4_t op1, uint16_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m8))) +vuint16m8_t vnmsub(vuint16m8_t op0, uint16_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16m8_m))) +vuint16m8_t vnmsub(vbool2_t op0, vuint16m8_t op1, uint16_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16mf2))) +vuint16mf2_t vnmsub(vuint16mf2_t op0, uint16_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16mf2_m))) +vuint16mf2_t vnmsub(vbool32_t op0, vuint16mf2_t op1, uint16_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16mf4))) +vuint16mf4_t vnmsub(vuint16mf4_t op0, uint16_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u16mf4_m))) +vuint16mf4_t vnmsub(vbool64_t op0, vuint16mf4_t op1, uint16_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m1))) +vuint32m1_t vnmsub(vuint32m1_t op0, uint32_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m1_m))) +vuint32m1_t vnmsub(vbool32_t op0, vuint32m1_t op1, uint32_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m2))) +vuint32m2_t vnmsub(vuint32m2_t op0, uint32_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m2_m))) +vuint32m2_t vnmsub(vbool16_t op0, vuint32m2_t op1, uint32_t op2, vuint32m2_t op3, size_t 
op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m4))) +vuint32m4_t vnmsub(vuint32m4_t op0, uint32_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m4_m))) +vuint32m4_t vnmsub(vbool8_t op0, vuint32m4_t op1, uint32_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m8))) +vuint32m8_t vnmsub(vuint32m8_t op0, uint32_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32m8_m))) +vuint32m8_t vnmsub(vbool4_t op0, vuint32m8_t op1, uint32_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32mf2))) +vuint32mf2_t vnmsub(vuint32mf2_t op0, uint32_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u32mf2_m))) +vuint32mf2_t vnmsub(vbool64_t op0, vuint32mf2_t op1, uint32_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m1))) +vuint64m1_t vnmsub(vuint64m1_t op0, uint64_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m1_m))) +vuint64m1_t vnmsub(vbool64_t op0, vuint64m1_t op1, uint64_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m2))) +vuint64m2_t vnmsub(vuint64m2_t op0, uint64_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m2_m))) +vuint64m2_t vnmsub(vbool32_t op0, vuint64m2_t op1, uint64_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m4))) +vuint64m4_t vnmsub(vuint64m4_t op0, uint64_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m4_m))) +vuint64m4_t vnmsub(vbool16_t op0, vuint64m4_t op1, uint64_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m8))) +vuint64m8_t vnmsub(vuint64m8_t op0, uint64_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnmsub_vx_u64m8_m))) +vuint64m8_t vnmsub(vbool8_t op0, vuint64m8_t op1, uint64_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16mf4))) +vuint16mf4_t vwmaccu(vuint16mf4_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16mf4_m))) +vuint16mf4_t vwmaccu(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16mf2))) +vuint16mf2_t vwmaccu(vuint16mf2_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16mf2_m))) +vuint16mf2_t vwmaccu(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m1))) +vuint16m1_t vwmaccu(vuint16m1_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m1_m))) +vuint16m1_t vwmaccu(vbool16_t op0, vuint16m1_t 
op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m2))) +vuint16m2_t vwmaccu(vuint16m2_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m2_m))) +vuint16m2_t vwmaccu(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m4))) +vuint16m4_t vwmaccu(vuint16m4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m4_m))) +vuint16m4_t vwmaccu(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m8))) +vuint16m8_t vwmaccu(vuint16m8_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u16m8_m))) +vuint16m8_t vwmaccu(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32mf2))) +vuint32mf2_t vwmaccu(vuint32mf2_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32mf2_m))) +vuint32mf2_t vwmaccu(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m1))) +vuint32m1_t vwmaccu(vuint32m1_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m1_m))) +vuint32m1_t vwmaccu(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m2))) +vuint32m2_t vwmaccu(vuint32m2_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m2_m))) +vuint32m2_t vwmaccu(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m4))) +vuint32m4_t vwmaccu(vuint32m4_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m4_m))) +vuint32m4_t vwmaccu(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m8))) +vuint32m8_t vwmaccu(vuint32m8_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u32m8_m))) +vuint32m8_t vwmaccu(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m1))) +vuint64m1_t vwmaccu(vuint64m1_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m1_m))) +vuint64m1_t vwmaccu(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m2))) +vuint64m2_t vwmaccu(vuint64m2_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m2_m))) +vuint64m2_t vwmaccu(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m4))) +vuint64m4_t vwmaccu(vuint64m4_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m4_m))) +vuint64m4_t vwmaccu(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m8))) +vuint64m8_t vwmaccu(vuint64m8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vv_u64m8_m))) +vuint64m8_t vwmaccu(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16mf4))) +vuint16mf4_t vwmaccu(vuint16mf4_t op0, uint8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16mf4_m))) +vuint16mf4_t vwmaccu(vbool64_t op0, vuint16mf4_t op1, uint8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16mf2))) +vuint16mf2_t vwmaccu(vuint16mf2_t op0, uint8_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16mf2_m))) +vuint16mf2_t vwmaccu(vbool32_t op0, vuint16mf2_t op1, uint8_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m1))) +vuint16m1_t vwmaccu(vuint16m1_t op0, uint8_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m1_m))) +vuint16m1_t vwmaccu(vbool16_t op0, vuint16m1_t op1, uint8_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m2))) +vuint16m2_t vwmaccu(vuint16m2_t op0, uint8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m2_m))) +vuint16m2_t vwmaccu(vbool8_t op0, vuint16m2_t op1, uint8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m4))) +vuint16m4_t vwmaccu(vuint16m4_t op0, uint8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m4_m))) +vuint16m4_t vwmaccu(vbool4_t op0, vuint16m4_t op1, uint8_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m8))) +vuint16m8_t vwmaccu(vuint16m8_t op0, uint8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u16m8_m))) +vuint16m8_t vwmaccu(vbool2_t op0, vuint16m8_t op1, uint8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32mf2))) +vuint32mf2_t vwmaccu(vuint32mf2_t op0, uint16_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32mf2_m))) +vuint32mf2_t vwmaccu(vbool64_t op0, vuint32mf2_t op1, uint16_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m1))) +vuint32m1_t vwmaccu(vuint32m1_t op0, uint16_t op1, 
vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m1_m))) +vuint32m1_t vwmaccu(vbool32_t op0, vuint32m1_t op1, uint16_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m2))) +vuint32m2_t vwmaccu(vuint32m2_t op0, uint16_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m2_m))) +vuint32m2_t vwmaccu(vbool16_t op0, vuint32m2_t op1, uint16_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m4))) +vuint32m4_t vwmaccu(vuint32m4_t op0, uint16_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m4_m))) +vuint32m4_t vwmaccu(vbool8_t op0, vuint32m4_t op1, uint16_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m8))) +vuint32m8_t vwmaccu(vuint32m8_t op0, uint16_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u32m8_m))) +vuint32m8_t vwmaccu(vbool4_t op0, vuint32m8_t op1, uint16_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m1))) +vuint64m1_t vwmaccu(vuint64m1_t op0, uint32_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m1_m))) +vuint64m1_t vwmaccu(vbool64_t op0, vuint64m1_t op1, uint32_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m2))) +vuint64m2_t vwmaccu(vuint64m2_t op0, uint32_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m2_m))) +vuint64m2_t vwmaccu(vbool32_t op0, vuint64m2_t op1, uint32_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m4))) +vuint64m4_t vwmaccu(vuint64m4_t op0, uint32_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m4_m))) +vuint64m4_t vwmaccu(vbool16_t op0, vuint64m4_t op1, uint32_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m8))) +vuint64m8_t vwmaccu(vuint64m8_t op0, uint32_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccu_vx_u64m8_m))) +vuint64m8_t vwmaccu(vbool8_t op0, vuint64m8_t op1, uint32_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16mf4))) +vint16mf4_t vwmacc(vint16mf4_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16mf4_m))) +vint16mf4_t vwmacc(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16mf2))) +vint16mf2_t vwmacc(vint16mf2_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16mf2_m))) +vint16mf2_t vwmacc(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m1))) +vint16m1_t 
vwmacc(vint16m1_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m1_m))) +vint16m1_t vwmacc(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m2))) +vint16m2_t vwmacc(vint16m2_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m2_m))) +vint16m2_t vwmacc(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m4))) +vint16m4_t vwmacc(vint16m4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m4_m))) +vint16m4_t vwmacc(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m8))) +vint16m8_t vwmacc(vint16m8_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i16m8_m))) +vint16m8_t vwmacc(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32mf2))) +vint32mf2_t vwmacc(vint32mf2_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32mf2_m))) +vint32mf2_t vwmacc(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m1))) +vint32m1_t vwmacc(vint32m1_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m1_m))) +vint32m1_t vwmacc(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m2))) +vint32m2_t vwmacc(vint32m2_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m2_m))) +vint32m2_t vwmacc(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m4))) +vint32m4_t vwmacc(vint32m4_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m4_m))) +vint32m4_t vwmacc(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m8))) +vint32m8_t vwmacc(vint32m8_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i32m8_m))) +vint32m8_t vwmacc(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m1))) +vint64m1_t vwmacc(vint64m1_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m1_m))) +vint64m1_t vwmacc(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m2))) +vint64m2_t vwmacc(vint64m2_t op0, 
vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m2_m))) +vint64m2_t vwmacc(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m4))) +vint64m4_t vwmacc(vint64m4_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m4_m))) +vint64m4_t vwmacc(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m8))) +vint64m8_t vwmacc(vint64m8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vv_i64m8_m))) +vint64m8_t vwmacc(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16mf4))) +vint16mf4_t vwmacc(vint16mf4_t op0, int8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16mf4_m))) +vint16mf4_t vwmacc(vbool64_t op0, vint16mf4_t op1, int8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16mf2))) +vint16mf2_t vwmacc(vint16mf2_t op0, int8_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16mf2_m))) +vint16mf2_t vwmacc(vbool32_t op0, vint16mf2_t op1, int8_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m1))) +vint16m1_t vwmacc(vint16m1_t op0, int8_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m1_m))) +vint16m1_t vwmacc(vbool16_t op0, vint16m1_t op1, int8_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m2))) +vint16m2_t vwmacc(vint16m2_t op0, int8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m2_m))) +vint16m2_t vwmacc(vbool8_t op0, vint16m2_t op1, int8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m4))) +vint16m4_t vwmacc(vint16m4_t op0, int8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m4_m))) +vint16m4_t vwmacc(vbool4_t op0, vint16m4_t op1, int8_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m8))) +vint16m8_t vwmacc(vint16m8_t op0, int8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i16m8_m))) +vint16m8_t vwmacc(vbool2_t op0, vint16m8_t op1, int8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32mf2))) +vint32mf2_t vwmacc(vint32mf2_t op0, int16_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32mf2_m))) +vint32mf2_t vwmacc(vbool64_t op0, vint32mf2_t op1, int16_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m1))) +vint32m1_t vwmacc(vint32m1_t op0, int16_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m1_m))) +vint32m1_t vwmacc(vbool32_t op0, vint32m1_t op1, int16_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m2))) +vint32m2_t vwmacc(vint32m2_t op0, int16_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m2_m))) +vint32m2_t vwmacc(vbool16_t op0, vint32m2_t op1, int16_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m4))) +vint32m4_t vwmacc(vint32m4_t op0, int16_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m4_m))) +vint32m4_t vwmacc(vbool8_t op0, vint32m4_t op1, int16_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m8))) +vint32m8_t vwmacc(vint32m8_t op0, int16_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i32m8_m))) +vint32m8_t vwmacc(vbool4_t op0, vint32m8_t op1, int16_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m1))) +vint64m1_t vwmacc(vint64m1_t op0, int32_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m1_m))) +vint64m1_t vwmacc(vbool64_t op0, vint64m1_t op1, int32_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m2))) +vint64m2_t vwmacc(vint64m2_t op0, int32_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m2_m))) +vint64m2_t vwmacc(vbool32_t op0, vint64m2_t op1, int32_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m4))) +vint64m4_t vwmacc(vint64m4_t op0, int32_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m4_m))) +vint64m4_t vwmacc(vbool16_t op0, vint64m4_t op1, int32_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m8))) +vint64m8_t vwmacc(vint64m8_t op0, int32_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmacc_vx_i64m8_m))) +vint64m8_t vwmacc(vbool8_t op0, vint64m8_t op1, int32_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16mf4))) +vint16mf4_t vwmaccsu(vint16mf4_t op0, vint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16mf4_m))) +vint16mf4_t vwmaccsu(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16mf2))) +vint16mf2_t vwmaccsu(vint16mf2_t op0, vint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16mf2_m))) +vint16mf2_t vwmaccsu(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m1))) +vint16m1_t vwmaccsu(vint16m1_t op0, vint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m1_m))) +vint16m1_t vwmaccsu(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m2))) +vint16m2_t vwmaccsu(vint16m2_t op0, vint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m2_m))) +vint16m2_t vwmaccsu(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m4))) +vint16m4_t vwmaccsu(vint16m4_t op0, vint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m4_m))) +vint16m4_t vwmaccsu(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m8))) +vint16m8_t vwmaccsu(vint16m8_t op0, vint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i16m8_m))) +vint16m8_t vwmaccsu(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32mf2))) +vint32mf2_t vwmaccsu(vint32mf2_t op0, vint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32mf2_m))) +vint32mf2_t vwmaccsu(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m1))) +vint32m1_t vwmaccsu(vint32m1_t op0, vint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m1_m))) +vint32m1_t vwmaccsu(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m2))) +vint32m2_t vwmaccsu(vint32m2_t op0, vint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m2_m))) +vint32m2_t vwmaccsu(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m4))) +vint32m4_t vwmaccsu(vint32m4_t op0, vint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m4_m))) +vint32m4_t vwmaccsu(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m8))) +vint32m8_t vwmaccsu(vint32m8_t op0, vint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i32m8_m))) +vint32m8_t vwmaccsu(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m1))) +vint64m1_t vwmaccsu(vint64m1_t op0, vint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m1_m))) +vint64m1_t vwmaccsu(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m2))) +vint64m2_t 
vwmaccsu(vint64m2_t op0, vint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m2_m))) +vint64m2_t vwmaccsu(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m4))) +vint64m4_t vwmaccsu(vint64m4_t op0, vint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m4_m))) +vint64m4_t vwmaccsu(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m8))) +vint64m8_t vwmaccsu(vint64m8_t op0, vint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vv_i64m8_m))) +vint64m8_t vwmaccsu(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16mf4))) +vint16mf4_t vwmaccsu(vint16mf4_t op0, int8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16mf4_m))) +vint16mf4_t vwmaccsu(vbool64_t op0, vint16mf4_t op1, int8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16mf2))) +vint16mf2_t vwmaccsu(vint16mf2_t op0, int8_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16mf2_m))) +vint16mf2_t vwmaccsu(vbool32_t op0, vint16mf2_t op1, int8_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m1))) +vint16m1_t vwmaccsu(vint16m1_t op0, int8_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m1_m))) +vint16m1_t vwmaccsu(vbool16_t op0, vint16m1_t op1, int8_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m2))) +vint16m2_t vwmaccsu(vint16m2_t op0, int8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m2_m))) +vint16m2_t vwmaccsu(vbool8_t op0, vint16m2_t op1, int8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m4))) +vint16m4_t vwmaccsu(vint16m4_t op0, int8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m4_m))) +vint16m4_t vwmaccsu(vbool4_t op0, vint16m4_t op1, int8_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m8))) +vint16m8_t vwmaccsu(vint16m8_t op0, int8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i16m8_m))) +vint16m8_t vwmaccsu(vbool2_t op0, vint16m8_t op1, int8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32mf2))) +vint32mf2_t vwmaccsu(vint32mf2_t op0, int16_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32mf2_m))) +vint32mf2_t vwmaccsu(vbool64_t op0, vint32mf2_t op1, int16_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m1))) +vint32m1_t vwmaccsu(vint32m1_t op0, int16_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m1_m))) +vint32m1_t vwmaccsu(vbool32_t op0, vint32m1_t op1, int16_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m2))) +vint32m2_t vwmaccsu(vint32m2_t op0, int16_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m2_m))) +vint32m2_t vwmaccsu(vbool16_t op0, vint32m2_t op1, int16_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m4))) +vint32m4_t vwmaccsu(vint32m4_t op0, int16_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m4_m))) +vint32m4_t vwmaccsu(vbool8_t op0, vint32m4_t op1, int16_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m8))) +vint32m8_t vwmaccsu(vint32m8_t op0, int16_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i32m8_m))) +vint32m8_t vwmaccsu(vbool4_t op0, vint32m8_t op1, int16_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m1))) +vint64m1_t vwmaccsu(vint64m1_t op0, int32_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m1_m))) +vint64m1_t vwmaccsu(vbool64_t op0, vint64m1_t op1, int32_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m2))) +vint64m2_t vwmaccsu(vint64m2_t op0, int32_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m2_m))) +vint64m2_t vwmaccsu(vbool32_t op0, vint64m2_t op1, int32_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m4))) +vint64m4_t vwmaccsu(vint64m4_t op0, int32_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m4_m))) +vint64m4_t vwmaccsu(vbool16_t op0, vint64m4_t op1, int32_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m8))) +vint64m8_t vwmaccsu(vint64m8_t op0, int32_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccsu_vx_i64m8_m))) +vint64m8_t vwmaccsu(vbool8_t op0, vint64m8_t op1, int32_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16mf4))) +vint16mf4_t vwmaccus(vint16mf4_t op0, uint8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16mf4_m))) +vint16mf4_t vwmaccus(vbool64_t op0, vint16mf4_t op1, uint8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16mf2))) +vint16mf2_t vwmaccus(vint16mf2_t op0, uint8_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16mf2_m))) +vint16mf2_t vwmaccus(vbool32_t op0, vint16mf2_t op1, uint8_t op2, vint8mf4_t op3, size_t 
op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m1))) +vint16m1_t vwmaccus(vint16m1_t op0, uint8_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m1_m))) +vint16m1_t vwmaccus(vbool16_t op0, vint16m1_t op1, uint8_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m2))) +vint16m2_t vwmaccus(vint16m2_t op0, uint8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m2_m))) +vint16m2_t vwmaccus(vbool8_t op0, vint16m2_t op1, uint8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m4))) +vint16m4_t vwmaccus(vint16m4_t op0, uint8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m4_m))) +vint16m4_t vwmaccus(vbool4_t op0, vint16m4_t op1, uint8_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m8))) +vint16m8_t vwmaccus(vint16m8_t op0, uint8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i16m8_m))) +vint16m8_t vwmaccus(vbool2_t op0, vint16m8_t op1, uint8_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32mf2))) +vint32mf2_t vwmaccus(vint32mf2_t op0, uint16_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32mf2_m))) +vint32mf2_t vwmaccus(vbool64_t op0, vint32mf2_t op1, uint16_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m1))) +vint32m1_t vwmaccus(vint32m1_t op0, uint16_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m1_m))) +vint32m1_t vwmaccus(vbool32_t op0, vint32m1_t op1, uint16_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m2))) +vint32m2_t vwmaccus(vint32m2_t op0, uint16_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m2_m))) +vint32m2_t vwmaccus(vbool16_t op0, vint32m2_t op1, uint16_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m4))) +vint32m4_t vwmaccus(vint32m4_t op0, uint16_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m4_m))) +vint32m4_t vwmaccus(vbool8_t op0, vint32m4_t op1, uint16_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m8))) +vint32m8_t vwmaccus(vint32m8_t op0, uint16_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i32m8_m))) +vint32m8_t vwmaccus(vbool4_t op0, vint32m8_t op1, uint16_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m1))) +vint64m1_t vwmaccus(vint64m1_t op0, uint32_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m1_m))) +vint64m1_t vwmaccus(vbool64_t op0, vint64m1_t op1, uint32_t op2, vint32mf2_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m2))) +vint64m2_t vwmaccus(vint64m2_t op0, uint32_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m2_m))) +vint64m2_t vwmaccus(vbool32_t op0, vint64m2_t op1, uint32_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m4))) +vint64m4_t vwmaccus(vint64m4_t op0, uint32_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m4_m))) +vint64m4_t vwmaccus(vbool16_t op0, vint64m4_t op1, uint32_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m8))) +vint64m8_t vwmaccus(vint64m8_t op0, uint32_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwmaccus_vx_i64m8_m))) +vint64m8_t vwmaccus(vbool8_t op0, vint64m8_t op1, uint32_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8m1))) +vint8m1_t vmerge(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8m2))) +vint8m2_t vmerge(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8m4))) +vint8m4_t vmerge(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8m8))) +vint8m8_t vmerge(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8mf2))) +vint8mf2_t vmerge(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8mf4))) +vint8mf4_t vmerge(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i8mf8))) +vint8mf8_t vmerge(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i16m1))) +vint16m1_t vmerge(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i16m2))) +vint16m2_t vmerge(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i16m4))) +vint16m4_t vmerge(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i16m8))) +vint16m8_t vmerge(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i16mf2))) +vint16mf2_t vmerge(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i16mf4))) +vint16mf4_t vmerge(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i32m1))) +vint32m1_t vmerge(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i32m2))) +vint32m2_t vmerge(vbool16_t op0, 
vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i32m4))) +vint32m4_t vmerge(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i32m8))) +vint32m8_t vmerge(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i32mf2))) +vint32mf2_t vmerge(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i64m1))) +vint64m1_t vmerge(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i64m2))) +vint64m2_t vmerge(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i64m4))) +vint64m4_t vmerge(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_i64m8))) +vint64m8_t vmerge(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8m1))) +vint8m1_t vmerge(vbool8_t op0, vint8m1_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8m2))) +vint8m2_t vmerge(vbool4_t op0, vint8m2_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8m4))) +vint8m4_t vmerge(vbool2_t op0, vint8m4_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8m8))) +vint8m8_t vmerge(vbool1_t op0, vint8m8_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8mf2))) +vint8mf2_t vmerge(vbool16_t op0, vint8mf2_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8mf4))) +vint8mf4_t vmerge(vbool32_t op0, vint8mf4_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i8mf8))) +vint8mf8_t vmerge(vbool64_t op0, vint8mf8_t op1, int8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i16m1))) +vint16m1_t vmerge(vbool16_t op0, vint16m1_t op1, int16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i16m2))) +vint16m2_t vmerge(vbool8_t op0, vint16m2_t op1, int16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i16m4))) +vint16m4_t vmerge(vbool4_t op0, vint16m4_t op1, int16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i16m8))) +vint16m8_t vmerge(vbool2_t op0, vint16m8_t op1, int16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i16mf2))) +vint16mf2_t vmerge(vbool32_t op0, vint16mf2_t op1, int16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i16mf4))) +vint16mf4_t vmerge(vbool64_t op0, vint16mf4_t op1, int16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i32m1))) +vint32m1_t vmerge(vbool32_t op0, vint32m1_t op1, int32_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i32m2))) +vint32m2_t vmerge(vbool16_t op0, vint32m2_t op1, int32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i32m4))) +vint32m4_t vmerge(vbool8_t op0, vint32m4_t op1, int32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i32m8))) +vint32m8_t vmerge(vbool4_t op0, vint32m8_t op1, int32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i32mf2))) +vint32mf2_t vmerge(vbool64_t op0, vint32mf2_t op1, int32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i64m1))) +vint64m1_t vmerge(vbool64_t op0, vint64m1_t op1, int64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i64m2))) +vint64m2_t vmerge(vbool32_t op0, vint64m2_t op1, int64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i64m4))) +vint64m4_t vmerge(vbool16_t op0, vint64m4_t op1, int64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_i64m8))) +vint64m8_t vmerge(vbool8_t op0, vint64m8_t op1, int64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m1))) +void vsuxei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m1_m))) +void vsuxei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m2))) +void vsuxei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m2_m))) +void vsuxei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m4))) +void vsuxei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m4_m))) +void vsuxei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m8))) +void vsuxei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32m8_m))) +void vsuxei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32mf2))) +void vsuxei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i32mf2_m))) +void vsuxei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8m1))) +vuint8m1_t vmerge(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8m2))) +vuint8m2_t vmerge(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8m4))) +vuint8m4_t vmerge(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8m8))) +vuint8m8_t vmerge(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8mf2))) +vuint8mf2_t vmerge(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8mf4))) +vuint8mf4_t vmerge(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u8mf8))) +vuint8mf8_t vmerge(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u16m1))) +vuint16m1_t vmerge(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u16m2))) +vuint16m2_t vmerge(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u16m4))) +vuint16m4_t vmerge(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u16m8))) +vuint16m8_t vmerge(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u16mf2))) +vuint16mf2_t vmerge(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u16mf4))) +vuint16mf4_t vmerge(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u32m1))) +vuint32m1_t vmerge(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u32m2))) +vuint32m2_t vmerge(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u32m4))) +vuint32m4_t vmerge(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u32m8))) +vuint32m8_t vmerge(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u32mf2))) +vuint32mf2_t vmerge(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u64m1))) +vuint64m1_t vmerge(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u64m2))) +vuint64m2_t vmerge(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u64m4))) +vuint64m4_t vmerge(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_u64m8))) +vuint64m8_t vmerge(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8m1))) +vuint8m1_t vmerge(vbool8_t op0, vuint8m1_t op1, uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8m2))) +vuint8m2_t vmerge(vbool4_t op0, vuint8m2_t op1, 
uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8m4))) +vuint8m4_t vmerge(vbool2_t op0, vuint8m4_t op1, uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8m8))) +vuint8m8_t vmerge(vbool1_t op0, vuint8m8_t op1, uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8mf2))) +vuint8mf2_t vmerge(vbool16_t op0, vuint8mf2_t op1, uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8mf4))) +vuint8mf4_t vmerge(vbool32_t op0, vuint8mf4_t op1, uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u8mf8))) +vuint8mf8_t vmerge(vbool64_t op0, vuint8mf8_t op1, uint8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u16m1))) +vuint16m1_t vmerge(vbool16_t op0, vuint16m1_t op1, uint16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u16m2))) +vuint16m2_t vmerge(vbool8_t op0, vuint16m2_t op1, uint16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u16m4))) +vuint16m4_t vmerge(vbool4_t op0, vuint16m4_t op1, uint16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u16m8))) +vuint16m8_t vmerge(vbool2_t op0, vuint16m8_t op1, uint16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u16mf2))) +vuint16mf2_t vmerge(vbool32_t op0, vuint16mf2_t op1, uint16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u16mf4))) +vuint16mf4_t vmerge(vbool64_t op0, vuint16mf4_t op1, uint16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u32m1))) +vuint32m1_t vmerge(vbool32_t op0, vuint32m1_t op1, uint32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u32m2))) +vuint32m2_t vmerge(vbool16_t op0, vuint32m2_t op1, uint32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u32m4))) +vuint32m4_t vmerge(vbool8_t op0, vuint32m4_t op1, uint32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u32m8))) +vuint32m8_t vmerge(vbool4_t op0, vuint32m8_t op1, uint32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u32mf2))) +vuint32mf2_t vmerge(vbool64_t op0, vuint32mf2_t op1, uint32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u64m1))) +vuint64m1_t vmerge(vbool64_t op0, vuint64m1_t op1, uint64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u64m2))) +vuint64m2_t vmerge(vbool32_t op0, vuint64m2_t op1, uint64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u64m4))) +vuint64m4_t vmerge(vbool16_t op0, vuint64m4_t op1, uint64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vxm_u64m8))) +vuint64m8_t vmerge(vbool8_t op0, vuint64m8_t op1, uint64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8m1))) +vuint8m1_t vmv_v(vuint8m1_t op0, size_t op1); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8m2))) +vuint8m2_t vmv_v(vuint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8m4))) +vuint8m4_t vmv_v(vuint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8m8))) +vuint8m8_t vmv_v(vuint8m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8mf2))) +vuint8mf2_t vmv_v(vuint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8mf4))) +vuint8mf4_t vmv_v(vuint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u8mf8))) +vuint8mf8_t vmv_v(vuint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u16m1))) +vuint16m1_t vmv_v(vuint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u16m2))) +vuint16m2_t vmv_v(vuint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u16m4))) +vuint16m4_t vmv_v(vuint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u16m8))) +vuint16m8_t vmv_v(vuint16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u16mf2))) +vuint16mf2_t vmv_v(vuint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u16mf4))) +vuint16mf4_t vmv_v(vuint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u32m1))) +vuint32m1_t vmv_v(vuint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u32m2))) +vuint32m2_t vmv_v(vuint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u32m4))) +vuint32m4_t vmv_v(vuint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u32m8))) +vuint32m8_t vmv_v(vuint32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u32mf2))) +vuint32mf2_t vmv_v(vuint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u64m1))) +vuint64m1_t vmv_v(vuint64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u64m2))) +vuint64m2_t vmv_v(vuint64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u64m4))) +vuint64m4_t vmv_v(vuint64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_u64m8))) +vuint64m8_t vmv_v(vuint64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8m1))) +vint8m1_t vmv_v(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8m2))) +vint8m2_t vmv_v(vint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8m4))) +vint8m4_t vmv_v(vint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8m8))) +vint8m8_t vmv_v(vint8m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8mf2))) +vint8mf2_t vmv_v(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8mf4))) 
+vint8mf4_t vmv_v(vint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i8mf8))) +vint8mf8_t vmv_v(vint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i16m1))) +vint16m1_t vmv_v(vint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i16m2))) +vint16m2_t vmv_v(vint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i16m4))) +vint16m4_t vmv_v(vint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i16m8))) +vint16m8_t vmv_v(vint16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i16mf2))) +vint16mf2_t vmv_v(vint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i16mf4))) +vint16mf4_t vmv_v(vint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i32m1))) +vint32m1_t vmv_v(vint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i32m2))) +vint32m2_t vmv_v(vint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i32m4))) +vint32m4_t vmv_v(vint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i32m8))) +vint32m8_t vmv_v(vint32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i32mf2))) +vint32mf2_t vmv_v(vint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i64m1))) +vint64m1_t vmv_v(vint64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i64m2))) +vint64m2_t vmv_v(vint64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i64m4))) +vint64m4_t vmv_v(vint64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_i64m8))) +vint64m8_t vmv_v(vint64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m1))) +vuint8m1_t vsaddu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m1_m))) +vuint8m1_t vsaddu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m2))) +vuint8m2_t vsaddu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m2_m))) +vuint8m2_t vsaddu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m4))) +vuint8m4_t vsaddu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m4_m))) +vuint8m4_t vsaddu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m8))) +vuint8m8_t vsaddu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8m8_m))) +vuint8m8_t vsaddu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8mf2))) +vuint8mf2_t vsaddu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8mf2_m))) +vuint8mf2_t vsaddu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8mf4))) +vuint8mf4_t vsaddu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8mf4_m))) +vuint8mf4_t vsaddu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8mf8))) +vuint8mf8_t vsaddu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u8mf8_m))) +vuint8mf8_t vsaddu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m1))) +vuint16m1_t vsaddu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m1_m))) +vuint16m1_t vsaddu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m2))) +vuint16m2_t vsaddu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m2_m))) +vuint16m2_t vsaddu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m4))) +vuint16m4_t vsaddu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m4_m))) +vuint16m4_t vsaddu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m8))) +vuint16m8_t vsaddu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16m8_m))) +vuint16m8_t vsaddu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16mf2))) +vuint16mf2_t vsaddu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16mf2_m))) +vuint16mf2_t vsaddu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16mf4))) +vuint16mf4_t vsaddu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u16mf4_m))) +vuint16mf4_t vsaddu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m1))) +vuint32m1_t vsaddu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m1_m))) +vuint32m1_t vsaddu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m2))) +vuint32m2_t vsaddu(vuint32m2_t 
op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m2_m))) +vuint32m2_t vsaddu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m4))) +vuint32m4_t vsaddu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m4_m))) +vuint32m4_t vsaddu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m8))) +vuint32m8_t vsaddu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32m8_m))) +vuint32m8_t vsaddu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32mf2))) +vuint32mf2_t vsaddu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u32mf2_m))) +vuint32mf2_t vsaddu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m1))) +vuint64m1_t vsaddu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m1_m))) +vuint64m1_t vsaddu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m2))) +vuint64m2_t vsaddu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m2_m))) +vuint64m2_t vsaddu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m4))) +vuint64m4_t vsaddu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m4_m))) +vuint64m4_t vsaddu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m8))) +vuint64m8_t vsaddu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vv_u64m8_m))) +vuint64m8_t vsaddu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m1))) +vuint8m1_t vsaddu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m1_m))) +vuint8m1_t vsaddu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m2))) +vuint8m2_t vsaddu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m2_m))) +vuint8m2_t vsaddu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m4))) +vuint8m4_t vsaddu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m4_m))) +vuint8m4_t 
vsaddu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m8))) +vuint8m8_t vsaddu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8m8_m))) +vuint8m8_t vsaddu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8mf2))) +vuint8mf2_t vsaddu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8mf2_m))) +vuint8mf2_t vsaddu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8mf4))) +vuint8mf4_t vsaddu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8mf4_m))) +vuint8mf4_t vsaddu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8mf8))) +vuint8mf8_t vsaddu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u8mf8_m))) +vuint8mf8_t vsaddu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m1))) +vuint16m1_t vsaddu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m1_m))) +vuint16m1_t vsaddu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m2))) +vuint16m2_t vsaddu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m2_m))) +vuint16m2_t vsaddu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m4))) +vuint16m4_t vsaddu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m4_m))) +vuint16m4_t vsaddu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m8))) +vuint16m8_t vsaddu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16m8_m))) +vuint16m8_t vsaddu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16mf2))) +vuint16mf2_t vsaddu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16mf2_m))) +vuint16mf2_t vsaddu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16mf4))) +vuint16mf4_t vsaddu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u16mf4_m))) +vuint16mf4_t vsaddu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m1))) +vuint32m1_t 
vsaddu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m1_m))) +vuint32m1_t vsaddu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m2))) +vuint32m2_t vsaddu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m2_m))) +vuint32m2_t vsaddu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m4))) +vuint32m4_t vsaddu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m4_m))) +vuint32m4_t vsaddu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m8))) +vuint32m8_t vsaddu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32m8_m))) +vuint32m8_t vsaddu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32mf2))) +vuint32mf2_t vsaddu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u32mf2_m))) +vuint32mf2_t vsaddu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m1))) +vuint64m1_t vsaddu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m1_m))) +vuint64m1_t vsaddu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m2))) +vuint64m2_t vsaddu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m2_m))) +vuint64m2_t vsaddu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m4))) +vuint64m4_t vsaddu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m4_m))) +vuint64m4_t vsaddu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m8))) +vuint64m8_t vsaddu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsaddu_vx_u64m8_m))) +vuint64m8_t vsaddu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m1))) +vint8m1_t vsadd(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m1_m))) +vint8m1_t vsadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m2))) +vint8m2_t vsadd(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m2_m))) +vint8m2_t vsadd(vbool4_t op0, vint8m2_t op1, 
vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m4))) +vint8m4_t vsadd(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m4_m))) +vint8m4_t vsadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m8))) +vint8m8_t vsadd(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8m8_m))) +vint8m8_t vsadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8mf2))) +vint8mf2_t vsadd(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8mf2_m))) +vint8mf2_t vsadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8mf4))) +vint8mf4_t vsadd(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8mf4_m))) +vint8mf4_t vsadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8mf8))) +vint8mf8_t vsadd(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i8mf8_m))) +vint8mf8_t vsadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m1))) +vint16m1_t vsadd(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m1_m))) +vint16m1_t vsadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m2))) +vint16m2_t vsadd(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m2_m))) +vint16m2_t vsadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m4))) +vint16m4_t vsadd(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m4_m))) +vint16m4_t vsadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m8))) +vint16m8_t vsadd(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16m8_m))) +vint16m8_t vsadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16mf2))) +vint16mf2_t vsadd(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16mf2_m))) +vint16mf2_t vsadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16mf4))) +vint16mf4_t vsadd(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i16mf4_m))) +vint16mf4_t vsadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m1))) +vint32m1_t vsadd(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m1_m))) +vint32m1_t vsadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m2))) +vint32m2_t vsadd(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m2_m))) +vint32m2_t vsadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m4))) +vint32m4_t vsadd(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m4_m))) +vint32m4_t vsadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m8))) +vint32m8_t vsadd(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32m8_m))) +vint32m8_t vsadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32mf2))) +vint32mf2_t vsadd(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i32mf2_m))) +vint32mf2_t vsadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m1))) +vint64m1_t vsadd(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m1_m))) +vint64m1_t vsadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m2))) +vint64m2_t vsadd(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m2_m))) +vint64m2_t vsadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m4))) +vint64m4_t vsadd(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m4_m))) +vint64m4_t vsadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m8))) +vint64m8_t vsadd(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vv_i64m8_m))) +vint64m8_t vsadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m1))) +vint8m1_t vsadd(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m1_m))) +vint8m1_t vsadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m2))) +vint8m2_t vsadd(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m2_m))) +vint8m2_t vsadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m4))) +vint8m4_t vsadd(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m4_m))) +vint8m4_t vsadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m8))) +vint8m8_t vsadd(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8m8_m))) +vint8m8_t vsadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8mf2))) +vint8mf2_t vsadd(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8mf2_m))) +vint8mf2_t vsadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8mf4))) +vint8mf4_t vsadd(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8mf4_m))) +vint8mf4_t vsadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8mf8))) +vint8mf8_t vsadd(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i8mf8_m))) +vint8mf8_t vsadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m1))) +vint16m1_t vsadd(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m1_m))) +vint16m1_t vsadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m2))) +vint16m2_t vsadd(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m2_m))) +vint16m2_t vsadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m4))) +vint16m4_t vsadd(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m4_m))) +vint16m4_t vsadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m8))) +vint16m8_t vsadd(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16m8_m))) +vint16m8_t vsadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16mf2))) +vint16mf2_t vsadd(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16mf2_m))) +vint16mf2_t vsadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + 
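+/*
+ * A hedged usage sketch (not part of the generated declarations): the
+ * overloaded vsadd/vsaddu names resolve on operand types, so one strip-mined
+ * loop covers any element width. Assumes vsetvl_e32m1, vle32_v_i32m1 and the
+ * overloaded vse32 from elsewhere in this header, and a target with the V
+ * extension enabled.
+ *
+ *   void sat_add_i32(int32_t *dst, const int32_t *a, const int32_t *b,
+ *                    size_t n) {
+ *     for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
+ *       vl = vsetvl_e32m1(n);                  // lanes handled this pass
+ *       vint32m1_t va = vle32_v_i32m1(a, vl);  // unit-stride loads
+ *       vint32m1_t vb = vle32_v_i32m1(b, vl);
+ *       vse32(dst, vsadd(va, vb, vl), vl);     // saturating add, then store
+ *     }
+ *   }
+ */
+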
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16mf4))) +vint16mf4_t vsadd(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i16mf4_m))) +vint16mf4_t vsadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m1))) +vint32m1_t vsadd(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m1_m))) +vint32m1_t vsadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m2))) +vint32m2_t vsadd(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m2_m))) +vint32m2_t vsadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m4))) +vint32m4_t vsadd(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m4_m))) +vint32m4_t vsadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m8))) +vint32m8_t vsadd(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32m8_m))) +vint32m8_t vsadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32mf2))) +vint32mf2_t vsadd(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i32mf2_m))) +vint32mf2_t vsadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m1))) +vint64m1_t vsadd(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m1_m))) +vint64m1_t vsadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m2))) +vint64m2_t vsadd(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m2_m))) +vint64m2_t vsadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m4))) +vint64m4_t vsadd(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m4_m))) +vint64m4_t vsadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m8))) +vint64m8_t vsadd(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsadd_vx_i64m8_m))) +vint64m8_t vsadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m1))) +void vsuxei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m1_m))) +void 
vsuxei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m2))) +void vsuxei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m2_m))) +void vsuxei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m4))) +void vsuxei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m4_m))) +void vsuxei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m8))) +void vsuxei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32m8_m))) +void vsuxei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32mf2))) +void vsuxei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u32mf2_m))) +void vsuxei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m1))) +vuint8m1_t vssubu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m1_m))) +vuint8m1_t vssubu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m2))) +vuint8m2_t vssubu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m2_m))) +vuint8m2_t vssubu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m4))) +vuint8m4_t vssubu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m4_m))) +vuint8m4_t vssubu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m8))) +vuint8m8_t vssubu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8m8_m))) +vuint8m8_t vssubu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8mf2))) +vuint8mf2_t vssubu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8mf2_m))) +vuint8mf2_t vssubu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8mf4))) +vuint8mf4_t vssubu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8mf4_m))) +vuint8mf4_t vssubu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8mf8))) +vuint8mf8_t vssubu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u8mf8_m))) +vuint8mf8_t vssubu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m1))) +vuint16m1_t vssubu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m1_m))) +vuint16m1_t vssubu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m2))) +vuint16m2_t vssubu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m2_m))) +vuint16m2_t vssubu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m4))) +vuint16m4_t vssubu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m4_m))) +vuint16m4_t vssubu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m8))) +vuint16m8_t vssubu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16m8_m))) +vuint16m8_t vssubu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16mf2))) +vuint16mf2_t vssubu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16mf2_m))) +vuint16mf2_t vssubu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16mf4))) +vuint16mf4_t vssubu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u16mf4_m))) +vuint16mf4_t vssubu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m1))) +vuint32m1_t vssubu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m1_m))) +vuint32m1_t vssubu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m2))) +vuint32m2_t vssubu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m2_m))) +vuint32m2_t vssubu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m4))) +vuint32m4_t vssubu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m4_m))) +vuint32m4_t vssubu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m8))) +vuint32m8_t vssubu(vuint32m8_t 
op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32m8_m))) +vuint32m8_t vssubu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32mf2))) +vuint32mf2_t vssubu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u32mf2_m))) +vuint32mf2_t vssubu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m1))) +vuint64m1_t vssubu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m1_m))) +vuint64m1_t vssubu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m2))) +vuint64m2_t vssubu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m2_m))) +vuint64m2_t vssubu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m4))) +vuint64m4_t vssubu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m4_m))) +vuint64m4_t vssubu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m8))) +vuint64m8_t vssubu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vv_u64m8_m))) +vuint64m8_t vssubu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m1))) +vuint8m1_t vssubu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m1_m))) +vuint8m1_t vssubu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m2))) +vuint8m2_t vssubu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m2_m))) +vuint8m2_t vssubu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m4))) +vuint8m4_t vssubu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m4_m))) +vuint8m4_t vssubu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m8))) +vuint8m8_t vssubu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8m8_m))) +vuint8m8_t vssubu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8mf2))) +vuint8mf2_t vssubu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8mf2_m))) +vuint8mf2_t vssubu(vbool16_t op0, vuint8mf2_t 
op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8mf4))) +vuint8mf4_t vssubu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8mf4_m))) +vuint8mf4_t vssubu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8mf8))) +vuint8mf8_t vssubu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u8mf8_m))) +vuint8mf8_t vssubu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m1))) +vuint16m1_t vssubu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m1_m))) +vuint16m1_t vssubu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m2))) +vuint16m2_t vssubu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m2_m))) +vuint16m2_t vssubu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m4))) +vuint16m4_t vssubu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m4_m))) +vuint16m4_t vssubu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m8))) +vuint16m8_t vssubu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16m8_m))) +vuint16m8_t vssubu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16mf2))) +vuint16mf2_t vssubu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16mf2_m))) +vuint16mf2_t vssubu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16mf4))) +vuint16mf4_t vssubu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u16mf4_m))) +vuint16mf4_t vssubu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m1))) +vuint32m1_t vssubu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m1_m))) +vuint32m1_t vssubu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m2))) +vuint32m2_t vssubu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m2_m))) +vuint32m2_t vssubu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m4))) +vuint32m4_t vssubu(vuint32m4_t 
op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m4_m))) +vuint32m4_t vssubu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m8))) +vuint32m8_t vssubu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32m8_m))) +vuint32m8_t vssubu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32mf2))) +vuint32mf2_t vssubu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u32mf2_m))) +vuint32mf2_t vssubu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m1))) +vuint64m1_t vssubu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m1_m))) +vuint64m1_t vssubu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m2))) +vuint64m2_t vssubu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m2_m))) +vuint64m2_t vssubu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m4))) +vuint64m4_t vssubu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m4_m))) +vuint64m4_t vssubu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m8))) +vuint64m8_t vssubu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssubu_vx_u64m8_m))) +vuint64m8_t vssubu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m1))) +vint8m1_t vssub(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m1_m))) +vint8m1_t vssub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m2))) +vint8m2_t vssub(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m2_m))) +vint8m2_t vssub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m4))) +vint8m4_t vssub(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m4_m))) +vint8m4_t vssub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m8))) +vint8m8_t vssub(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8m8_m))) +vint8m8_t vssub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + 
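+/*
+ * Note on the masked (`_m`) overloads: the operand order is
+ * (mask, maskedoff, op1, op2, vl), and lanes whose mask bit is clear pass
+ * the corresponding maskedoff element through unchanged. A minimal sketch,
+ * assuming the overloaded vmsltu comparison declared elsewhere in this
+ * header:
+ *
+ *   // d[i] = b[i] < a[i] ? sat(a[i] - b[i]) : a[i]
+ *   vbool32_t lt = vmsltu(vb, va, vl);           // mask: b < a (e32m1)
+ *   vuint32m1_t d = vssubu(lt, va, va, vb, vl);  // masked-off lanes keep va
+ */
+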
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8mf2))) +vint8mf2_t vssub(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8mf2_m))) +vint8mf2_t vssub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8mf4))) +vint8mf4_t vssub(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8mf4_m))) +vint8mf4_t vssub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8mf8))) +vint8mf8_t vssub(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i8mf8_m))) +vint8mf8_t vssub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m1))) +vint16m1_t vssub(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m1_m))) +vint16m1_t vssub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m2))) +vint16m2_t vssub(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m2_m))) +vint16m2_t vssub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m4))) +vint16m4_t vssub(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m4_m))) +vint16m4_t vssub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m8))) +vint16m8_t vssub(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16m8_m))) +vint16m8_t vssub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16mf2))) +vint16mf2_t vssub(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16mf2_m))) +vint16mf2_t vssub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16mf4))) +vint16mf4_t vssub(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i16mf4_m))) +vint16mf4_t vssub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m1))) +vint32m1_t vssub(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m1_m))) +vint32m1_t vssub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m2))) +vint32m2_t vssub(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m2_m))) +vint32m2_t vssub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m4))) +vint32m4_t vssub(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m4_m))) +vint32m4_t vssub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m8))) +vint32m8_t vssub(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32m8_m))) +vint32m8_t vssub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32mf2))) +vint32mf2_t vssub(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i32mf2_m))) +vint32mf2_t vssub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m1))) +vint64m1_t vssub(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m1_m))) +vint64m1_t vssub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m2))) +vint64m2_t vssub(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m2_m))) +vint64m2_t vssub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m4))) +vint64m4_t vssub(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m4_m))) +vint64m4_t vssub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m8))) +vint64m8_t vssub(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vv_i64m8_m))) +vint64m8_t vssub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m1))) +vint8m1_t vssub(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m1_m))) +vint8m1_t vssub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m2))) +vint8m2_t vssub(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m2_m))) +vint8m2_t vssub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m4))) +vint8m4_t vssub(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m4_m))) +vint8m4_t vssub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m8))) +vint8m8_t 
vssub(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8m8_m))) +vint8m8_t vssub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8mf2))) +vint8mf2_t vssub(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8mf2_m))) +vint8mf2_t vssub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8mf4))) +vint8mf4_t vssub(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8mf4_m))) +vint8mf4_t vssub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8mf8))) +vint8mf8_t vssub(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i8mf8_m))) +vint8mf8_t vssub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m1))) +vint16m1_t vssub(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m1_m))) +vint16m1_t vssub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m2))) +vint16m2_t vssub(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m2_m))) +vint16m2_t vssub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m4))) +vint16m4_t vssub(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m4_m))) +vint16m4_t vssub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m8))) +vint16m8_t vssub(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16m8_m))) +vint16m8_t vssub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16mf2))) +vint16mf2_t vssub(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16mf2_m))) +vint16mf2_t vssub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16mf4))) +vint16mf4_t vssub(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i16mf4_m))) +vint16mf4_t vssub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m1))) +vint32m1_t vssub(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m1_m))) +vint32m1_t vssub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m2))) +vint32m2_t vssub(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m2_m))) +vint32m2_t vssub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m4))) +vint32m4_t vssub(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m4_m))) +vint32m4_t vssub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m8))) +vint32m8_t vssub(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32m8_m))) +vint32m8_t vssub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32mf2))) +vint32mf2_t vssub(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i32mf2_m))) +vint32mf2_t vssub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m1))) +vint64m1_t vssub(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m1_m))) +vint64m1_t vssub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m2))) +vint64m2_t vssub(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m2_m))) +vint64m2_t vssub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m4))) +vint64m4_t vssub(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m4_m))) +vint64m4_t vssub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m8))) +vint64m8_t vssub(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssub_vx_i64m8_m))) +vint64m8_t vssub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m1))) +vuint8m1_t vaaddu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m1_m))) +vuint8m1_t vaaddu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m2))) +vuint8m2_t vaaddu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m2_m))) +vuint8m2_t vaaddu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m4))) +vuint8m4_t vaaddu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m4_m))) +vuint8m4_t vaaddu(vbool2_t op0, vuint8m4_t 
op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m8))) +vuint8m8_t vaaddu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8m8_m))) +vuint8m8_t vaaddu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8mf2))) +vuint8mf2_t vaaddu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8mf2_m))) +vuint8mf2_t vaaddu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8mf4))) +vuint8mf4_t vaaddu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8mf4_m))) +vuint8mf4_t vaaddu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8mf8))) +vuint8mf8_t vaaddu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u8mf8_m))) +vuint8mf8_t vaaddu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m1))) +vuint16m1_t vaaddu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m1_m))) +vuint16m1_t vaaddu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m2))) +vuint16m2_t vaaddu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m2_m))) +vuint16m2_t vaaddu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m4))) +vuint16m4_t vaaddu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m4_m))) +vuint16m4_t vaaddu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m8))) +vuint16m8_t vaaddu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16m8_m))) +vuint16m8_t vaaddu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16mf2))) +vuint16mf2_t vaaddu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16mf2_m))) +vuint16mf2_t vaaddu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16mf4))) +vuint16mf4_t vaaddu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u16mf4_m))) +vuint16mf4_t vaaddu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m1))) +vuint32m1_t vaaddu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m1_m))) +vuint32m1_t vaaddu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m2))) +vuint32m2_t vaaddu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m2_m))) +vuint32m2_t vaaddu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m4))) +vuint32m4_t vaaddu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m4_m))) +vuint32m4_t vaaddu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m8))) +vuint32m8_t vaaddu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32m8_m))) +vuint32m8_t vaaddu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32mf2))) +vuint32mf2_t vaaddu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u32mf2_m))) +vuint32mf2_t vaaddu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m1))) +vuint64m1_t vaaddu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m1_m))) +vuint64m1_t vaaddu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m2))) +vuint64m2_t vaaddu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m2_m))) +vuint64m2_t vaaddu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m4))) +vuint64m4_t vaaddu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m4_m))) +vuint64m4_t vaaddu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m8))) +vuint64m8_t vaaddu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vv_u64m8_m))) +vuint64m8_t vaaddu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m1))) +vuint8m1_t vaaddu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m1_m))) +vuint8m1_t vaaddu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m2))) +vuint8m2_t vaaddu(vuint8m2_t op0, uint8_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m2_m))) +vuint8m2_t vaaddu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m4))) +vuint8m4_t vaaddu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m4_m))) +vuint8m4_t vaaddu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m8))) +vuint8m8_t vaaddu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8m8_m))) +vuint8m8_t vaaddu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8mf2))) +vuint8mf2_t vaaddu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8mf2_m))) +vuint8mf2_t vaaddu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8mf4))) +vuint8mf4_t vaaddu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8mf4_m))) +vuint8mf4_t vaaddu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8mf8))) +vuint8mf8_t vaaddu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u8mf8_m))) +vuint8mf8_t vaaddu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m1))) +vuint16m1_t vaaddu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m1_m))) +vuint16m1_t vaaddu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m2))) +vuint16m2_t vaaddu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m2_m))) +vuint16m2_t vaaddu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m4))) +vuint16m4_t vaaddu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m4_m))) +vuint16m4_t vaaddu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m8))) +vuint16m8_t vaaddu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16m8_m))) +vuint16m8_t vaaddu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16mf2))) +vuint16mf2_t vaaddu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16mf2_m))) +vuint16mf2_t vaaddu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16mf4))) +vuint16mf4_t vaaddu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u16mf4_m))) +vuint16mf4_t vaaddu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m1))) +vuint32m1_t vaaddu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m1_m))) +vuint32m1_t vaaddu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m2))) +vuint32m2_t vaaddu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m2_m))) +vuint32m2_t vaaddu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m4))) +vuint32m4_t vaaddu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m4_m))) +vuint32m4_t vaaddu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m8))) +vuint32m8_t vaaddu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32m8_m))) +vuint32m8_t vaaddu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32mf2))) +vuint32mf2_t vaaddu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u32mf2_m))) +vuint32mf2_t vaaddu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m1))) +vuint64m1_t vaaddu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m1_m))) +vuint64m1_t vaaddu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m2))) +vuint64m2_t vaaddu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m2_m))) +vuint64m2_t vaaddu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m4))) +vuint64m4_t vaaddu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m4_m))) +vuint64m4_t vaaddu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m8))) +vuint64m8_t vaaddu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaaddu_vx_u64m8_m))) +vuint64m8_t vaaddu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m1))) +vint8m1_t vaadd(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m1_m))) +vint8m1_t vaadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m2))) +vint8m2_t vaadd(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m2_m))) +vint8m2_t vaadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m4))) +vint8m4_t vaadd(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m4_m))) +vint8m4_t vaadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m8))) +vint8m8_t vaadd(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8m8_m))) +vint8m8_t vaadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8mf2))) +vint8mf2_t vaadd(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8mf2_m))) +vint8mf2_t vaadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8mf4))) +vint8mf4_t vaadd(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8mf4_m))) +vint8mf4_t vaadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8mf8))) +vint8mf8_t vaadd(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i8mf8_m))) +vint8mf8_t vaadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m1))) +vint16m1_t vaadd(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m1_m))) +vint16m1_t vaadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m2))) +vint16m2_t vaadd(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m2_m))) +vint16m2_t vaadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m4))) +vint16m4_t vaadd(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m4_m))) +vint16m4_t vaadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m8))) +vint16m8_t vaadd(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16m8_m))) +vint16m8_t vaadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16mf2))) +vint16mf2_t 
vaadd(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16mf2_m))) +vint16mf2_t vaadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16mf4))) +vint16mf4_t vaadd(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i16mf4_m))) +vint16mf4_t vaadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m1))) +vint32m1_t vaadd(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m1_m))) +vint32m1_t vaadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m2))) +vint32m2_t vaadd(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m2_m))) +vint32m2_t vaadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m4))) +vint32m4_t vaadd(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m4_m))) +vint32m4_t vaadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m8))) +vint32m8_t vaadd(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32m8_m))) +vint32m8_t vaadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32mf2))) +vint32mf2_t vaadd(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i32mf2_m))) +vint32mf2_t vaadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m1))) +vint64m1_t vaadd(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m1_m))) +vint64m1_t vaadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m2))) +vint64m2_t vaadd(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m2_m))) +vint64m2_t vaadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m4))) +vint64m4_t vaadd(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m4_m))) +vint64m4_t vaadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m8))) +vint64m8_t vaadd(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vv_i64m8_m))) +vint64m8_t vaadd(vbool8_t op0, vint64m8_t op1, vint64m8_t 
op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m1))) +vint8m1_t vaadd(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m1_m))) +vint8m1_t vaadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m2))) +vint8m2_t vaadd(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m2_m))) +vint8m2_t vaadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m4))) +vint8m4_t vaadd(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m4_m))) +vint8m4_t vaadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m8))) +vint8m8_t vaadd(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8m8_m))) +vint8m8_t vaadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8mf2))) +vint8mf2_t vaadd(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8mf2_m))) +vint8mf2_t vaadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8mf4))) +vint8mf4_t vaadd(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8mf4_m))) +vint8mf4_t vaadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8mf8))) +vint8mf8_t vaadd(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i8mf8_m))) +vint8mf8_t vaadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m1))) +vint16m1_t vaadd(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m1_m))) +vint16m1_t vaadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m2))) +vint16m2_t vaadd(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m2_m))) +vint16m2_t vaadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m4))) +vint16m4_t vaadd(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m4_m))) +vint16m4_t vaadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m8))) +vint16m8_t vaadd(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16m8_m))) +vint16m8_t vaadd(vbool2_t op0, vint16m8_t op1, vint16m8_t 
op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16mf2))) +vint16mf2_t vaadd(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16mf2_m))) +vint16mf2_t vaadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16mf4))) +vint16mf4_t vaadd(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i16mf4_m))) +vint16mf4_t vaadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m1))) +vint32m1_t vaadd(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m1_m))) +vint32m1_t vaadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m2))) +vint32m2_t vaadd(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m2_m))) +vint32m2_t vaadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m4))) +vint32m4_t vaadd(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m4_m))) +vint32m4_t vaadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m8))) +vint32m8_t vaadd(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32m8_m))) +vint32m8_t vaadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32mf2))) +vint32mf2_t vaadd(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i32mf2_m))) +vint32mf2_t vaadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m1))) +vint64m1_t vaadd(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m1_m))) +vint64m1_t vaadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m2))) +vint64m2_t vaadd(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m2_m))) +vint64m2_t vaadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m4))) +vint64m4_t vaadd(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m4_m))) +vint64m4_t vaadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m8))) +vint64m8_t vaadd(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vaadd_vx_i64m8_m))) +vint64m8_t vaadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m1))) +vuint8m1_t vasubu(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m1_m))) +vuint8m1_t vasubu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m2))) +vuint8m2_t vasubu(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m2_m))) +vuint8m2_t vasubu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m4))) +vuint8m4_t vasubu(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m4_m))) +vuint8m4_t vasubu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m8))) +vuint8m8_t vasubu(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8m8_m))) +vuint8m8_t vasubu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8mf2))) +vuint8mf2_t vasubu(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8mf2_m))) +vuint8mf2_t vasubu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8mf4))) +vuint8mf4_t vasubu(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8mf4_m))) +vuint8mf4_t vasubu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8mf8))) +vuint8mf8_t vasubu(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u8mf8_m))) +vuint8mf8_t vasubu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m1))) +vuint16m1_t vasubu(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m1_m))) +vuint16m1_t vasubu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m2))) +vuint16m2_t vasubu(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m2_m))) +vuint16m2_t vasubu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m4))) +vuint16m4_t vasubu(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m4_m))) +vuint16m4_t vasubu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t 
op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m8))) +vuint16m8_t vasubu(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16m8_m))) +vuint16m8_t vasubu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16mf2))) +vuint16mf2_t vasubu(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16mf2_m))) +vuint16mf2_t vasubu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16mf4))) +vuint16mf4_t vasubu(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u16mf4_m))) +vuint16mf4_t vasubu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m1))) +vuint32m1_t vasubu(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m1_m))) +vuint32m1_t vasubu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m2))) +vuint32m2_t vasubu(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m2_m))) +vuint32m2_t vasubu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m4))) +vuint32m4_t vasubu(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m4_m))) +vuint32m4_t vasubu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m8))) +vuint32m8_t vasubu(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32m8_m))) +vuint32m8_t vasubu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32mf2))) +vuint32mf2_t vasubu(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u32mf2_m))) +vuint32mf2_t vasubu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m1))) +vuint64m1_t vasubu(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m1_m))) +vuint64m1_t vasubu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m2))) +vuint64m2_t vasubu(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m2_m))) +vuint64m2_t vasubu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m4))) +vuint64m4_t vasubu(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m4_m))) +vuint64m4_t vasubu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m8))) +vuint64m8_t vasubu(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vv_u64m8_m))) +vuint64m8_t vasubu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m1))) +vuint8m1_t vasubu(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m1_m))) +vuint8m1_t vasubu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m2))) +vuint8m2_t vasubu(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m2_m))) +vuint8m2_t vasubu(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m4))) +vuint8m4_t vasubu(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m4_m))) +vuint8m4_t vasubu(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m8))) +vuint8m8_t vasubu(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8m8_m))) +vuint8m8_t vasubu(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8mf2))) +vuint8mf2_t vasubu(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8mf2_m))) +vuint8mf2_t vasubu(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8mf4))) +vuint8mf4_t vasubu(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8mf4_m))) +vuint8mf4_t vasubu(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8mf8))) +vuint8mf8_t vasubu(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u8mf8_m))) +vuint8mf8_t vasubu(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m1))) +vuint16m1_t vasubu(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m1_m))) +vuint16m1_t vasubu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m2))) +vuint16m2_t vasubu(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m2_m))) +vuint16m2_t vasubu(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m4))) +vuint16m4_t vasubu(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m4_m))) +vuint16m4_t vasubu(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m8))) +vuint16m8_t vasubu(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16m8_m))) +vuint16m8_t vasubu(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16mf2))) +vuint16mf2_t vasubu(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16mf2_m))) +vuint16mf2_t vasubu(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16mf4))) +vuint16mf4_t vasubu(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u16mf4_m))) +vuint16mf4_t vasubu(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m1))) +vuint32m1_t vasubu(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m1_m))) +vuint32m1_t vasubu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m2))) +vuint32m2_t vasubu(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m2_m))) +vuint32m2_t vasubu(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m4))) +vuint32m4_t vasubu(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m4_m))) +vuint32m4_t vasubu(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m8))) +vuint32m8_t vasubu(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32m8_m))) +vuint32m8_t vasubu(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32mf2))) +vuint32mf2_t vasubu(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u32mf2_m))) +vuint32mf2_t vasubu(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m1))) +vuint64m1_t vasubu(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m1_m))) +vuint64m1_t vasubu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m2))) +vuint64m2_t vasubu(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m2_m))) +vuint64m2_t vasubu(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m4))) +vuint64m4_t vasubu(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m4_m))) +vuint64m4_t vasubu(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m8))) +vuint64m8_t vasubu(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasubu_vx_u64m8_m))) +vuint64m8_t vasubu(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m1))) +void vsuxei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m1_m))) +void vsuxei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m2))) +void vsuxei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m2_m))) +void vsuxei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m4))) +void vsuxei8(int8_t * op0, vuint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m4_m))) +void vsuxei8(vbool2_t op0, int8_t * op1, vuint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m8))) +void vsuxei8(int8_t * op0, vuint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8m8_m))) +void vsuxei8(vbool1_t op0, int8_t * op1, vuint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8mf2))) +void vsuxei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8mf2_m))) +void vsuxei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8mf4))) +void vsuxei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8mf4_m))) +void vsuxei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8mf8))) +void vsuxei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i8mf8_m))) +void vsuxei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32m1))) +void vsuxei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32m1_m))) +void vsuxei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32m2))) +void vsuxei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32m2_m))) +void vsuxei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32m4))) +void vsuxei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32m4_m))) +void vsuxei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32mf2))) +void vsuxei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i32mf2_m))) +void vsuxei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m1))) +vint8m1_t vasub(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m1_m))) +vint8m1_t vasub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m2))) +vint8m2_t vasub(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m2_m))) +vint8m2_t vasub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m4))) +vint8m4_t vasub(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m4_m))) +vint8m4_t vasub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m8))) +vint8m8_t vasub(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8m8_m))) +vint8m8_t vasub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8mf2))) +vint8mf2_t vasub(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8mf2_m))) +vint8mf2_t vasub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8mf4))) +vint8mf4_t vasub(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8mf4_m))) +vint8mf4_t vasub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8mf8))) +vint8mf8_t vasub(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i8mf8_m))) +vint8mf8_t vasub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m1))) +vint16m1_t vasub(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m1_m))) +vint16m1_t vasub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m2))) +vint16m2_t vasub(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m2_m))) +vint16m2_t vasub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m4))) +vint16m4_t vasub(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m4_m))) +vint16m4_t vasub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m8))) +vint16m8_t vasub(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16m8_m))) +vint16m8_t vasub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16mf2))) +vint16mf2_t vasub(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16mf2_m))) +vint16mf2_t vasub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16mf4))) +vint16mf4_t vasub(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i16mf4_m))) +vint16mf4_t vasub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m1))) +vint32m1_t vasub(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m1_m))) +vint32m1_t vasub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m2))) +vint32m2_t vasub(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m2_m))) +vint32m2_t vasub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m4))) +vint32m4_t vasub(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m4_m))) +vint32m4_t vasub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m8))) +vint32m8_t vasub(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32m8_m))) +vint32m8_t vasub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32mf2))) +vint32mf2_t vasub(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i32mf2_m))) +vint32mf2_t vasub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m1))) +vint64m1_t vasub(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m1_m))) +vint64m1_t vasub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m2))) +vint64m2_t vasub(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m2_m))) +vint64m2_t vasub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m4))) +vint64m4_t vasub(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m4_m))) +vint64m4_t vasub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m8))) +vint64m8_t vasub(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vv_i64m8_m))) +vint64m8_t vasub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m1))) +vint8m1_t vasub(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m1_m))) +vint8m1_t vasub(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m2))) +vint8m2_t vasub(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m2_m))) +vint8m2_t vasub(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m4))) +vint8m4_t vasub(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m4_m))) +vint8m4_t vasub(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m8))) +vint8m8_t vasub(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8m8_m))) +vint8m8_t vasub(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8mf2))) +vint8mf2_t vasub(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8mf2_m))) +vint8mf2_t vasub(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8mf4))) +vint8mf4_t vasub(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8mf4_m))) +vint8mf4_t vasub(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8mf8))) +vint8mf8_t vasub(vint8mf8_t op0, int8_t op1, 
size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i8mf8_m))) +vint8mf8_t vasub(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m1))) +vint16m1_t vasub(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m1_m))) +vint16m1_t vasub(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m2))) +vint16m2_t vasub(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m2_m))) +vint16m2_t vasub(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m4))) +vint16m4_t vasub(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m4_m))) +vint16m4_t vasub(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m8))) +vint16m8_t vasub(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16m8_m))) +vint16m8_t vasub(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16mf2))) +vint16mf2_t vasub(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16mf2_m))) +vint16mf2_t vasub(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16mf4))) +vint16mf4_t vasub(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i16mf4_m))) +vint16mf4_t vasub(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m1))) +vint32m1_t vasub(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m1_m))) +vint32m1_t vasub(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m2))) +vint32m2_t vasub(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m2_m))) +vint32m2_t vasub(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m4))) +vint32m4_t vasub(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m4_m))) +vint32m4_t vasub(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m8))) +vint32m8_t vasub(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32m8_m))) +vint32m8_t vasub(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded 
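// The vector-scalar overloads above take a plain int8_t..int64_t second
// source (matching the element width) that is splat across all active
// elements, instead of a second vector register.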
__attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32mf2))) +vint32mf2_t vasub(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i32mf2_m))) +vint32mf2_t vasub(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m1))) +vint64m1_t vasub(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m1_m))) +vint64m1_t vasub(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m2))) +vint64m2_t vasub(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m2_m))) +vint64m2_t vasub(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m4))) +vint64m4_t vasub(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m4_m))) +vint64m4_t vasub(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m8))) +vint64m8_t vasub(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vasub_vx_i64m8_m))) +vint64m8_t vasub(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m1))) +vint8m1_t vsmul(vint8m1_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m1_m))) +vint8m1_t vsmul(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m2))) +vint8m2_t vsmul(vint8m2_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m2_m))) +vint8m2_t vsmul(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m4))) +vint8m4_t vsmul(vint8m4_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m4_m))) +vint8m4_t vsmul(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m8))) +vint8m8_t vsmul(vint8m8_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8m8_m))) +vint8m8_t vsmul(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8mf2))) +vint8mf2_t vsmul(vint8mf2_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8mf2_m))) +vint8mf2_t vsmul(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8mf4))) +vint8mf4_t vsmul(vint8mf4_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8mf4_m))) +vint8mf4_t vsmul(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, 
vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8mf8))) +vint8mf8_t vsmul(vint8mf8_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i8mf8_m))) +vint8mf8_t vsmul(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m1))) +vint16m1_t vsmul(vint16m1_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m1_m))) +vint16m1_t vsmul(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m2))) +vint16m2_t vsmul(vint16m2_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m2_m))) +vint16m2_t vsmul(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m4))) +vint16m4_t vsmul(vint16m4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m4_m))) +vint16m4_t vsmul(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m8))) +vint16m8_t vsmul(vint16m8_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16m8_m))) +vint16m8_t vsmul(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16mf2))) +vint16mf2_t vsmul(vint16mf2_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16mf2_m))) +vint16mf2_t vsmul(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16mf4))) +vint16mf4_t vsmul(vint16mf4_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i16mf4_m))) +vint16mf4_t vsmul(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m1))) +vint32m1_t vsmul(vint32m1_t op0, vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m1_m))) +vint32m1_t vsmul(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m2))) +vint32m2_t vsmul(vint32m2_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m2_m))) +vint32m2_t vsmul(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m4))) +vint32m4_t vsmul(vint32m4_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m4_m))) +vint32m4_t vsmul(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m8))) +vint32m8_t vsmul(vint32m8_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded 
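// vsmul is the signed fractional multiply with rounding and saturation:
// the double-width product is shifted right by SEW-1 bits (rounded per
// vxrm) and clipped; the only overflowing case is multiplying the most
// negative value by itself.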
__attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32m8_m))) +vint32m8_t vsmul(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32mf2))) +vint32mf2_t vsmul(vint32mf2_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i32mf2_m))) +vint32mf2_t vsmul(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m1))) +vint64m1_t vsmul(vint64m1_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m1_m))) +vint64m1_t vsmul(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m2))) +vint64m2_t vsmul(vint64m2_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m2_m))) +vint64m2_t vsmul(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m4))) +vint64m4_t vsmul(vint64m4_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m4_m))) +vint64m4_t vsmul(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m8))) +vint64m8_t vsmul(vint64m8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vv_i64m8_m))) +vint64m8_t vsmul(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m1))) +vint8m1_t vsmul(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m1_m))) +vint8m1_t vsmul(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m2))) +vint8m2_t vsmul(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m2_m))) +vint8m2_t vsmul(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m4))) +vint8m4_t vsmul(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m4_m))) +vint8m4_t vsmul(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m8))) +vint8m8_t vsmul(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8m8_m))) +vint8m8_t vsmul(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8mf2))) +vint8mf2_t vsmul(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8mf2_m))) +vint8mf2_t vsmul(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8mf4))) +vint8mf4_t vsmul(vint8mf4_t op0, 
int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8mf4_m))) +vint8mf4_t vsmul(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8mf8))) +vint8mf8_t vsmul(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i8mf8_m))) +vint8mf8_t vsmul(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m1))) +vint16m1_t vsmul(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m1_m))) +vint16m1_t vsmul(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m2))) +vint16m2_t vsmul(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m2_m))) +vint16m2_t vsmul(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m4))) +vint16m4_t vsmul(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m4_m))) +vint16m4_t vsmul(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m8))) +vint16m8_t vsmul(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16m8_m))) +vint16m8_t vsmul(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16mf2))) +vint16mf2_t vsmul(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16mf2_m))) +vint16mf2_t vsmul(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16mf4))) +vint16mf4_t vsmul(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i16mf4_m))) +vint16mf4_t vsmul(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m1))) +vint32m1_t vsmul(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m1_m))) +vint32m1_t vsmul(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m2))) +vint32m2_t vsmul(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m2_m))) +vint32m2_t vsmul(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m4))) +vint32m4_t vsmul(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m4_m))) +vint32m4_t vsmul(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded 
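// None of these fixed-point intrinsics take a rounding-mode argument:
// vasub, vsmul, vssrl, vssra, vnclipu and vnclip all round according to
// the vxrm CSR, and saturating forms record overflow in the vxsat CSR.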
__attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m8))) +vint32m8_t vsmul(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32m8_m))) +vint32m8_t vsmul(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32mf2))) +vint32mf2_t vsmul(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i32mf2_m))) +vint32mf2_t vsmul(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m1))) +vint64m1_t vsmul(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m1_m))) +vint64m1_t vsmul(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m2))) +vint64m2_t vsmul(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m2_m))) +vint64m2_t vsmul(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m4))) +vint64m4_t vsmul(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m4_m))) +vint64m4_t vsmul(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m8))) +vint64m8_t vsmul(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsmul_vx_i64m8_m))) +vint64m8_t vsmul(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m1))) +vuint8m1_t vssrl(vuint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m1_m))) +vuint8m1_t vssrl(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m2))) +vuint8m2_t vssrl(vuint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m2_m))) +vuint8m2_t vssrl(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m4))) +vuint8m4_t vssrl(vuint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m4_m))) +vuint8m4_t vssrl(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m8))) +vuint8m8_t vssrl(vuint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8m8_m))) +vuint8m8_t vssrl(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8mf2))) +vuint8mf2_t vssrl(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8mf2_m))) +vuint8mf2_t vssrl(vbool16_t op0, vuint8mf2_t 
op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8mf4))) +vuint8mf4_t vssrl(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8mf4_m))) +vuint8mf4_t vssrl(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8mf8))) +vuint8mf8_t vssrl(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u8mf8_m))) +vuint8mf8_t vssrl(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m1))) +vuint16m1_t vssrl(vuint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m1_m))) +vuint16m1_t vssrl(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m2))) +vuint16m2_t vssrl(vuint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m2_m))) +vuint16m2_t vssrl(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m4))) +vuint16m4_t vssrl(vuint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m4_m))) +vuint16m4_t vssrl(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m8))) +vuint16m8_t vssrl(vuint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16m8_m))) +vuint16m8_t vssrl(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16mf2))) +vuint16mf2_t vssrl(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16mf2_m))) +vuint16mf2_t vssrl(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16mf4))) +vuint16mf4_t vssrl(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u16mf4_m))) +vuint16mf4_t vssrl(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m1))) +vuint32m1_t vssrl(vuint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m1_m))) +vuint32m1_t vssrl(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m2))) +vuint32m2_t vssrl(vuint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m2_m))) +vuint32m2_t vssrl(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m4))) 
+vuint32m4_t vssrl(vuint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m4_m))) +vuint32m4_t vssrl(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m8))) +vuint32m8_t vssrl(vuint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32m8_m))) +vuint32m8_t vssrl(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32mf2))) +vuint32mf2_t vssrl(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u32mf2_m))) +vuint32mf2_t vssrl(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m1))) +vuint64m1_t vssrl(vuint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m1_m))) +vuint64m1_t vssrl(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m2))) +vuint64m2_t vssrl(vuint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m2_m))) +vuint64m2_t vssrl(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m4))) +vuint64m4_t vssrl(vuint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m4_m))) +vuint64m4_t vssrl(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m8))) +vuint64m8_t vssrl(vuint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vv_u64m8_m))) +vuint64m8_t vssrl(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m1))) +vuint8m1_t vssrl(vuint8m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m1_m))) +vuint8m1_t vssrl(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m2))) +vuint8m2_t vssrl(vuint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m2_m))) +vuint8m2_t vssrl(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m4))) +vuint8m4_t vssrl(vuint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m4_m))) +vuint8m4_t vssrl(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m8))) +vuint8m8_t vssrl(vuint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8m8_m))) +vuint8m8_t vssrl(vbool1_t op0, vuint8m8_t op1, 
vuint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8mf2))) +vuint8mf2_t vssrl(vuint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8mf2_m))) +vuint8mf2_t vssrl(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8mf4))) +vuint8mf4_t vssrl(vuint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8mf4_m))) +vuint8mf4_t vssrl(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8mf8))) +vuint8mf8_t vssrl(vuint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u8mf8_m))) +vuint8mf8_t vssrl(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m1))) +vuint16m1_t vssrl(vuint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m1_m))) +vuint16m1_t vssrl(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m2))) +vuint16m2_t vssrl(vuint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m2_m))) +vuint16m2_t vssrl(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m4))) +vuint16m4_t vssrl(vuint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m4_m))) +vuint16m4_t vssrl(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m8))) +vuint16m8_t vssrl(vuint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16m8_m))) +vuint16m8_t vssrl(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16mf2))) +vuint16mf2_t vssrl(vuint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16mf2_m))) +vuint16mf2_t vssrl(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16mf4))) +vuint16mf4_t vssrl(vuint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u16mf4_m))) +vuint16mf4_t vssrl(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m1))) +vuint32m1_t vssrl(vuint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m1_m))) +vuint32m1_t vssrl(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m2))) +vuint32m2_t vssrl(vuint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded 
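// vssrl is the scaling (rounding) logical right shift for unsigned
// vectors; the shift amount comes from a vector operand in the overloads
// above or from a size_t scalar here, and only its low log2(SEW) bits are
// used.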
__attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m2_m))) +vuint32m2_t vssrl(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m4))) +vuint32m4_t vssrl(vuint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m4_m))) +vuint32m4_t vssrl(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m8))) +vuint32m8_t vssrl(vuint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32m8_m))) +vuint32m8_t vssrl(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32mf2))) +vuint32mf2_t vssrl(vuint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u32mf2_m))) +vuint32mf2_t vssrl(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m1))) +vuint64m1_t vssrl(vuint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m1_m))) +vuint64m1_t vssrl(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m2))) +vuint64m2_t vssrl(vuint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m2_m))) +vuint64m2_t vssrl(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m4))) +vuint64m4_t vssrl(vuint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m4_m))) +vuint64m4_t vssrl(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m8))) +vuint64m8_t vssrl(vuint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssrl_vx_u64m8_m))) +vuint64m8_t vssrl(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m1))) +vint8m1_t vssra(vint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m1_m))) +vint8m1_t vssra(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m2))) +vint8m2_t vssra(vint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m2_m))) +vint8m2_t vssra(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m4))) +vint8m4_t vssra(vint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m4_m))) +vint8m4_t vssra(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m8))) +vint8m8_t 
vssra(vint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8m8_m))) +vint8m8_t vssra(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8mf2))) +vint8mf2_t vssra(vint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8mf2_m))) +vint8mf2_t vssra(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8mf4))) +vint8mf4_t vssra(vint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8mf4_m))) +vint8mf4_t vssra(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8mf8))) +vint8mf8_t vssra(vint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i8mf8_m))) +vint8mf8_t vssra(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m1))) +vint16m1_t vssra(vint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m1_m))) +vint16m1_t vssra(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m2))) +vint16m2_t vssra(vint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m2_m))) +vint16m2_t vssra(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m4))) +vint16m4_t vssra(vint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m4_m))) +vint16m4_t vssra(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m8))) +vint16m8_t vssra(vint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16m8_m))) +vint16m8_t vssra(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16mf2))) +vint16mf2_t vssra(vint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16mf2_m))) +vint16mf2_t vssra(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16mf4))) +vint16mf4_t vssra(vint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i16mf4_m))) +vint16mf4_t vssra(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m1))) +vint32m1_t vssra(vint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m1_m))) +vint32m1_t vssra(vbool32_t op0, vint32m1_t op1, 
vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m2))) +vint32m2_t vssra(vint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m2_m))) +vint32m2_t vssra(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m4))) +vint32m4_t vssra(vint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m4_m))) +vint32m4_t vssra(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m8))) +vint32m8_t vssra(vint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32m8_m))) +vint32m8_t vssra(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32mf2))) +vint32mf2_t vssra(vint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i32mf2_m))) +vint32mf2_t vssra(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m1))) +vint64m1_t vssra(vint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m1_m))) +vint64m1_t vssra(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m2))) +vint64m2_t vssra(vint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m2_m))) +vint64m2_t vssra(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m4))) +vint64m4_t vssra(vint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m4_m))) +vint64m4_t vssra(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m8))) +vint64m8_t vssra(vint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vv_i64m8_m))) +vint64m8_t vssra(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m1))) +vint8m1_t vssra(vint8m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m1_m))) +vint8m1_t vssra(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m2))) +vint8m2_t vssra(vint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m2_m))) +vint8m2_t vssra(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m4))) +vint8m4_t vssra(vint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded 
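// vssra is the arithmetic counterpart of vssrl for signed vectors: the
// sign bit fills the vacated positions and the shifted-out bits are
// rounded per vxrm.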
__attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m4_m))) +vint8m4_t vssra(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m8))) +vint8m8_t vssra(vint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8m8_m))) +vint8m8_t vssra(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8mf2))) +vint8mf2_t vssra(vint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8mf2_m))) +vint8mf2_t vssra(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8mf4))) +vint8mf4_t vssra(vint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8mf4_m))) +vint8mf4_t vssra(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8mf8))) +vint8mf8_t vssra(vint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i8mf8_m))) +vint8mf8_t vssra(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m1))) +vint16m1_t vssra(vint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m1_m))) +vint16m1_t vssra(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m2))) +vint16m2_t vssra(vint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m2_m))) +vint16m2_t vssra(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m4))) +vint16m4_t vssra(vint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m4_m))) +vint16m4_t vssra(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m8))) +vint16m8_t vssra(vint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16m8_m))) +vint16m8_t vssra(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16mf2))) +vint16mf2_t vssra(vint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16mf2_m))) +vint16mf2_t vssra(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16mf4))) +vint16mf4_t vssra(vint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i16mf4_m))) +vint16mf4_t vssra(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m1))) +vint32m1_t vssra(vint32m1_t op0, size_t op1, size_t op2); + 
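// A minimal usage sketch of the overloads above (illustrative only; this
// helper is not part of the generated header and its name is made up).
// It multiplies two Q15 vectors with vsmul and applies a further
// sign-preserving scale-down with vssra, assuming the caller obtained vl
// from a vsetvl intrinsic and programmed vxrm with the desired rounding.
static inline vint16m1_t __q15_mul_scaled_sketch(vint16m1_t a, vint16m1_t b,
                                                 size_t shift, size_t vl) {
  vint16m1_t p = vsmul(a, b, vl); // (a * b) >> 15, rounded and saturated
  return vssra(p, shift, vl);     // extra scale-down, arithmetic shift
}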
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m1_m))) +vint32m1_t vssra(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m2))) +vint32m2_t vssra(vint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m2_m))) +vint32m2_t vssra(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m4))) +vint32m4_t vssra(vint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m4_m))) +vint32m4_t vssra(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m8))) +vint32m8_t vssra(vint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32m8_m))) +vint32m8_t vssra(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32mf2))) +vint32mf2_t vssra(vint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i32mf2_m))) +vint32mf2_t vssra(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m1))) +vint64m1_t vssra(vint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m1_m))) +vint64m1_t vssra(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m2))) +vint64m2_t vssra(vint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m2_m))) +vint64m2_t vssra(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m4))) +vint64m4_t vssra(vint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m4_m))) +vint64m4_t vssra(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m8))) +vint64m8_t vssra(vint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssra_vx_i64m8_m))) +vint64m8_t vssra(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8m1))) +vuint8m1_t vnclipu(vuint16m2_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8m1_m))) +vuint8m1_t vnclipu(vbool8_t op0, vuint8m1_t op1, vuint16m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8m2))) +vuint8m2_t vnclipu(vuint16m4_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8m2_m))) +vuint8m2_t vnclipu(vbool4_t op0, vuint8m2_t op1, vuint16m4_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8m4))) 
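// vnclipu narrows a 2*SEW-wide unsigned source down to SEW bits: the wide
// element is shifted right by the second operand with vxrm rounding, then
// saturated to the unsigned SEW range (setting vxsat on clip). The result
// and mask types follow the narrow element width.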
+vuint8m4_t vnclipu(vuint16m8_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8m4_m))) +vuint8m4_t vnclipu(vbool2_t op0, vuint8m4_t op1, vuint16m8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8mf2))) +vuint8mf2_t vnclipu(vuint16m1_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8mf2_m))) +vuint8mf2_t vnclipu(vbool16_t op0, vuint8mf2_t op1, vuint16m1_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8mf4))) +vuint8mf4_t vnclipu(vuint16mf2_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8mf4_m))) +vuint8mf4_t vnclipu(vbool32_t op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8mf8))) +vuint8mf8_t vnclipu(vuint16mf4_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u8mf8_m))) +vuint8mf8_t vnclipu(vbool64_t op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16m1))) +vuint16m1_t vnclipu(vuint32m2_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16m1_m))) +vuint16m1_t vnclipu(vbool16_t op0, vuint16m1_t op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16m2))) +vuint16m2_t vnclipu(vuint32m4_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16m2_m))) +vuint16m2_t vnclipu(vbool8_t op0, vuint16m2_t op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16m4))) +vuint16m4_t vnclipu(vuint32m8_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16m4_m))) +vuint16m4_t vnclipu(vbool4_t op0, vuint16m4_t op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16mf2))) +vuint16mf2_t vnclipu(vuint32m1_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16mf2_m))) +vuint16mf2_t vnclipu(vbool32_t op0, vuint16mf2_t op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16mf4))) +vuint16mf4_t vnclipu(vuint32mf2_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u16mf4_m))) +vuint16mf4_t vnclipu(vbool64_t op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32m1))) +vuint32m1_t vnclipu(vuint64m2_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32m1_m))) +vuint32m1_t vnclipu(vbool32_t op0, vuint32m1_t op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32m2))) +vuint32m2_t vnclipu(vuint64m4_t op0, vuint32m2_t op1, size_t 
op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32m2_m))) +vuint32m2_t vnclipu(vbool16_t op0, vuint32m2_t op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32m4))) +vuint32m4_t vnclipu(vuint64m8_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32m4_m))) +vuint32m4_t vnclipu(vbool8_t op0, vuint32m4_t op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32mf2))) +vuint32mf2_t vnclipu(vuint64m1_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wv_u32mf2_m))) +vuint32mf2_t vnclipu(vbool64_t op0, vuint32mf2_t op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8m1))) +vuint8m1_t vnclipu(vuint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8m1_m))) +vuint8m1_t vnclipu(vbool8_t op0, vuint8m1_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8m2))) +vuint8m2_t vnclipu(vuint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8m2_m))) +vuint8m2_t vnclipu(vbool4_t op0, vuint8m2_t op1, vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8m4))) +vuint8m4_t vnclipu(vuint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8m4_m))) +vuint8m4_t vnclipu(vbool2_t op0, vuint8m4_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8mf2))) +vuint8mf2_t vnclipu(vuint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8mf2_m))) +vuint8mf2_t vnclipu(vbool16_t op0, vuint8mf2_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8mf4))) +vuint8mf4_t vnclipu(vuint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8mf4_m))) +vuint8mf4_t vnclipu(vbool32_t op0, vuint8mf4_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8mf8))) +vuint8mf8_t vnclipu(vuint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u8mf8_m))) +vuint8mf8_t vnclipu(vbool64_t op0, vuint8mf8_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16m1))) +vuint16m1_t vnclipu(vuint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16m1_m))) +vuint16m1_t vnclipu(vbool16_t op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16m2))) +vuint16m2_t vnclipu(vuint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16m2_m))) +vuint16m2_t vnclipu(vbool8_t op0, vuint16m2_t op1, 
vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16m4))) +vuint16m4_t vnclipu(vuint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16m4_m))) +vuint16m4_t vnclipu(vbool4_t op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16mf2))) +vuint16mf2_t vnclipu(vuint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16mf2_m))) +vuint16mf2_t vnclipu(vbool32_t op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16mf4))) +vuint16mf4_t vnclipu(vuint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u16mf4_m))) +vuint16mf4_t vnclipu(vbool64_t op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32m1))) +vuint32m1_t vnclipu(vuint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32m1_m))) +vuint32m1_t vnclipu(vbool32_t op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32m2))) +vuint32m2_t vnclipu(vuint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32m2_m))) +vuint32m2_t vnclipu(vbool16_t op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32m4))) +vuint32m4_t vnclipu(vuint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32m4_m))) +vuint32m4_t vnclipu(vbool8_t op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32mf2))) +vuint32mf2_t vnclipu(vuint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclipu_wx_u32mf2_m))) +vuint32mf2_t vnclipu(vbool64_t op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32m1))) +void vsuxei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32m1_m))) +void vsuxei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32m2))) +void vsuxei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32m2_m))) +void vsuxei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32m4))) +void vsuxei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32m4_m))) +void vsuxei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded 
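// vsuxei64 is the unordered indexed (scatter) store with 64-bit indices:
// the first operand is the base pointer, the vuint64 operand holds byte
// offsets, and the narrower vector is the data to store; the masked
// overload prepends the mask.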
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32mf2))) +void vsuxei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u32mf2_m))) +void vsuxei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8m1))) +vint8m1_t vnclip(vint16m2_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8m1_m))) +vint8m1_t vnclip(vbool8_t op0, vint8m1_t op1, vint16m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8m2))) +vint8m2_t vnclip(vint16m4_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8m2_m))) +vint8m2_t vnclip(vbool4_t op0, vint8m2_t op1, vint16m4_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8m4))) +vint8m4_t vnclip(vint16m8_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8m4_m))) +vint8m4_t vnclip(vbool2_t op0, vint8m4_t op1, vint16m8_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8mf2))) +vint8mf2_t vnclip(vint16m1_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8mf2_m))) +vint8mf2_t vnclip(vbool16_t op0, vint8mf2_t op1, vint16m1_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8mf4))) +vint8mf4_t vnclip(vint16mf2_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8mf4_m))) +vint8mf4_t vnclip(vbool32_t op0, vint8mf4_t op1, vint16mf2_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8mf8))) +vint8mf8_t vnclip(vint16mf4_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i8mf8_m))) +vint8mf8_t vnclip(vbool64_t op0, vint8mf8_t op1, vint16mf4_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16m1))) +vint16m1_t vnclip(vint32m2_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16m1_m))) +vint16m1_t vnclip(vbool16_t op0, vint16m1_t op1, vint32m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16m2))) +vint16m2_t vnclip(vint32m4_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16m2_m))) +vint16m2_t vnclip(vbool8_t op0, vint16m2_t op1, vint32m4_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16m4))) +vint16m4_t vnclip(vint32m8_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16m4_m))) +vint16m4_t vnclip(vbool4_t op0, vint16m4_t op1, vint32m8_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16mf2))) +vint16mf2_t vnclip(vint32m1_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16mf2_m))) +vint16mf2_t vnclip(vbool32_t op0, vint16mf2_t op1, vint32m1_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16mf4))) +vint16mf4_t vnclip(vint32mf2_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i16mf4_m))) +vint16mf4_t vnclip(vbool64_t op0, vint16mf4_t op1, vint32mf2_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32m1))) +vint32m1_t vnclip(vint64m2_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32m1_m))) +vint32m1_t vnclip(vbool32_t op0, vint32m1_t op1, vint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32m2))) +vint32m2_t vnclip(vint64m4_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32m2_m))) +vint32m2_t vnclip(vbool16_t op0, vint32m2_t op1, vint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32m4))) +vint32m4_t vnclip(vint64m8_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32m4_m))) +vint32m4_t vnclip(vbool8_t op0, vint32m4_t op1, vint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32mf2))) +vint32mf2_t vnclip(vint64m1_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wv_i32mf2_m))) +vint32mf2_t vnclip(vbool64_t op0, vint32mf2_t op1, vint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8m1))) +vint8m1_t vnclip(vint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8m1_m))) +vint8m1_t vnclip(vbool8_t op0, vint8m1_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8m2))) +vint8m2_t vnclip(vint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8m2_m))) +vint8m2_t vnclip(vbool4_t op0, vint8m2_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8m4))) +vint8m4_t vnclip(vint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8m4_m))) +vint8m4_t vnclip(vbool2_t op0, vint8m4_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8mf2))) +vint8mf2_t vnclip(vint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8mf2_m))) +vint8mf2_t vnclip(vbool16_t op0, vint8mf2_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8mf4))) +vint8mf4_t vnclip(vint16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8mf4_m))) +vint8mf4_t vnclip(vbool32_t op0, vint8mf4_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8mf8))) +vint8mf8_t vnclip(vint16mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i8mf8_m))) +vint8mf8_t vnclip(vbool64_t op0, vint8mf8_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16m1))) +vint16m1_t vnclip(vint32m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16m1_m))) +vint16m1_t vnclip(vbool16_t op0, vint16m1_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16m2))) +vint16m2_t vnclip(vint32m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16m2_m))) +vint16m2_t vnclip(vbool8_t op0, vint16m2_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16m4))) +vint16m4_t vnclip(vint32m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16m4_m))) +vint16m4_t vnclip(vbool4_t op0, vint16m4_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16mf2))) +vint16mf2_t vnclip(vint32m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16mf2_m))) +vint16mf2_t vnclip(vbool32_t op0, vint16mf2_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16mf4))) +vint16mf4_t vnclip(vint32mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i16mf4_m))) +vint16mf4_t vnclip(vbool64_t op0, vint16mf4_t op1, vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32m1))) +vint32m1_t vnclip(vint64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32m1_m))) +vint32m1_t vnclip(vbool32_t op0, vint32m1_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32m2))) +vint32m2_t vnclip(vint64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32m2_m))) +vint32m2_t vnclip(vbool16_t op0, vint32m2_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32m4))) +vint32m4_t vnclip(vint64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32m4_m))) +vint32m4_t vnclip(vbool8_t op0, vint32m4_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32mf2))) +vint32mf2_t vnclip(vint64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnclip_wx_i32mf2_m))) +vint32mf2_t vnclip(vbool64_t op0, vint32mf2_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m1))) +void vsuxei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m1_m))) +void 
vsuxei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m2))) +void vsuxei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m2_m))) +void vsuxei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m4))) +void vsuxei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m4_m))) +void vsuxei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m8))) +void vsuxei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_i64m8_m))) +void vsuxei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m1))) +void vsuxei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m1_m))) +void vsuxei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m2))) +void vsuxei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m2_m))) +void vsuxei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m4))) +void vsuxei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m4_m))) +void vsuxei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m8))) +void vsuxei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u64m8_m))) +void vsuxei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m1))) +void vsuxei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m1_m))) +void vsuxei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m2))) +void vsuxei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m2_m))) +void vsuxei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m4))) +void vsuxei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m4_m))) +void vsuxei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m8))) +void vsuxei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_i64m8_m))) +void vsuxei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m1))) +void vsuxei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m1_m))) +void vsuxei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m2))) +void vsuxei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m2_m))) +void vsuxei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m4))) +void vsuxei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m4_m))) +void vsuxei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m8))) +void vsuxei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_u64m8_m))) +void vsuxei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m1))) +void vsuxei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m1_m))) +void vsuxei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m2))) +void vsuxei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m2_m))) +void vsuxei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m4))) +void vsuxei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m4_m))) +void vsuxei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m8))) +void vsuxei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_i64m8_m))) +void vsuxei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m1))) +void vsuxei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m1_m))) +void vsuxei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m2))) +void vsuxei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m2_m))) +void vsuxei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m4))) +void vsuxei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m4_m))) +void vsuxei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m8))) +void vsuxei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_u64m8_m))) +void vsuxei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m1))) +void vsuxei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m1_m))) +void vsuxei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m2))) +void vsuxei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m2_m))) +void vsuxei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m4))) +void vsuxei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m4_m))) +void vsuxei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m8))) +void vsuxei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_i64m8_m))) +void vsuxei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m1_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m1_i8m1_m))) +vint8m1_t vredsum(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m2_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m2_i8m1_m))) +vint8m1_t vredsum(vbool4_t op0, vint8m1_t op1, vint8m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m4_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m4_i8m1_m))) +vint8m1_t vredsum(vbool2_t op0, vint8m1_t op1, vint8m4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m8_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8m8_i8m1_m))) +vint8m1_t vredsum(vbool1_t op0, vint8m1_t op1, vint8m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8mf2_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8mf2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8mf2_i8m1_m))) +vint8m1_t vredsum(vbool16_t op0, vint8m1_t op1, vint8mf2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8mf4_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8mf4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8mf4_i8m1_m))) +vint8m1_t vredsum(vbool32_t op0, vint8m1_t op1, vint8mf4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8mf8_i8m1))) +vint8m1_t vredsum(vint8m1_t op0, vint8mf8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i8mf8_i8m1_m))) +vint8m1_t vredsum(vbool64_t op0, vint8m1_t op1, vint8mf8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m1_i16m1))) +vint16m1_t vredsum(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m1_i16m1_m))) +vint16m1_t vredsum(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m2_i16m1))) +vint16m1_t vredsum(vint16m1_t op0, vint16m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m2_i16m1_m))) +vint16m1_t vredsum(vbool8_t op0, vint16m1_t op1, vint16m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m4_i16m1))) +vint16m1_t vredsum(vint16m1_t op0, vint16m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m4_i16m1_m))) +vint16m1_t vredsum(vbool4_t op0, vint16m1_t op1, vint16m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m8_i16m1))) +vint16m1_t vredsum(vint16m1_t op0, vint16m8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16m8_i16m1_m))) +vint16m1_t vredsum(vbool2_t op0, vint16m1_t op1, vint16m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16mf2_i16m1))) +vint16m1_t vredsum(vint16m1_t op0, vint16mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16mf2_i16m1_m))) +vint16m1_t vredsum(vbool32_t op0, vint16m1_t op1, vint16mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16mf4_i16m1))) +vint16m1_t vredsum(vint16m1_t op0, vint16mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i16mf4_i16m1_m))) +vint16m1_t 
vredsum(vbool64_t op0, vint16m1_t op1, vint16mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m1_i32m1))) +vint32m1_t vredsum(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m1_i32m1_m))) +vint32m1_t vredsum(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m2_i32m1))) +vint32m1_t vredsum(vint32m1_t op0, vint32m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m2_i32m1_m))) +vint32m1_t vredsum(vbool16_t op0, vint32m1_t op1, vint32m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m4_i32m1))) +vint32m1_t vredsum(vint32m1_t op0, vint32m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m4_i32m1_m))) +vint32m1_t vredsum(vbool8_t op0, vint32m1_t op1, vint32m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m8_i32m1))) +vint32m1_t vredsum(vint32m1_t op0, vint32m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32m8_i32m1_m))) +vint32m1_t vredsum(vbool4_t op0, vint32m1_t op1, vint32m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32mf2_i32m1))) +vint32m1_t vredsum(vint32m1_t op0, vint32mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i32mf2_i32m1_m))) +vint32m1_t vredsum(vbool64_t op0, vint32m1_t op1, vint32mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m1_i64m1))) +vint64m1_t vredsum(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m1_i64m1_m))) +vint64m1_t vredsum(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m2_i64m1))) +vint64m1_t vredsum(vint64m1_t op0, vint64m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m2_i64m1_m))) +vint64m1_t vredsum(vbool32_t op0, vint64m1_t op1, vint64m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m4_i64m1))) +vint64m1_t vredsum(vint64m1_t op0, vint64m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m4_i64m1_m))) +vint64m1_t vredsum(vbool16_t op0, vint64m1_t op1, vint64m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m8_i64m1))) +vint64m1_t vredsum(vint64m1_t op0, vint64m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_i64m8_i64m1_m))) +vint64m1_t vredsum(vbool8_t op0, vint64m1_t op1, vint64m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m1_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8m1_t op1, 
vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m1_u8m1_m))) +vuint8m1_t vredsum(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m2_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m2_u8m1_m))) +vuint8m1_t vredsum(vbool4_t op0, vuint8m1_t op1, vuint8m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m4_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m4_u8m1_m))) +vuint8m1_t vredsum(vbool2_t op0, vuint8m1_t op1, vuint8m4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m8_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8m8_u8m1_m))) +vuint8m1_t vredsum(vbool1_t op0, vuint8m1_t op1, vuint8m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8mf2_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8mf2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8mf2_u8m1_m))) +vuint8m1_t vredsum(vbool16_t op0, vuint8m1_t op1, vuint8mf2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8mf4_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8mf4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8mf4_u8m1_m))) +vuint8m1_t vredsum(vbool32_t op0, vuint8m1_t op1, vuint8mf4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8mf8_u8m1))) +vuint8m1_t vredsum(vuint8m1_t op0, vuint8mf8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u8mf8_u8m1_m))) +vuint8m1_t vredsum(vbool64_t op0, vuint8m1_t op1, vuint8mf8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m1_u16m1))) +vuint16m1_t vredsum(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m1_u16m1_m))) +vuint16m1_t vredsum(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m2_u16m1))) +vuint16m1_t vredsum(vuint16m1_t op0, vuint16m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m2_u16m1_m))) +vuint16m1_t vredsum(vbool8_t op0, vuint16m1_t op1, vuint16m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m4_u16m1))) +vuint16m1_t vredsum(vuint16m1_t op0, vuint16m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m4_u16m1_m))) +vuint16m1_t vredsum(vbool4_t op0, vuint16m1_t op1, vuint16m4_t op2, vuint16m1_t op3, size_t op4); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m8_u16m1))) +vuint16m1_t vredsum(vuint16m1_t op0, vuint16m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16m8_u16m1_m))) +vuint16m1_t vredsum(vbool2_t op0, vuint16m1_t op1, vuint16m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16mf2_u16m1))) +vuint16m1_t vredsum(vuint16m1_t op0, vuint16mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16mf2_u16m1_m))) +vuint16m1_t vredsum(vbool32_t op0, vuint16m1_t op1, vuint16mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16mf4_u16m1))) +vuint16m1_t vredsum(vuint16m1_t op0, vuint16mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u16mf4_u16m1_m))) +vuint16m1_t vredsum(vbool64_t op0, vuint16m1_t op1, vuint16mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m1_u32m1))) +vuint32m1_t vredsum(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m1_u32m1_m))) +vuint32m1_t vredsum(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m2_u32m1))) +vuint32m1_t vredsum(vuint32m1_t op0, vuint32m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m2_u32m1_m))) +vuint32m1_t vredsum(vbool16_t op0, vuint32m1_t op1, vuint32m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m4_u32m1))) +vuint32m1_t vredsum(vuint32m1_t op0, vuint32m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m4_u32m1_m))) +vuint32m1_t vredsum(vbool8_t op0, vuint32m1_t op1, vuint32m4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m8_u32m1))) +vuint32m1_t vredsum(vuint32m1_t op0, vuint32m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32m8_u32m1_m))) +vuint32m1_t vredsum(vbool4_t op0, vuint32m1_t op1, vuint32m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32mf2_u32m1))) +vuint32m1_t vredsum(vuint32m1_t op0, vuint32mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u32mf2_u32m1_m))) +vuint32m1_t vredsum(vbool64_t op0, vuint32m1_t op1, vuint32mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m1_u64m1))) +vuint64m1_t vredsum(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m1_u64m1_m))) +vuint64m1_t vredsum(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m2_u64m1))) +vuint64m1_t vredsum(vuint64m1_t op0, vuint64m2_t op1, 
vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m2_u64m1_m))) +vuint64m1_t vredsum(vbool32_t op0, vuint64m1_t op1, vuint64m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m4_u64m1))) +vuint64m1_t vredsum(vuint64m1_t op0, vuint64m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m4_u64m1_m))) +vuint64m1_t vredsum(vbool16_t op0, vuint64m1_t op1, vuint64m4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m8_u64m1))) +vuint64m1_t vredsum(vuint64m1_t op0, vuint64m8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredsum_vs_u64m8_u64m1_m))) +vuint64m1_t vredsum(vbool8_t op0, vuint64m1_t op1, vuint64m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m1_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m1_u8m1_m))) +vuint8m1_t vredmaxu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m2_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m2_u8m1_m))) +vuint8m1_t vredmaxu(vbool4_t op0, vuint8m1_t op1, vuint8m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m4_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m4_u8m1_m))) +vuint8m1_t vredmaxu(vbool2_t op0, vuint8m1_t op1, vuint8m4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m8_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8m8_u8m1_m))) +vuint8m1_t vredmaxu(vbool1_t op0, vuint8m1_t op1, vuint8m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8mf2_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8mf2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8mf2_u8m1_m))) +vuint8m1_t vredmaxu(vbool16_t op0, vuint8m1_t op1, vuint8mf2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8mf4_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8mf4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8mf4_u8m1_m))) +vuint8m1_t vredmaxu(vbool32_t op0, vuint8m1_t op1, vuint8mf4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8mf8_u8m1))) +vuint8m1_t vredmaxu(vuint8m1_t op0, vuint8mf8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u8mf8_u8m1_m))) +vuint8m1_t vredmaxu(vbool64_t op0, vuint8m1_t op1, vuint8mf8_t op2, vuint8m1_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m1_u16m1))) +vuint16m1_t vredmaxu(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m1_u16m1_m))) +vuint16m1_t vredmaxu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m2_u16m1))) +vuint16m1_t vredmaxu(vuint16m1_t op0, vuint16m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m2_u16m1_m))) +vuint16m1_t vredmaxu(vbool8_t op0, vuint16m1_t op1, vuint16m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m4_u16m1))) +vuint16m1_t vredmaxu(vuint16m1_t op0, vuint16m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m4_u16m1_m))) +vuint16m1_t vredmaxu(vbool4_t op0, vuint16m1_t op1, vuint16m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m8_u16m1))) +vuint16m1_t vredmaxu(vuint16m1_t op0, vuint16m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16m8_u16m1_m))) +vuint16m1_t vredmaxu(vbool2_t op0, vuint16m1_t op1, vuint16m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16mf2_u16m1))) +vuint16m1_t vredmaxu(vuint16m1_t op0, vuint16mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16mf2_u16m1_m))) +vuint16m1_t vredmaxu(vbool32_t op0, vuint16m1_t op1, vuint16mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16mf4_u16m1))) +vuint16m1_t vredmaxu(vuint16m1_t op0, vuint16mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u16mf4_u16m1_m))) +vuint16m1_t vredmaxu(vbool64_t op0, vuint16m1_t op1, vuint16mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m1_u32m1))) +vuint32m1_t vredmaxu(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m1_u32m1_m))) +vuint32m1_t vredmaxu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m2_u32m1))) +vuint32m1_t vredmaxu(vuint32m1_t op0, vuint32m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m2_u32m1_m))) +vuint32m1_t vredmaxu(vbool16_t op0, vuint32m1_t op1, vuint32m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m4_u32m1))) +vuint32m1_t vredmaxu(vuint32m1_t op0, vuint32m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m4_u32m1_m))) +vuint32m1_t vredmaxu(vbool8_t op0, vuint32m1_t op1, vuint32m4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m8_u32m1))) 
+vuint32m1_t vredmaxu(vuint32m1_t op0, vuint32m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32m8_u32m1_m))) +vuint32m1_t vredmaxu(vbool4_t op0, vuint32m1_t op1, vuint32m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32mf2_u32m1))) +vuint32m1_t vredmaxu(vuint32m1_t op0, vuint32mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u32mf2_u32m1_m))) +vuint32m1_t vredmaxu(vbool64_t op0, vuint32m1_t op1, vuint32mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m1_u64m1))) +vuint64m1_t vredmaxu(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m1_u64m1_m))) +vuint64m1_t vredmaxu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m2_u64m1))) +vuint64m1_t vredmaxu(vuint64m1_t op0, vuint64m2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m2_u64m1_m))) +vuint64m1_t vredmaxu(vbool32_t op0, vuint64m1_t op1, vuint64m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m4_u64m1))) +vuint64m1_t vredmaxu(vuint64m1_t op0, vuint64m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m4_u64m1_m))) +vuint64m1_t vredmaxu(vbool16_t op0, vuint64m1_t op1, vuint64m4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m8_u64m1))) +vuint64m1_t vredmaxu(vuint64m1_t op0, vuint64m8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmaxu_vs_u64m8_u64m1_m))) +vuint64m1_t vredmaxu(vbool8_t op0, vuint64m1_t op1, vuint64m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m1_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m1_i8m1_m))) +vint8m1_t vredmax(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m2_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m2_i8m1_m))) +vint8m1_t vredmax(vbool4_t op0, vint8m1_t op1, vint8m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m4_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m4_i8m1_m))) +vint8m1_t vredmax(vbool2_t op0, vint8m1_t op1, vint8m4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m8_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8m8_i8m1_m))) +vint8m1_t vredmax(vbool1_t 
op0, vint8m1_t op1, vint8m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8mf2_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8mf2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8mf2_i8m1_m))) +vint8m1_t vredmax(vbool16_t op0, vint8m1_t op1, vint8mf2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8mf4_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8mf4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8mf4_i8m1_m))) +vint8m1_t vredmax(vbool32_t op0, vint8m1_t op1, vint8mf4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8mf8_i8m1))) +vint8m1_t vredmax(vint8m1_t op0, vint8mf8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i8mf8_i8m1_m))) +vint8m1_t vredmax(vbool64_t op0, vint8m1_t op1, vint8mf8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m1_i16m1))) +vint16m1_t vredmax(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m1_i16m1_m))) +vint16m1_t vredmax(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m2_i16m1))) +vint16m1_t vredmax(vint16m1_t op0, vint16m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m2_i16m1_m))) +vint16m1_t vredmax(vbool8_t op0, vint16m1_t op1, vint16m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m4_i16m1))) +vint16m1_t vredmax(vint16m1_t op0, vint16m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m4_i16m1_m))) +vint16m1_t vredmax(vbool4_t op0, vint16m1_t op1, vint16m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m8_i16m1))) +vint16m1_t vredmax(vint16m1_t op0, vint16m8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16m8_i16m1_m))) +vint16m1_t vredmax(vbool2_t op0, vint16m1_t op1, vint16m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16mf2_i16m1))) +vint16m1_t vredmax(vint16m1_t op0, vint16mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16mf2_i16m1_m))) +vint16m1_t vredmax(vbool32_t op0, vint16m1_t op1, vint16mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16mf4_i16m1))) +vint16m1_t vredmax(vint16m1_t op0, vint16mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i16mf4_i16m1_m))) +vint16m1_t vredmax(vbool64_t op0, vint16m1_t op1, vint16mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m1_i32m1))) +vint32m1_t vredmax(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m1_i32m1_m))) +vint32m1_t vredmax(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m2_i32m1))) +vint32m1_t vredmax(vint32m1_t op0, vint32m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m2_i32m1_m))) +vint32m1_t vredmax(vbool16_t op0, vint32m1_t op1, vint32m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m4_i32m1))) +vint32m1_t vredmax(vint32m1_t op0, vint32m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m4_i32m1_m))) +vint32m1_t vredmax(vbool8_t op0, vint32m1_t op1, vint32m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m8_i32m1))) +vint32m1_t vredmax(vint32m1_t op0, vint32m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32m8_i32m1_m))) +vint32m1_t vredmax(vbool4_t op0, vint32m1_t op1, vint32m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32mf2_i32m1))) +vint32m1_t vredmax(vint32m1_t op0, vint32mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i32mf2_i32m1_m))) +vint32m1_t vredmax(vbool64_t op0, vint32m1_t op1, vint32mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m1_i64m1))) +vint64m1_t vredmax(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m1_i64m1_m))) +vint64m1_t vredmax(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m2_i64m1))) +vint64m1_t vredmax(vint64m1_t op0, vint64m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m2_i64m1_m))) +vint64m1_t vredmax(vbool32_t op0, vint64m1_t op1, vint64m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m4_i64m1))) +vint64m1_t vredmax(vint64m1_t op0, vint64m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m4_i64m1_m))) +vint64m1_t vredmax(vbool16_t op0, vint64m1_t op1, vint64m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m8_i64m1))) +vint64m1_t vredmax(vint64m1_t op0, vint64m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmax_vs_i64m8_i64m1_m))) +vint64m1_t vredmax(vbool8_t op0, vint64m1_t op1, vint64m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m1_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m1_u8m1_m))) +vuint8m1_t vredminu(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m2_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m2_u8m1_m))) +vuint8m1_t vredminu(vbool4_t op0, vuint8m1_t op1, vuint8m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m4_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m4_u8m1_m))) +vuint8m1_t vredminu(vbool2_t op0, vuint8m1_t op1, vuint8m4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m8_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8m8_u8m1_m))) +vuint8m1_t vredminu(vbool1_t op0, vuint8m1_t op1, vuint8m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8mf2_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8mf2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8mf2_u8m1_m))) +vuint8m1_t vredminu(vbool16_t op0, vuint8m1_t op1, vuint8mf2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8mf4_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8mf4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8mf4_u8m1_m))) +vuint8m1_t vredminu(vbool32_t op0, vuint8m1_t op1, vuint8mf4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8mf8_u8m1))) +vuint8m1_t vredminu(vuint8m1_t op0, vuint8mf8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u8mf8_u8m1_m))) +vuint8m1_t vredminu(vbool64_t op0, vuint8m1_t op1, vuint8mf8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m1_u16m1))) +vuint16m1_t vredminu(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m1_u16m1_m))) +vuint16m1_t vredminu(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m2_u16m1))) +vuint16m1_t vredminu(vuint16m1_t op0, vuint16m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m2_u16m1_m))) +vuint16m1_t vredminu(vbool8_t op0, vuint16m1_t op1, vuint16m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m4_u16m1))) +vuint16m1_t vredminu(vuint16m1_t op0, vuint16m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m4_u16m1_m))) +vuint16m1_t vredminu(vbool4_t op0, vuint16m1_t op1, vuint16m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m8_u16m1))) +vuint16m1_t vredminu(vuint16m1_t op0, vuint16m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16m8_u16m1_m))) +vuint16m1_t vredminu(vbool2_t op0, vuint16m1_t op1, vuint16m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16mf2_u16m1))) +vuint16m1_t vredminu(vuint16m1_t op0, vuint16mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16mf2_u16m1_m))) +vuint16m1_t vredminu(vbool32_t op0, vuint16m1_t op1, vuint16mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16mf4_u16m1))) +vuint16m1_t vredminu(vuint16m1_t op0, vuint16mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u16mf4_u16m1_m))) +vuint16m1_t vredminu(vbool64_t op0, vuint16m1_t op1, vuint16mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m1_u32m1))) +vuint32m1_t vredminu(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m1_u32m1_m))) +vuint32m1_t vredminu(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m2_u32m1))) +vuint32m1_t vredminu(vuint32m1_t op0, vuint32m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m2_u32m1_m))) +vuint32m1_t vredminu(vbool16_t op0, vuint32m1_t op1, vuint32m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m4_u32m1))) +vuint32m1_t vredminu(vuint32m1_t op0, vuint32m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m4_u32m1_m))) +vuint32m1_t vredminu(vbool8_t op0, vuint32m1_t op1, vuint32m4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m8_u32m1))) +vuint32m1_t vredminu(vuint32m1_t op0, vuint32m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32m8_u32m1_m))) +vuint32m1_t vredminu(vbool4_t op0, vuint32m1_t op1, vuint32m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32mf2_u32m1))) +vuint32m1_t vredminu(vuint32m1_t op0, vuint32mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u32mf2_u32m1_m))) +vuint32m1_t vredminu(vbool64_t op0, vuint32m1_t op1, vuint32mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m1_u64m1))) +vuint64m1_t vredminu(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m1_u64m1_m))) +vuint64m1_t vredminu(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m2_u64m1))) +vuint64m1_t vredminu(vuint64m1_t op0, vuint64m2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m2_u64m1_m))) +vuint64m1_t vredminu(vbool32_t op0, 
vuint64m1_t op1, vuint64m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m4_u64m1))) +vuint64m1_t vredminu(vuint64m1_t op0, vuint64m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m4_u64m1_m))) +vuint64m1_t vredminu(vbool16_t op0, vuint64m1_t op1, vuint64m4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m8_u64m1))) +vuint64m1_t vredminu(vuint64m1_t op0, vuint64m8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredminu_vs_u64m8_u64m1_m))) +vuint64m1_t vredminu(vbool8_t op0, vuint64m1_t op1, vuint64m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m1_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m1_i8m1_m))) +vint8m1_t vredmin(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m2_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m2_i8m1_m))) +vint8m1_t vredmin(vbool4_t op0, vint8m1_t op1, vint8m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m4_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m4_i8m1_m))) +vint8m1_t vredmin(vbool2_t op0, vint8m1_t op1, vint8m4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m8_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8m8_i8m1_m))) +vint8m1_t vredmin(vbool1_t op0, vint8m1_t op1, vint8m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8mf2_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8mf2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8mf2_i8m1_m))) +vint8m1_t vredmin(vbool16_t op0, vint8m1_t op1, vint8mf2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8mf4_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8mf4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8mf4_i8m1_m))) +vint8m1_t vredmin(vbool32_t op0, vint8m1_t op1, vint8mf4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8mf8_i8m1))) +vint8m1_t vredmin(vint8m1_t op0, vint8mf8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i8mf8_i8m1_m))) +vint8m1_t vredmin(vbool64_t op0, vint8m1_t op1, vint8mf8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m1_i16m1))) +vint16m1_t vredmin(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m1_i16m1_m))) +vint16m1_t vredmin(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m2_i16m1))) +vint16m1_t vredmin(vint16m1_t op0, vint16m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m2_i16m1_m))) +vint16m1_t vredmin(vbool8_t op0, vint16m1_t op1, vint16m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m4_i16m1))) +vint16m1_t vredmin(vint16m1_t op0, vint16m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m4_i16m1_m))) +vint16m1_t vredmin(vbool4_t op0, vint16m1_t op1, vint16m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m8_i16m1))) +vint16m1_t vredmin(vint16m1_t op0, vint16m8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16m8_i16m1_m))) +vint16m1_t vredmin(vbool2_t op0, vint16m1_t op1, vint16m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16mf2_i16m1))) +vint16m1_t vredmin(vint16m1_t op0, vint16mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16mf2_i16m1_m))) +vint16m1_t vredmin(vbool32_t op0, vint16m1_t op1, vint16mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16mf4_i16m1))) +vint16m1_t vredmin(vint16m1_t op0, vint16mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i16mf4_i16m1_m))) +vint16m1_t vredmin(vbool64_t op0, vint16m1_t op1, vint16mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m1_i32m1))) +vint32m1_t vredmin(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m1_i32m1_m))) +vint32m1_t vredmin(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m2_i32m1))) +vint32m1_t vredmin(vint32m1_t op0, vint32m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m2_i32m1_m))) +vint32m1_t vredmin(vbool16_t op0, vint32m1_t op1, vint32m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m4_i32m1))) +vint32m1_t vredmin(vint32m1_t op0, vint32m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m4_i32m1_m))) +vint32m1_t vredmin(vbool8_t op0, vint32m1_t op1, vint32m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m8_i32m1))) +vint32m1_t vredmin(vint32m1_t op0, vint32m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32m8_i32m1_m))) +vint32m1_t vredmin(vbool4_t op0, vint32m1_t op1, vint32m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32mf2_i32m1))) +vint32m1_t vredmin(vint32m1_t op0, vint32mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i32mf2_i32m1_m))) +vint32m1_t vredmin(vbool64_t op0, vint32m1_t op1, vint32mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m1_i64m1))) +vint64m1_t vredmin(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m1_i64m1_m))) +vint64m1_t vredmin(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m2_i64m1))) +vint64m1_t vredmin(vint64m1_t op0, vint64m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m2_i64m1_m))) +vint64m1_t vredmin(vbool32_t op0, vint64m1_t op1, vint64m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m4_i64m1))) +vint64m1_t vredmin(vint64m1_t op0, vint64m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m4_i64m1_m))) +vint64m1_t vredmin(vbool16_t op0, vint64m1_t op1, vint64m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m8_i64m1))) +vint64m1_t vredmin(vint64m1_t op0, vint64m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredmin_vs_i64m8_i64m1_m))) +vint64m1_t vredmin(vbool8_t op0, vint64m1_t op1, vint64m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m1_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m1_i8m1_m))) +vint8m1_t vredand(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m2_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m2_i8m1_m))) +vint8m1_t vredand(vbool4_t op0, vint8m1_t op1, vint8m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m4_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m4_i8m1_m))) +vint8m1_t vredand(vbool2_t op0, vint8m1_t op1, vint8m4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m8_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8m8_i8m1_m))) +vint8m1_t vredand(vbool1_t op0, vint8m1_t op1, vint8m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8mf2_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8mf2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8mf2_i8m1_m))) +vint8m1_t vredand(vbool16_t op0, 
vint8m1_t op1, vint8mf2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8mf4_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8mf4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8mf4_i8m1_m))) +vint8m1_t vredand(vbool32_t op0, vint8m1_t op1, vint8mf4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8mf8_i8m1))) +vint8m1_t vredand(vint8m1_t op0, vint8mf8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i8mf8_i8m1_m))) +vint8m1_t vredand(vbool64_t op0, vint8m1_t op1, vint8mf8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m1_i16m1))) +vint16m1_t vredand(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m1_i16m1_m))) +vint16m1_t vredand(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m2_i16m1))) +vint16m1_t vredand(vint16m1_t op0, vint16m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m2_i16m1_m))) +vint16m1_t vredand(vbool8_t op0, vint16m1_t op1, vint16m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m4_i16m1))) +vint16m1_t vredand(vint16m1_t op0, vint16m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m4_i16m1_m))) +vint16m1_t vredand(vbool4_t op0, vint16m1_t op1, vint16m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m8_i16m1))) +vint16m1_t vredand(vint16m1_t op0, vint16m8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16m8_i16m1_m))) +vint16m1_t vredand(vbool2_t op0, vint16m1_t op1, vint16m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16mf2_i16m1))) +vint16m1_t vredand(vint16m1_t op0, vint16mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16mf2_i16m1_m))) +vint16m1_t vredand(vbool32_t op0, vint16m1_t op1, vint16mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16mf4_i16m1))) +vint16m1_t vredand(vint16m1_t op0, vint16mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i16mf4_i16m1_m))) +vint16m1_t vredand(vbool64_t op0, vint16m1_t op1, vint16mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m1_i32m1))) +vint32m1_t vredand(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m1_i32m1_m))) +vint32m1_t vredand(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m2_i32m1))) +vint32m1_t vredand(vint32m1_t op0, vint32m2_t op1, vint32m1_t op2, size_t op3); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m2_i32m1_m))) +vint32m1_t vredand(vbool16_t op0, vint32m1_t op1, vint32m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m4_i32m1))) +vint32m1_t vredand(vint32m1_t op0, vint32m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m4_i32m1_m))) +vint32m1_t vredand(vbool8_t op0, vint32m1_t op1, vint32m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m8_i32m1))) +vint32m1_t vredand(vint32m1_t op0, vint32m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32m8_i32m1_m))) +vint32m1_t vredand(vbool4_t op0, vint32m1_t op1, vint32m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32mf2_i32m1))) +vint32m1_t vredand(vint32m1_t op0, vint32mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i32mf2_i32m1_m))) +vint32m1_t vredand(vbool64_t op0, vint32m1_t op1, vint32mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m1_i64m1))) +vint64m1_t vredand(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m1_i64m1_m))) +vint64m1_t vredand(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m2_i64m1))) +vint64m1_t vredand(vint64m1_t op0, vint64m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m2_i64m1_m))) +vint64m1_t vredand(vbool32_t op0, vint64m1_t op1, vint64m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m4_i64m1))) +vint64m1_t vredand(vint64m1_t op0, vint64m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m4_i64m1_m))) +vint64m1_t vredand(vbool16_t op0, vint64m1_t op1, vint64m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m8_i64m1))) +vint64m1_t vredand(vint64m1_t op0, vint64m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_i64m8_i64m1_m))) +vint64m1_t vredand(vbool8_t op0, vint64m1_t op1, vint64m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m1_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m1_u8m1_m))) +vuint8m1_t vredand(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m2_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m2_u8m1_m))) +vuint8m1_t vredand(vbool4_t op0, vuint8m1_t op1, vuint8m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m4_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m4_u8m1_m))) +vuint8m1_t vredand(vbool2_t op0, vuint8m1_t op1, vuint8m4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m8_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8m8_u8m1_m))) +vuint8m1_t vredand(vbool1_t op0, vuint8m1_t op1, vuint8m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8mf2_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8mf2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8mf2_u8m1_m))) +vuint8m1_t vredand(vbool16_t op0, vuint8m1_t op1, vuint8mf2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8mf4_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8mf4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8mf4_u8m1_m))) +vuint8m1_t vredand(vbool32_t op0, vuint8m1_t op1, vuint8mf4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8mf8_u8m1))) +vuint8m1_t vredand(vuint8m1_t op0, vuint8mf8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u8mf8_u8m1_m))) +vuint8m1_t vredand(vbool64_t op0, vuint8m1_t op1, vuint8mf8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m1_u16m1))) +vuint16m1_t vredand(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m1_u16m1_m))) +vuint16m1_t vredand(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m2_u16m1))) +vuint16m1_t vredand(vuint16m1_t op0, vuint16m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m2_u16m1_m))) +vuint16m1_t vredand(vbool8_t op0, vuint16m1_t op1, vuint16m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m4_u16m1))) +vuint16m1_t vredand(vuint16m1_t op0, vuint16m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m4_u16m1_m))) +vuint16m1_t vredand(vbool4_t op0, vuint16m1_t op1, vuint16m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m8_u16m1))) +vuint16m1_t vredand(vuint16m1_t op0, vuint16m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16m8_u16m1_m))) +vuint16m1_t vredand(vbool2_t op0, vuint16m1_t op1, vuint16m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16mf2_u16m1))) +vuint16m1_t vredand(vuint16m1_t op0, vuint16mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16mf2_u16m1_m))) +vuint16m1_t vredand(vbool32_t op0, vuint16m1_t op1, vuint16mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16mf4_u16m1))) +vuint16m1_t vredand(vuint16m1_t op0, vuint16mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u16mf4_u16m1_m))) +vuint16m1_t vredand(vbool64_t op0, vuint16m1_t op1, vuint16mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m1_u32m1))) +vuint32m1_t vredand(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m1_u32m1_m))) +vuint32m1_t vredand(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m2_u32m1))) +vuint32m1_t vredand(vuint32m1_t op0, vuint32m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m2_u32m1_m))) +vuint32m1_t vredand(vbool16_t op0, vuint32m1_t op1, vuint32m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m4_u32m1))) +vuint32m1_t vredand(vuint32m1_t op0, vuint32m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m4_u32m1_m))) +vuint32m1_t vredand(vbool8_t op0, vuint32m1_t op1, vuint32m4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m8_u32m1))) +vuint32m1_t vredand(vuint32m1_t op0, vuint32m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32m8_u32m1_m))) +vuint32m1_t vredand(vbool4_t op0, vuint32m1_t op1, vuint32m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32mf2_u32m1))) +vuint32m1_t vredand(vuint32m1_t op0, vuint32mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u32mf2_u32m1_m))) +vuint32m1_t vredand(vbool64_t op0, vuint32m1_t op1, vuint32mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m1_u64m1))) +vuint64m1_t vredand(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m1_u64m1_m))) +vuint64m1_t vredand(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m2_u64m1))) +vuint64m1_t vredand(vuint64m1_t op0, vuint64m2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m2_u64m1_m))) +vuint64m1_t vredand(vbool32_t op0, vuint64m1_t op1, vuint64m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m4_u64m1))) +vuint64m1_t vredand(vuint64m1_t op0, vuint64m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m4_u64m1_m))) +vuint64m1_t vredand(vbool16_t op0, vuint64m1_t op1, vuint64m4_t op2, 
vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m8_u64m1))) +vuint64m1_t vredand(vuint64m1_t op0, vuint64m8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredand_vs_u64m8_u64m1_m))) +vuint64m1_t vredand(vbool8_t op0, vuint64m1_t op1, vuint64m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m1))) +void vsuxei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m1_m))) +void vsuxei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m2))) +void vsuxei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m2_m))) +void vsuxei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m4))) +void vsuxei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m4_m))) +void vsuxei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m8))) +void vsuxei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_u64m8_m))) +void vsuxei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m1_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m1_i8m1_m))) +vint8m1_t vredor(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m2_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m2_i8m1_m))) +vint8m1_t vredor(vbool4_t op0, vint8m1_t op1, vint8m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m4_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m4_i8m1_m))) +vint8m1_t vredor(vbool2_t op0, vint8m1_t op1, vint8m4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m8_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8m8_i8m1_m))) +vint8m1_t vredor(vbool1_t op0, vint8m1_t op1, vint8m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8mf2_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8mf2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8mf2_i8m1_m))) +vint8m1_t vredor(vbool16_t op0, vint8m1_t op1, vint8mf2_t op2, 
vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8mf4_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8mf4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8mf4_i8m1_m))) +vint8m1_t vredor(vbool32_t op0, vint8m1_t op1, vint8mf4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8mf8_i8m1))) +vint8m1_t vredor(vint8m1_t op0, vint8mf8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i8mf8_i8m1_m))) +vint8m1_t vredor(vbool64_t op0, vint8m1_t op1, vint8mf8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m1_i16m1))) +vint16m1_t vredor(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m1_i16m1_m))) +vint16m1_t vredor(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m2_i16m1))) +vint16m1_t vredor(vint16m1_t op0, vint16m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m2_i16m1_m))) +vint16m1_t vredor(vbool8_t op0, vint16m1_t op1, vint16m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m4_i16m1))) +vint16m1_t vredor(vint16m1_t op0, vint16m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m4_i16m1_m))) +vint16m1_t vredor(vbool4_t op0, vint16m1_t op1, vint16m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m8_i16m1))) +vint16m1_t vredor(vint16m1_t op0, vint16m8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16m8_i16m1_m))) +vint16m1_t vredor(vbool2_t op0, vint16m1_t op1, vint16m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16mf2_i16m1))) +vint16m1_t vredor(vint16m1_t op0, vint16mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16mf2_i16m1_m))) +vint16m1_t vredor(vbool32_t op0, vint16m1_t op1, vint16mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16mf4_i16m1))) +vint16m1_t vredor(vint16m1_t op0, vint16mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i16mf4_i16m1_m))) +vint16m1_t vredor(vbool64_t op0, vint16m1_t op1, vint16mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m1_i32m1))) +vint32m1_t vredor(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m1_i32m1_m))) +vint32m1_t vredor(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m2_i32m1))) +vint32m1_t vredor(vint32m1_t op0, vint32m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m2_i32m1_m))) +vint32m1_t vredor(vbool16_t op0, vint32m1_t op1, vint32m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m4_i32m1))) +vint32m1_t vredor(vint32m1_t op0, vint32m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m4_i32m1_m))) +vint32m1_t vredor(vbool8_t op0, vint32m1_t op1, vint32m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m8_i32m1))) +vint32m1_t vredor(vint32m1_t op0, vint32m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32m8_i32m1_m))) +vint32m1_t vredor(vbool4_t op0, vint32m1_t op1, vint32m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32mf2_i32m1))) +vint32m1_t vredor(vint32m1_t op0, vint32mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i32mf2_i32m1_m))) +vint32m1_t vredor(vbool64_t op0, vint32m1_t op1, vint32mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m1_i64m1))) +vint64m1_t vredor(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m1_i64m1_m))) +vint64m1_t vredor(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m2_i64m1))) +vint64m1_t vredor(vint64m1_t op0, vint64m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m2_i64m1_m))) +vint64m1_t vredor(vbool32_t op0, vint64m1_t op1, vint64m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m4_i64m1))) +vint64m1_t vredor(vint64m1_t op0, vint64m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m4_i64m1_m))) +vint64m1_t vredor(vbool16_t op0, vint64m1_t op1, vint64m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m8_i64m1))) +vint64m1_t vredor(vint64m1_t op0, vint64m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_i64m8_i64m1_m))) +vint64m1_t vredor(vbool8_t op0, vint64m1_t op1, vint64m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m1_u8m1))) +vuint8m1_t vredor(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m1_u8m1_m))) +vuint8m1_t vredor(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m2_u8m1))) +vuint8m1_t vredor(vuint8m1_t op0, vuint8m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m2_u8m1_m))) +vuint8m1_t vredor(vbool4_t op0, vuint8m1_t op1, vuint8m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m4_u8m1))) +vuint8m1_t 
vredor(vuint8m1_t op0, vuint8m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m4_u8m1_m))) +vuint8m1_t vredor(vbool2_t op0, vuint8m1_t op1, vuint8m4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m8_u8m1))) +vuint8m1_t vredor(vuint8m1_t op0, vuint8m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8m8_u8m1_m))) +vuint8m1_t vredor(vbool1_t op0, vuint8m1_t op1, vuint8m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8mf2_u8m1))) +vuint8m1_t vredor(vuint8m1_t op0, vuint8mf2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8mf2_u8m1_m))) +vuint8m1_t vredor(vbool16_t op0, vuint8m1_t op1, vuint8mf2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8mf4_u8m1))) +vuint8m1_t vredor(vuint8m1_t op0, vuint8mf4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8mf4_u8m1_m))) +vuint8m1_t vredor(vbool32_t op0, vuint8m1_t op1, vuint8mf4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8mf8_u8m1))) +vuint8m1_t vredor(vuint8m1_t op0, vuint8mf8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u8mf8_u8m1_m))) +vuint8m1_t vredor(vbool64_t op0, vuint8m1_t op1, vuint8mf8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m1_u16m1))) +vuint16m1_t vredor(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m1_u16m1_m))) +vuint16m1_t vredor(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m2_u16m1))) +vuint16m1_t vredor(vuint16m1_t op0, vuint16m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m2_u16m1_m))) +vuint16m1_t vredor(vbool8_t op0, vuint16m1_t op1, vuint16m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m4_u16m1))) +vuint16m1_t vredor(vuint16m1_t op0, vuint16m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m4_u16m1_m))) +vuint16m1_t vredor(vbool4_t op0, vuint16m1_t op1, vuint16m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m8_u16m1))) +vuint16m1_t vredor(vuint16m1_t op0, vuint16m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16m8_u16m1_m))) +vuint16m1_t vredor(vbool2_t op0, vuint16m1_t op1, vuint16m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16mf2_u16m1))) +vuint16m1_t vredor(vuint16m1_t op0, vuint16mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16mf2_u16m1_m))) +vuint16m1_t vredor(vbool32_t op0, vuint16m1_t op1, vuint16mf2_t op2, 
vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16mf4_u16m1))) +vuint16m1_t vredor(vuint16m1_t op0, vuint16mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u16mf4_u16m1_m))) +vuint16m1_t vredor(vbool64_t op0, vuint16m1_t op1, vuint16mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m1_u32m1))) +vuint32m1_t vredor(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m1_u32m1_m))) +vuint32m1_t vredor(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m2_u32m1))) +vuint32m1_t vredor(vuint32m1_t op0, vuint32m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m2_u32m1_m))) +vuint32m1_t vredor(vbool16_t op0, vuint32m1_t op1, vuint32m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m4_u32m1))) +vuint32m1_t vredor(vuint32m1_t op0, vuint32m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m4_u32m1_m))) +vuint32m1_t vredor(vbool8_t op0, vuint32m1_t op1, vuint32m4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m8_u32m1))) +vuint32m1_t vredor(vuint32m1_t op0, vuint32m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32m8_u32m1_m))) +vuint32m1_t vredor(vbool4_t op0, vuint32m1_t op1, vuint32m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32mf2_u32m1))) +vuint32m1_t vredor(vuint32m1_t op0, vuint32mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u32mf2_u32m1_m))) +vuint32m1_t vredor(vbool64_t op0, vuint32m1_t op1, vuint32mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m1_u64m1))) +vuint64m1_t vredor(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m1_u64m1_m))) +vuint64m1_t vredor(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m2_u64m1))) +vuint64m1_t vredor(vuint64m1_t op0, vuint64m2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m2_u64m1_m))) +vuint64m1_t vredor(vbool32_t op0, vuint64m1_t op1, vuint64m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m4_u64m1))) +vuint64m1_t vredor(vuint64m1_t op0, vuint64m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m4_u64m1_m))) +vuint64m1_t vredor(vbool16_t op0, vuint64m1_t op1, vuint64m4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m8_u64m1))) +vuint64m1_t vredor(vuint64m1_t op0, vuint64m8_t op1, vuint64m1_t 
op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredor_vs_u64m8_u64m1_m))) +vuint64m1_t vredor(vbool8_t op0, vuint64m1_t op1, vuint64m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m1_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m1_i8m1_m))) +vint8m1_t vredxor(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m2_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m2_i8m1_m))) +vint8m1_t vredxor(vbool4_t op0, vint8m1_t op1, vint8m2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m4_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m4_i8m1_m))) +vint8m1_t vredxor(vbool2_t op0, vint8m1_t op1, vint8m4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m8_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8m8_i8m1_m))) +vint8m1_t vredxor(vbool1_t op0, vint8m1_t op1, vint8m8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8mf2_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8mf2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8mf2_i8m1_m))) +vint8m1_t vredxor(vbool16_t op0, vint8m1_t op1, vint8mf2_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8mf4_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8mf4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8mf4_i8m1_m))) +vint8m1_t vredxor(vbool32_t op0, vint8m1_t op1, vint8mf4_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8mf8_i8m1))) +vint8m1_t vredxor(vint8m1_t op0, vint8mf8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i8mf8_i8m1_m))) +vint8m1_t vredxor(vbool64_t op0, vint8m1_t op1, vint8mf8_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m1_i16m1))) +vint16m1_t vredxor(vint16m1_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m1_i16m1_m))) +vint16m1_t vredxor(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m2_i16m1))) +vint16m1_t vredxor(vint16m1_t op0, vint16m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m2_i16m1_m))) +vint16m1_t vredxor(vbool8_t op0, vint16m1_t op1, vint16m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m4_i16m1))) 
+vint16m1_t vredxor(vint16m1_t op0, vint16m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m4_i16m1_m))) +vint16m1_t vredxor(vbool4_t op0, vint16m1_t op1, vint16m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m8_i16m1))) +vint16m1_t vredxor(vint16m1_t op0, vint16m8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16m8_i16m1_m))) +vint16m1_t vredxor(vbool2_t op0, vint16m1_t op1, vint16m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16mf2_i16m1))) +vint16m1_t vredxor(vint16m1_t op0, vint16mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16mf2_i16m1_m))) +vint16m1_t vredxor(vbool32_t op0, vint16m1_t op1, vint16mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16mf4_i16m1))) +vint16m1_t vredxor(vint16m1_t op0, vint16mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i16mf4_i16m1_m))) +vint16m1_t vredxor(vbool64_t op0, vint16m1_t op1, vint16mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m1_i32m1))) +vint32m1_t vredxor(vint32m1_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m1_i32m1_m))) +vint32m1_t vredxor(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m2_i32m1))) +vint32m1_t vredxor(vint32m1_t op0, vint32m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m2_i32m1_m))) +vint32m1_t vredxor(vbool16_t op0, vint32m1_t op1, vint32m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m4_i32m1))) +vint32m1_t vredxor(vint32m1_t op0, vint32m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m4_i32m1_m))) +vint32m1_t vredxor(vbool8_t op0, vint32m1_t op1, vint32m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m8_i32m1))) +vint32m1_t vredxor(vint32m1_t op0, vint32m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32m8_i32m1_m))) +vint32m1_t vredxor(vbool4_t op0, vint32m1_t op1, vint32m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32mf2_i32m1))) +vint32m1_t vredxor(vint32m1_t op0, vint32mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i32mf2_i32m1_m))) +vint32m1_t vredxor(vbool64_t op0, vint32m1_t op1, vint32mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m1_i64m1))) +vint64m1_t vredxor(vint64m1_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m1_i64m1_m))) +vint64m1_t vredxor(vbool64_t op0, vint64m1_t 
op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m2_i64m1))) +vint64m1_t vredxor(vint64m1_t op0, vint64m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m2_i64m1_m))) +vint64m1_t vredxor(vbool32_t op0, vint64m1_t op1, vint64m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m4_i64m1))) +vint64m1_t vredxor(vint64m1_t op0, vint64m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m4_i64m1_m))) +vint64m1_t vredxor(vbool16_t op0, vint64m1_t op1, vint64m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m8_i64m1))) +vint64m1_t vredxor(vint64m1_t op0, vint64m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_i64m8_i64m1_m))) +vint64m1_t vredxor(vbool8_t op0, vint64m1_t op1, vint64m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m1_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m1_u8m1_m))) +vuint8m1_t vredxor(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m2_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m2_u8m1_m))) +vuint8m1_t vredxor(vbool4_t op0, vuint8m1_t op1, vuint8m2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m4_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m4_u8m1_m))) +vuint8m1_t vredxor(vbool2_t op0, vuint8m1_t op1, vuint8m4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m8_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8m8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8m8_u8m1_m))) +vuint8m1_t vredxor(vbool1_t op0, vuint8m1_t op1, vuint8m8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8mf2_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8mf2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8mf2_u8m1_m))) +vuint8m1_t vredxor(vbool16_t op0, vuint8m1_t op1, vuint8mf2_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8mf4_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8mf4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8mf4_u8m1_m))) +vuint8m1_t vredxor(vbool32_t op0, vuint8m1_t op1, vuint8mf4_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8mf8_u8m1))) +vuint8m1_t vredxor(vuint8m1_t op0, vuint8mf8_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u8mf8_u8m1_m))) +vuint8m1_t vredxor(vbool64_t op0, vuint8m1_t op1, vuint8mf8_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m1_u16m1))) +vuint16m1_t vredxor(vuint16m1_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m1_u16m1_m))) +vuint16m1_t vredxor(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m2_u16m1))) +vuint16m1_t vredxor(vuint16m1_t op0, vuint16m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m2_u16m1_m))) +vuint16m1_t vredxor(vbool8_t op0, vuint16m1_t op1, vuint16m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m4_u16m1))) +vuint16m1_t vredxor(vuint16m1_t op0, vuint16m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m4_u16m1_m))) +vuint16m1_t vredxor(vbool4_t op0, vuint16m1_t op1, vuint16m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m8_u16m1))) +vuint16m1_t vredxor(vuint16m1_t op0, vuint16m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16m8_u16m1_m))) +vuint16m1_t vredxor(vbool2_t op0, vuint16m1_t op1, vuint16m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16mf2_u16m1))) +vuint16m1_t vredxor(vuint16m1_t op0, vuint16mf2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16mf2_u16m1_m))) +vuint16m1_t vredxor(vbool32_t op0, vuint16m1_t op1, vuint16mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16mf4_u16m1))) +vuint16m1_t vredxor(vuint16m1_t op0, vuint16mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u16mf4_u16m1_m))) +vuint16m1_t vredxor(vbool64_t op0, vuint16m1_t op1, vuint16mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m1_u32m1))) +vuint32m1_t vredxor(vuint32m1_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m1_u32m1_m))) +vuint32m1_t vredxor(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m2_u32m1))) +vuint32m1_t vredxor(vuint32m1_t op0, vuint32m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m2_u32m1_m))) +vuint32m1_t vredxor(vbool16_t op0, vuint32m1_t op1, vuint32m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m4_u32m1))) +vuint32m1_t vredxor(vuint32m1_t op0, vuint32m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m4_u32m1_m))) +vuint32m1_t vredxor(vbool8_t op0, vuint32m1_t op1, vuint32m4_t op2, vuint32m1_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m8_u32m1))) +vuint32m1_t vredxor(vuint32m1_t op0, vuint32m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32m8_u32m1_m))) +vuint32m1_t vredxor(vbool4_t op0, vuint32m1_t op1, vuint32m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32mf2_u32m1))) +vuint32m1_t vredxor(vuint32m1_t op0, vuint32mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u32mf2_u32m1_m))) +vuint32m1_t vredxor(vbool64_t op0, vuint32m1_t op1, vuint32mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m1_u64m1))) +vuint64m1_t vredxor(vuint64m1_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m1_u64m1_m))) +vuint64m1_t vredxor(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m2_u64m1))) +vuint64m1_t vredxor(vuint64m1_t op0, vuint64m2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m2_u64m1_m))) +vuint64m1_t vredxor(vbool32_t op0, vuint64m1_t op1, vuint64m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m4_u64m1))) +vuint64m1_t vredxor(vuint64m1_t op0, vuint64m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m4_u64m1_m))) +vuint64m1_t vredxor(vbool16_t op0, vuint64m1_t op1, vuint64m4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m8_u64m1))) +vuint64m1_t vredxor(vuint64m1_t op0, vuint64m8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vredxor_vs_u64m8_u64m1_m))) +vuint64m1_t vredxor(vbool8_t op0, vuint64m1_t op1, vuint64m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m1_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m1_i16m1_m))) +vint16m1_t vwredsum(vbool8_t op0, vint16m1_t op1, vint8m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m2_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m2_i16m1_m))) +vint16m1_t vwredsum(vbool4_t op0, vint16m1_t op1, vint8m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m4_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m4_i16m1_m))) +vint16m1_t vwredsum(vbool2_t op0, vint16m1_t op1, vint8m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m8_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8m8_t op1, vint16m1_t op2, 
size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8m8_i16m1_m))) +vint16m1_t vwredsum(vbool1_t op0, vint16m1_t op1, vint8m8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8mf2_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8mf2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8mf2_i16m1_m))) +vint16m1_t vwredsum(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8mf4_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8mf4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8mf4_i16m1_m))) +vint16m1_t vwredsum(vbool32_t op0, vint16m1_t op1, vint8mf4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8mf8_i16m1))) +vint16m1_t vwredsum(vint16m1_t op0, vint8mf8_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i8mf8_i16m1_m))) +vint16m1_t vwredsum(vbool64_t op0, vint16m1_t op1, vint8mf8_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m1_i32m1))) +vint32m1_t vwredsum(vint32m1_t op0, vint16m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m1_i32m1_m))) +vint32m1_t vwredsum(vbool16_t op0, vint32m1_t op1, vint16m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m2_i32m1))) +vint32m1_t vwredsum(vint32m1_t op0, vint16m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m2_i32m1_m))) +vint32m1_t vwredsum(vbool8_t op0, vint32m1_t op1, vint16m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m4_i32m1))) +vint32m1_t vwredsum(vint32m1_t op0, vint16m4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m4_i32m1_m))) +vint32m1_t vwredsum(vbool4_t op0, vint32m1_t op1, vint16m4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m8_i32m1))) +vint32m1_t vwredsum(vint32m1_t op0, vint16m8_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16m8_i32m1_m))) +vint32m1_t vwredsum(vbool2_t op0, vint32m1_t op1, vint16m8_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16mf2_i32m1))) +vint32m1_t vwredsum(vint32m1_t op0, vint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16mf2_i32m1_m))) +vint32m1_t vwredsum(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16mf4_i32m1))) +vint32m1_t vwredsum(vint32m1_t op0, vint16mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i16mf4_i32m1_m))) +vint32m1_t vwredsum(vbool64_t op0, vint32m1_t op1, vint16mf4_t op2, vint32m1_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m1_i64m1))) +vint64m1_t vwredsum(vint64m1_t op0, vint32m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m1_i64m1_m))) +vint64m1_t vwredsum(vbool32_t op0, vint64m1_t op1, vint32m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m2_i64m1))) +vint64m1_t vwredsum(vint64m1_t op0, vint32m2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m2_i64m1_m))) +vint64m1_t vwredsum(vbool16_t op0, vint64m1_t op1, vint32m2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m4_i64m1))) +vint64m1_t vwredsum(vint64m1_t op0, vint32m4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m4_i64m1_m))) +vint64m1_t vwredsum(vbool8_t op0, vint64m1_t op1, vint32m4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m8_i64m1))) +vint64m1_t vwredsum(vint64m1_t op0, vint32m8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32m8_i64m1_m))) +vint64m1_t vwredsum(vbool4_t op0, vint64m1_t op1, vint32m8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32mf2_i64m1))) +vint64m1_t vwredsum(vint64m1_t op0, vint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsum_vs_i32mf2_i64m1_m))) +vint64m1_t vwredsum(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m1_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m1_u16m1_m))) +vuint16m1_t vwredsumu(vbool8_t op0, vuint16m1_t op1, vuint8m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m2_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m2_u16m1_m))) +vuint16m1_t vwredsumu(vbool4_t op0, vuint16m1_t op1, vuint8m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m4_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m4_u16m1_m))) +vuint16m1_t vwredsumu(vbool2_t op0, vuint16m1_t op1, vuint8m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m8_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8m8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8m8_u16m1_m))) +vuint16m1_t vwredsumu(vbool1_t op0, vuint16m1_t op1, vuint8m8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8mf2_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8mf2_t op1, 
vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8mf2_u16m1_m))) +vuint16m1_t vwredsumu(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8mf4_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8mf4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8mf4_u16m1_m))) +vuint16m1_t vwredsumu(vbool32_t op0, vuint16m1_t op1, vuint8mf4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8mf8_u16m1))) +vuint16m1_t vwredsumu(vuint16m1_t op0, vuint8mf8_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u8mf8_u16m1_m))) +vuint16m1_t vwredsumu(vbool64_t op0, vuint16m1_t op1, vuint8mf8_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m1_u32m1))) +vuint32m1_t vwredsumu(vuint32m1_t op0, vuint16m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m1_u32m1_m))) +vuint32m1_t vwredsumu(vbool16_t op0, vuint32m1_t op1, vuint16m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m2_u32m1))) +vuint32m1_t vwredsumu(vuint32m1_t op0, vuint16m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m2_u32m1_m))) +vuint32m1_t vwredsumu(vbool8_t op0, vuint32m1_t op1, vuint16m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m4_u32m1))) +vuint32m1_t vwredsumu(vuint32m1_t op0, vuint16m4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m4_u32m1_m))) +vuint32m1_t vwredsumu(vbool4_t op0, vuint32m1_t op1, vuint16m4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m8_u32m1))) +vuint32m1_t vwredsumu(vuint32m1_t op0, vuint16m8_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16m8_u32m1_m))) +vuint32m1_t vwredsumu(vbool2_t op0, vuint32m1_t op1, vuint16m8_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16mf2_u32m1))) +vuint32m1_t vwredsumu(vuint32m1_t op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16mf2_u32m1_m))) +vuint32m1_t vwredsumu(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16mf4_u32m1))) +vuint32m1_t vwredsumu(vuint32m1_t op0, vuint16mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u16mf4_u32m1_m))) +vuint32m1_t vwredsumu(vbool64_t op0, vuint32m1_t op1, vuint16mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m1_u64m1))) +vuint64m1_t vwredsumu(vuint64m1_t op0, vuint32m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m1_u64m1_m))) +vuint64m1_t vwredsumu(vbool32_t op0, vuint64m1_t op1, vuint32m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m2_u64m1))) +vuint64m1_t vwredsumu(vuint64m1_t op0, vuint32m2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m2_u64m1_m))) +vuint64m1_t vwredsumu(vbool16_t op0, vuint64m1_t op1, vuint32m2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m4_u64m1))) +vuint64m1_t vwredsumu(vuint64m1_t op0, vuint32m4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m4_u64m1_m))) +vuint64m1_t vwredsumu(vbool8_t op0, vuint64m1_t op1, vuint32m4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m8_u64m1))) +vuint64m1_t vwredsumu(vuint64m1_t op0, vuint32m8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32m8_u64m1_m))) +vuint64m1_t vwredsumu(vbool4_t op0, vuint64m1_t op1, vuint32m8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32mf2_u64m1))) +vuint64m1_t vwredsumu(vuint64m1_t op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwredsumu_vs_u32mf2_u64m1_m))) +vuint64m1_t vwredsumu(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m1))) +void vsuxei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m1_m))) +void vsuxei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m2))) +void vsuxei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m2_m))) +void vsuxei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m4))) +void vsuxei8(uint8_t * op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m4_m))) +void vsuxei8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m8))) +void vsuxei8(uint8_t * op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8m8_m))) +void vsuxei8(vbool1_t op0, uint8_t * op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8mf2))) +void vsuxei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8mf2_m))) +void vsuxei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8mf4))) +void vsuxei8(uint8_t * op0, vuint8mf4_t op1, 
vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8mf4_m))) +void vsuxei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8mf8))) +void vsuxei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_u8mf8_m))) +void vsuxei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b8))) +vbool8_t vmnot(vbool8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b4))) +vbool4_t vmnot(vbool4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b2))) +vbool2_t vmnot(vbool2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b1))) +vbool1_t vmnot(vbool1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b16))) +vbool16_t vmnot(vbool16_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b32))) +vbool32_t vmnot(vbool32_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnot_m_b64))) +vbool64_t vmnot(vbool64_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8m1_m))) +vuint8m1_t viota(vbool8_t op0, vuint8m1_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8m2_m))) +vuint8m2_t viota(vbool4_t op0, vuint8m2_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8m4_m))) +vuint8m4_t viota(vbool2_t op0, vuint8m4_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8m8_m))) +vuint8m8_t viota(vbool1_t op0, vuint8m8_t op1, vbool1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8mf2_m))) +vuint8mf2_t viota(vbool16_t op0, vuint8mf2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8mf4_m))) +vuint8mf4_t viota(vbool32_t op0, vuint8mf4_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u8mf8_m))) +vuint8mf8_t viota(vbool64_t op0, vuint8mf8_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u16m1_m))) +vuint16m1_t viota(vbool16_t op0, vuint16m1_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u16m2_m))) +vuint16m2_t viota(vbool8_t op0, vuint16m2_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u16m4_m))) +vuint16m4_t viota(vbool4_t op0, vuint16m4_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u16m8_m))) +vuint16m8_t viota(vbool2_t op0, vuint16m8_t op1, vbool2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u16mf2_m))) +vuint16mf2_t viota(vbool32_t op0, vuint16mf2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u16mf4_m))) +vuint16mf4_t viota(vbool64_t op0, 
vuint16mf4_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u32m1_m))) +vuint32m1_t viota(vbool32_t op0, vuint32m1_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u32m2_m))) +vuint32m2_t viota(vbool16_t op0, vuint32m2_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u32m4_m))) +vuint32m4_t viota(vbool8_t op0, vuint32m4_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u32m8_m))) +vuint32m8_t viota(vbool4_t op0, vuint32m8_t op1, vbool4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u32mf2_m))) +vuint32mf2_t viota(vbool64_t op0, vuint32mf2_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u64m1_m))) +vuint64m1_t viota(vbool64_t op0, vuint64m1_t op1, vbool64_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u64m2_m))) +vuint64m2_t viota(vbool32_t op0, vuint64m2_t op1, vbool32_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u64m4_m))) +vuint64m4_t viota(vbool16_t op0, vuint64m4_t op1, vbool16_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_viota_m_u64m8_m))) +vuint64m8_t viota(vbool8_t op0, vuint64m8_t op1, vbool8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8m1_m))) +vint8m1_t vid(vbool8_t op0, vint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8m2_m))) +vint8m2_t vid(vbool4_t op0, vint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8m4_m))) +vint8m4_t vid(vbool2_t op0, vint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8m8_m))) +vint8m8_t vid(vbool1_t op0, vint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8mf2_m))) +vint8mf2_t vid(vbool16_t op0, vint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8mf4_m))) +vint8mf4_t vid(vbool32_t op0, vint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i8mf8_m))) +vint8mf8_t vid(vbool64_t op0, vint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i16m1_m))) +vint16m1_t vid(vbool16_t op0, vint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i16m2_m))) +vint16m2_t vid(vbool8_t op0, vint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i16m4_m))) +vint16m4_t vid(vbool4_t op0, vint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i16m8_m))) +vint16m8_t vid(vbool2_t op0, vint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i16mf2_m))) +vint16mf2_t vid(vbool32_t op0, vint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i16mf4_m))) +vint16mf4_t vid(vbool64_t op0, vint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i32m1_m))) +vint32m1_t vid(vbool32_t op0, 
vint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i32m2_m))) +vint32m2_t vid(vbool16_t op0, vint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i32m4_m))) +vint32m4_t vid(vbool8_t op0, vint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i32m8_m))) +vint32m8_t vid(vbool4_t op0, vint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i32mf2_m))) +vint32mf2_t vid(vbool64_t op0, vint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i64m1_m))) +vint64m1_t vid(vbool64_t op0, vint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i64m2_m))) +vint64m2_t vid(vbool32_t op0, vint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i64m4_m))) +vint64m4_t vid(vbool16_t op0, vint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_i64m8_m))) +vint64m8_t vid(vbool8_t op0, vint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8m1_m))) +vuint8m1_t vid(vbool8_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8m2_m))) +vuint8m2_t vid(vbool4_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8m4_m))) +vuint8m4_t vid(vbool2_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8m8_m))) +vuint8m8_t vid(vbool1_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8mf2_m))) +vuint8mf2_t vid(vbool16_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8mf4_m))) +vuint8mf4_t vid(vbool32_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u8mf8_m))) +vuint8mf8_t vid(vbool64_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u16m1_m))) +vuint16m1_t vid(vbool16_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u16m2_m))) +vuint16m2_t vid(vbool8_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u16m4_m))) +vuint16m4_t vid(vbool4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u16m8_m))) +vuint16m8_t vid(vbool2_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u16mf2_m))) +vuint16mf2_t vid(vbool32_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u16mf4_m))) +vuint16mf4_t vid(vbool64_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u32m1_m))) +vuint32m1_t vid(vbool32_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u32m2_m))) +vuint32m2_t vid(vbool16_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u32m4_m))) +vuint32m4_t vid(vbool8_t op0, vuint32m4_t op1, size_t op2); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u32m8_m))) +vuint32m8_t vid(vbool4_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u32mf2_m))) +vuint32mf2_t vid(vbool64_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u64m1_m))) +vuint64m1_t vid(vbool64_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u64m2_m))) +vuint64m2_t vid(vbool32_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u64m4_m))) +vuint64m4_t vid(vbool16_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vid_v_u64m8_m))) +vuint64m8_t vid(vbool8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8m1_i8))) +int8_t vmv_x(vint8m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8m2_i8))) +int8_t vmv_x(vint8m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8m4_i8))) +int8_t vmv_x(vint8m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8m8_i8))) +int8_t vmv_x(vint8m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8mf2_i8))) +int8_t vmv_x(vint8mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8mf4_i8))) +int8_t vmv_x(vint8mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i8mf8_i8))) +int8_t vmv_x(vint8mf8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i16m1_i16))) +int16_t vmv_x(vint16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i16m2_i16))) +int16_t vmv_x(vint16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i16m4_i16))) +int16_t vmv_x(vint16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i16m8_i16))) +int16_t vmv_x(vint16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i16mf2_i16))) +int16_t vmv_x(vint16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i16mf4_i16))) +int16_t vmv_x(vint16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i32m1_i32))) +int32_t vmv_x(vint32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i32m2_i32))) +int32_t vmv_x(vint32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i32m4_i32))) +int32_t vmv_x(vint32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i32m8_i32))) +int32_t vmv_x(vint32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i32mf2_i32))) +int32_t vmv_x(vint32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i64m1_i64))) +int64_t vmv_x(vint64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i64m2_i64))) +int64_t vmv_x(vint64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i64m4_i64))) +int64_t vmv_x(vint64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_i64m8_i64))) +int64_t vmv_x(vint64m8_t op0); 
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8m1_u8))) +uint8_t vmv_x(vuint8m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8m2_u8))) +uint8_t vmv_x(vuint8m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8m4_u8))) +uint8_t vmv_x(vuint8m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8m8_u8))) +uint8_t vmv_x(vuint8m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8mf2_u8))) +uint8_t vmv_x(vuint8mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8mf4_u8))) +uint8_t vmv_x(vuint8mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u8mf8_u8))) +uint8_t vmv_x(vuint8mf8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u16m1_u16))) +uint16_t vmv_x(vuint16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u16m2_u16))) +uint16_t vmv_x(vuint16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u16m4_u16))) +uint16_t vmv_x(vuint16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u16m8_u16))) +uint16_t vmv_x(vuint16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u16mf2_u16))) +uint16_t vmv_x(vuint16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u16mf4_u16))) +uint16_t vmv_x(vuint16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u32m1_u32))) +uint32_t vmv_x(vuint32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u32m2_u32))) +uint32_t vmv_x(vuint32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u32m4_u32))) +uint32_t vmv_x(vuint32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u32m8_u32))) +uint32_t vmv_x(vuint32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u32mf2_u32))) +uint32_t vmv_x(vuint32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u64m1_u64))) +uint64_t vmv_x(vuint64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u64m2_u64))) +uint64_t vmv_x(vuint64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u64m4_u64))) +uint64_t vmv_x(vuint64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_x_s_u64m8_u64))) +uint64_t vmv_x(vuint64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8m1))) +vint8m1_t vmv_s(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8m2))) +vint8m2_t vmv_s(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8m4))) +vint8m4_t vmv_s(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8m8))) +vint8m8_t vmv_s(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8mf2))) +vint8mf2_t vmv_s(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8mf4))) +vint8mf4_t vmv_s(vint8mf4_t op0, int8_t 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i8mf8))) +vint8mf8_t vmv_s(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i16m1))) +vint16m1_t vmv_s(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i16m2))) +vint16m2_t vmv_s(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i16m4))) +vint16m4_t vmv_s(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i16m8))) +vint16m8_t vmv_s(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i16mf2))) +vint16mf2_t vmv_s(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i16mf4))) +vint16mf4_t vmv_s(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i32m1))) +vint32m1_t vmv_s(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i32m2))) +vint32m2_t vmv_s(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i32m4))) +vint32m4_t vmv_s(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i32m8))) +vint32m8_t vmv_s(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i32mf2))) +vint32mf2_t vmv_s(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i64m1))) +vint64m1_t vmv_s(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i64m2))) +vint64m2_t vmv_s(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i64m4))) +vint64m4_t vmv_s(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_i64m8))) +vint64m8_t vmv_s(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8m1))) +vuint8m1_t vmv_s(vuint8m1_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8m2))) +vuint8m2_t vmv_s(vuint8m2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8m4))) +vuint8m4_t vmv_s(vuint8m4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8m8))) +vuint8m8_t vmv_s(vuint8m8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8mf2))) +vuint8mf2_t vmv_s(vuint8mf2_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8mf4))) +vuint8mf4_t vmv_s(vuint8mf4_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u8mf8))) +vuint8mf8_t vmv_s(vuint8mf8_t op0, uint8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u16m1))) +vuint16m1_t vmv_s(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u16m2))) +vuint16m2_t vmv_s(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u16m4))) +vuint16m4_t vmv_s(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u16m8))) +vuint16m8_t vmv_s(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u16mf2))) +vuint16mf2_t vmv_s(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u16mf4))) +vuint16mf4_t vmv_s(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u32m1))) +vuint32m1_t vmv_s(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u32m2))) +vuint32m2_t vmv_s(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u32m4))) +vuint32m4_t vmv_s(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u32m8))) +vuint32m8_t vmv_s(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u32mf2))) +vuint32mf2_t vmv_s(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u64m1))) +vuint64m1_t vmv_s(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u64m2))) +vuint64m2_t vmv_s(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u64m4))) +vuint64m4_t vmv_s(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_s_x_u64m8))) +vuint64m8_t vmv_s(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u16m1))) +vuint16m1_t vfmv_s(vuint16m1_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u16m2))) +vuint16m2_t vfmv_s(vuint16m2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u16m4))) +vuint16m4_t vfmv_s(vuint16m4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u16m8))) +vuint16m8_t vfmv_s(vuint16m8_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u16mf2))) +vuint16mf2_t vfmv_s(vuint16mf2_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u16mf4))) +vuint16mf4_t vfmv_s(vuint16mf4_t op0, uint16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u32m1))) +vuint32m1_t vfmv_s(vuint32m1_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u32m2))) +vuint32m2_t vfmv_s(vuint32m2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u32m4))) +vuint32m4_t vfmv_s(vuint32m4_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u32m8))) +vuint32m8_t 
vfmv_s(vuint32m8_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u32mf2))) +vuint32mf2_t vfmv_s(vuint32mf2_t op0, uint32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u64m1))) +vuint64m1_t vfmv_s(vuint64m1_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u64m2))) +vuint64m2_t vfmv_s(vuint64m2_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u64m4))) +vuint64m4_t vfmv_s(vuint64m4_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_x_u64m8))) +vuint64m8_t vfmv_s(vuint64m8_t op0, uint64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m1))) +vint8m1_t vslideup(vint8m1_t op0, vint8m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m1_m))) +vint8m1_t vslideup(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m2))) +vint8m2_t vslideup(vint8m2_t op0, vint8m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m2_m))) +vint8m2_t vslideup(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m4))) +vint8m4_t vslideup(vint8m4_t op0, vint8m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m4_m))) +vint8m4_t vslideup(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m8))) +vint8m8_t vslideup(vint8m8_t op0, vint8m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8m8_m))) +vint8m8_t vslideup(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8mf2))) +vint8mf2_t vslideup(vint8mf2_t op0, vint8mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8mf2_m))) +vint8mf2_t vslideup(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8mf4))) +vint8mf4_t vslideup(vint8mf4_t op0, vint8mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8mf4_m))) +vint8mf4_t vslideup(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8mf8))) +vint8mf8_t vslideup(vint8mf8_t op0, vint8mf8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i8mf8_m))) +vint8mf8_t vslideup(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m1))) +vint16m1_t vslideup(vint16m1_t op0, vint16m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m1_m))) +vint16m1_t vslideup(vbool16_t op0, vint16m1_t 
op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m2))) +vint16m2_t vslideup(vint16m2_t op0, vint16m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m2_m))) +vint16m2_t vslideup(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m4))) +vint16m4_t vslideup(vint16m4_t op0, vint16m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m4_m))) +vint16m4_t vslideup(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m8))) +vint16m8_t vslideup(vint16m8_t op0, vint16m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16m8_m))) +vint16m8_t vslideup(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16mf2))) +vint16mf2_t vslideup(vint16mf2_t op0, vint16mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16mf2_m))) +vint16mf2_t vslideup(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16mf4))) +vint16mf4_t vslideup(vint16mf4_t op0, vint16mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i16mf4_m))) +vint16mf4_t vslideup(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m1))) +vint32m1_t vslideup(vint32m1_t op0, vint32m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m1_m))) +vint32m1_t vslideup(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m2))) +vint32m2_t vslideup(vint32m2_t op0, vint32m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m2_m))) +vint32m2_t vslideup(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m4))) +vint32m4_t vslideup(vint32m4_t op0, vint32m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m4_m))) +vint32m4_t vslideup(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m8))) +vint32m8_t vslideup(vint32m8_t op0, vint32m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32m8_m))) +vint32m8_t vslideup(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32mf2))) +vint32mf2_t vslideup(vint32mf2_t op0, vint32mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i32mf2_m))) +vint32mf2_t vslideup(vbool64_t op0, vint32mf2_t op1, 
vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m1))) +vint64m1_t vslideup(vint64m1_t op0, vint64m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m1_m))) +vint64m1_t vslideup(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m2))) +vint64m2_t vslideup(vint64m2_t op0, vint64m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m2_m))) +vint64m2_t vslideup(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m4))) +vint64m4_t vslideup(vint64m4_t op0, vint64m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m4_m))) +vint64m4_t vslideup(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m8))) +vint64m8_t vslideup(vint64m8_t op0, vint64m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_i64m8_m))) +vint64m8_t vslideup(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m1))) +vuint8m1_t vslideup(vuint8m1_t op0, vuint8m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m1_m))) +vuint8m1_t vslideup(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m2))) +vuint8m2_t vslideup(vuint8m2_t op0, vuint8m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m2_m))) +vuint8m2_t vslideup(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m4))) +vuint8m4_t vslideup(vuint8m4_t op0, vuint8m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m4_m))) +vuint8m4_t vslideup(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m8))) +vuint8m8_t vslideup(vuint8m8_t op0, vuint8m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8m8_m))) +vuint8m8_t vslideup(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8mf2))) +vuint8mf2_t vslideup(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8mf2_m))) +vuint8mf2_t vslideup(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8mf4))) +vuint8mf4_t vslideup(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8mf4_m))) +vuint8mf4_t vslideup(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8mf8))) +vuint8mf8_t vslideup(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u8mf8_m))) +vuint8mf8_t vslideup(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m1))) +vuint16m1_t vslideup(vuint16m1_t op0, vuint16m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m1_m))) +vuint16m1_t vslideup(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m2))) +vuint16m2_t vslideup(vuint16m2_t op0, vuint16m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m2_m))) +vuint16m2_t vslideup(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m4))) +vuint16m4_t vslideup(vuint16m4_t op0, vuint16m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m4_m))) +vuint16m4_t vslideup(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m8))) +vuint16m8_t vslideup(vuint16m8_t op0, vuint16m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16m8_m))) +vuint16m8_t vslideup(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16mf2))) +vuint16mf2_t vslideup(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16mf2_m))) +vuint16mf2_t vslideup(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16mf4))) +vuint16mf4_t vslideup(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u16mf4_m))) +vuint16mf4_t vslideup(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m1))) +vuint32m1_t vslideup(vuint32m1_t op0, vuint32m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m1_m))) +vuint32m1_t vslideup(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m2))) +vuint32m2_t vslideup(vuint32m2_t op0, vuint32m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m2_m))) +vuint32m2_t vslideup(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m4))) +vuint32m4_t vslideup(vuint32m4_t op0, vuint32m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m4_m))) +vuint32m4_t 
vslideup(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m8))) +vuint32m8_t vslideup(vuint32m8_t op0, vuint32m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32m8_m))) +vuint32m8_t vslideup(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32mf2))) +vuint32mf2_t vslideup(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u32mf2_m))) +vuint32mf2_t vslideup(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m1))) +vuint64m1_t vslideup(vuint64m1_t op0, vuint64m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m1_m))) +vuint64m1_t vslideup(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m2))) +vuint64m2_t vslideup(vuint64m2_t op0, vuint64m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m2_m))) +vuint64m2_t vslideup(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m4))) +vuint64m4_t vslideup(vuint64m4_t op0, vuint64m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m4_m))) +vuint64m4_t vslideup(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m8))) +vuint64m8_t vslideup(vuint64m8_t op0, vuint64m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_u64m8_m))) +vuint64m8_t vslideup(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m1))) +vint8m1_t vslidedown(vint8m1_t op0, vint8m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m1_m))) +vint8m1_t vslidedown(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m2))) +vint8m2_t vslidedown(vint8m2_t op0, vint8m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m2_m))) +vint8m2_t vslidedown(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m4))) +vint8m4_t vslidedown(vint8m4_t op0, vint8m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m4_m))) +vint8m4_t vslidedown(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m8))) +vint8m8_t vslidedown(vint8m8_t op0, vint8m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8m8_m))) +vint8m8_t vslidedown(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8mf2))) +vint8mf2_t vslidedown(vint8mf2_t op0, vint8mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8mf2_m))) +vint8mf2_t vslidedown(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8mf4))) +vint8mf4_t vslidedown(vint8mf4_t op0, vint8mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8mf4_m))) +vint8mf4_t vslidedown(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8mf8))) +vint8mf8_t vslidedown(vint8mf8_t op0, vint8mf8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i8mf8_m))) +vint8mf8_t vslidedown(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m1))) +vint16m1_t vslidedown(vint16m1_t op0, vint16m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m1_m))) +vint16m1_t vslidedown(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m2))) +vint16m2_t vslidedown(vint16m2_t op0, vint16m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m2_m))) +vint16m2_t vslidedown(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m4))) +vint16m4_t vslidedown(vint16m4_t op0, vint16m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m4_m))) +vint16m4_t vslidedown(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m8))) +vint16m8_t vslidedown(vint16m8_t op0, vint16m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16m8_m))) +vint16m8_t vslidedown(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16mf2))) +vint16mf2_t vslidedown(vint16mf2_t op0, vint16mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16mf2_m))) +vint16mf2_t vslidedown(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16mf4))) +vint16mf4_t vslidedown(vint16mf4_t op0, vint16mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i16mf4_m))) +vint16mf4_t vslidedown(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m1))) +vint32m1_t vslidedown(vint32m1_t 
op0, vint32m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m1_m))) +vint32m1_t vslidedown(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m2))) +vint32m2_t vslidedown(vint32m2_t op0, vint32m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m2_m))) +vint32m2_t vslidedown(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m4))) +vint32m4_t vslidedown(vint32m4_t op0, vint32m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m4_m))) +vint32m4_t vslidedown(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m8))) +vint32m8_t vslidedown(vint32m8_t op0, vint32m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32m8_m))) +vint32m8_t vslidedown(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32mf2))) +vint32mf2_t vslidedown(vint32mf2_t op0, vint32mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i32mf2_m))) +vint32mf2_t vslidedown(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m1))) +vint64m1_t vslidedown(vint64m1_t op0, vint64m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m1_m))) +vint64m1_t vslidedown(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m2))) +vint64m2_t vslidedown(vint64m2_t op0, vint64m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m2_m))) +vint64m2_t vslidedown(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m4))) +vint64m4_t vslidedown(vint64m4_t op0, vint64m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m4_m))) +vint64m4_t vslidedown(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m8))) +vint64m8_t vslidedown(vint64m8_t op0, vint64m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_i64m8_m))) +vint64m8_t vslidedown(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m1))) +vuint8m1_t vslidedown(vuint8m1_t op0, vuint8m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m1_m))) +vuint8m1_t vslidedown(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m2))) +vuint8m2_t vslidedown(vuint8m2_t op0, vuint8m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m2_m))) +vuint8m2_t vslidedown(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m4))) +vuint8m4_t vslidedown(vuint8m4_t op0, vuint8m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m4_m))) +vuint8m4_t vslidedown(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m8))) +vuint8m8_t vslidedown(vuint8m8_t op0, vuint8m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8m8_m))) +vuint8m8_t vslidedown(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8mf2))) +vuint8mf2_t vslidedown(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8mf2_m))) +vuint8mf2_t vslidedown(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8mf4))) +vuint8mf4_t vslidedown(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8mf4_m))) +vuint8mf4_t vslidedown(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8mf8))) +vuint8mf8_t vslidedown(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u8mf8_m))) +vuint8mf8_t vslidedown(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m1))) +vuint16m1_t vslidedown(vuint16m1_t op0, vuint16m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m1_m))) +vuint16m1_t vslidedown(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m2))) +vuint16m2_t vslidedown(vuint16m2_t op0, vuint16m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m2_m))) +vuint16m2_t vslidedown(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m4))) +vuint16m4_t vslidedown(vuint16m4_t op0, vuint16m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m4_m))) +vuint16m4_t vslidedown(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m8))) +vuint16m8_t vslidedown(vuint16m8_t op0, vuint16m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16m8_m))) +vuint16m8_t 
vslidedown(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16mf2))) +vuint16mf2_t vslidedown(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16mf2_m))) +vuint16mf2_t vslidedown(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16mf4))) +vuint16mf4_t vslidedown(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u16mf4_m))) +vuint16mf4_t vslidedown(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m1))) +vuint32m1_t vslidedown(vuint32m1_t op0, vuint32m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m1_m))) +vuint32m1_t vslidedown(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m2))) +vuint32m2_t vslidedown(vuint32m2_t op0, vuint32m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m2_m))) +vuint32m2_t vslidedown(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m4))) +vuint32m4_t vslidedown(vuint32m4_t op0, vuint32m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m4_m))) +vuint32m4_t vslidedown(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m8))) +vuint32m8_t vslidedown(vuint32m8_t op0, vuint32m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32m8_m))) +vuint32m8_t vslidedown(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32mf2))) +vuint32mf2_t vslidedown(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u32mf2_m))) +vuint32mf2_t vslidedown(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m1))) +vuint64m1_t vslidedown(vuint64m1_t op0, vuint64m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m1_m))) +vuint64m1_t vslidedown(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m2))) +vuint64m2_t vslidedown(vuint64m2_t op0, vuint64m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m2_m))) +vuint64m2_t vslidedown(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m4))) +vuint64m4_t vslidedown(vuint64m4_t op0, vuint64m4_t 
op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m4_m))) +vuint64m4_t vslidedown(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m8))) +vuint64m8_t vslidedown(vuint64m8_t op0, vuint64m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_u64m8_m))) +vuint64m8_t vslidedown(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m1))) +vint8m1_t vslide1up(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m1_m))) +vint8m1_t vslide1up(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m2))) +vint8m2_t vslide1up(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m2_m))) +vint8m2_t vslide1up(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m4))) +vint8m4_t vslide1up(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m4_m))) +vint8m4_t vslide1up(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m8))) +vint8m8_t vslide1up(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8m8_m))) +vint8m8_t vslide1up(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8mf2))) +vint8mf2_t vslide1up(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8mf2_m))) +vint8mf2_t vslide1up(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8mf4))) +vint8mf4_t vslide1up(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8mf4_m))) +vint8mf4_t vslide1up(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8mf8))) +vint8mf8_t vslide1up(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i8mf8_m))) +vint8mf8_t vslide1up(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m1))) +vint16m1_t vslide1up(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m1_m))) +vint16m1_t vslide1up(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m2))) +vint16m2_t vslide1up(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m2_m))) +vint16m2_t 
vslide1up(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m4))) +vint16m4_t vslide1up(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m4_m))) +vint16m4_t vslide1up(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m8))) +vint16m8_t vslide1up(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16m8_m))) +vint16m8_t vslide1up(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16mf2))) +vint16mf2_t vslide1up(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16mf2_m))) +vint16mf2_t vslide1up(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16mf4))) +vint16mf4_t vslide1up(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i16mf4_m))) +vint16mf4_t vslide1up(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m1))) +vint32m1_t vslide1up(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m1_m))) +vint32m1_t vslide1up(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m2))) +vint32m2_t vslide1up(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m2_m))) +vint32m2_t vslide1up(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m4))) +vint32m4_t vslide1up(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m4_m))) +vint32m4_t vslide1up(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m8))) +vint32m8_t vslide1up(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32m8_m))) +vint32m8_t vslide1up(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32mf2))) +vint32mf2_t vslide1up(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i32mf2_m))) +vint32mf2_t vslide1up(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m1))) +vint64m1_t vslide1up(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m1_m))) +vint64m1_t vslide1up(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m2))) +vint64m2_t vslide1up(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m2_m))) +vint64m2_t vslide1up(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m4))) +vint64m4_t vslide1up(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m4_m))) +vint64m4_t vslide1up(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m8))) +vint64m8_t vslide1up(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_i64m8_m))) +vint64m8_t vslide1up(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m1))) +vuint8m1_t vslide1up(vuint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m1_m))) +vuint8m1_t vslide1up(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m2))) +vuint8m2_t vslide1up(vuint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m2_m))) +vuint8m2_t vslide1up(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m4))) +vuint8m4_t vslide1up(vuint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m4_m))) +vuint8m4_t vslide1up(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m8))) +vuint8m8_t vslide1up(vuint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8m8_m))) +vuint8m8_t vslide1up(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8mf2))) +vuint8mf2_t vslide1up(vuint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8mf2_m))) +vuint8mf2_t vslide1up(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8mf4))) +vuint8mf4_t vslide1up(vuint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8mf4_m))) +vuint8mf4_t vslide1up(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8mf8))) +vuint8mf8_t vslide1up(vuint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u8mf8_m))) +vuint8mf8_t vslide1up(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m1))) +vuint16m1_t vslide1up(vuint16m1_t op0, int16_t op1, size_t op2); 
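+
+/* Editor's note, not part of the upstream header: vslide1up shifts every
+ * element of the source vector up by one position and inserts the scalar
+ * operand at element 0, i.e. result[0] = scalar and result[i] = src[i-1];
+ * the final size_t operand is the active vector length vl. A minimal usage
+ * sketch, assuming the clang-14-era vsetvl_e32m1/vle32_v_i32m1 intrinsic
+ * names and hypothetical n, src and x:
+ *
+ *   size_t vl = vsetvl_e32m1(n);            // choose a legal vl for e32/m1
+ *   vint32m1_t v = vle32_v_i32m1(src, vl);  // load vl int32 elements
+ *   vint32m1_t r = vslide1up(v, x, vl);     // r[0] = x, r[i] = v[i-1]
+ */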
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m1_m))) +vuint16m1_t vslide1up(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m2))) +vuint16m2_t vslide1up(vuint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m2_m))) +vuint16m2_t vslide1up(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m4))) +vuint16m4_t vslide1up(vuint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m4_m))) +vuint16m4_t vslide1up(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m8))) +vuint16m8_t vslide1up(vuint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16m8_m))) +vuint16m8_t vslide1up(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16mf2))) +vuint16mf2_t vslide1up(vuint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16mf2_m))) +vuint16mf2_t vslide1up(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16mf4))) +vuint16mf4_t vslide1up(vuint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u16mf4_m))) +vuint16mf4_t vslide1up(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m1))) +vuint32m1_t vslide1up(vuint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m1_m))) +vuint32m1_t vslide1up(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m2))) +vuint32m2_t vslide1up(vuint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m2_m))) +vuint32m2_t vslide1up(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m4))) +vuint32m4_t vslide1up(vuint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m4_m))) +vuint32m4_t vslide1up(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m8))) +vuint32m8_t vslide1up(vuint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32m8_m))) +vuint32m8_t vslide1up(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32mf2))) +vuint32mf2_t vslide1up(vuint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u32mf2_m))) +vuint32mf2_t vslide1up(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m1))) +vuint64m1_t vslide1up(vuint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m1_m))) +vuint64m1_t vslide1up(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m2))) +vuint64m2_t vslide1up(vuint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m2_m))) +vuint64m2_t vslide1up(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m4))) +vuint64m4_t vslide1up(vuint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m4_m))) +vuint64m4_t vslide1up(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m8))) +vuint64m8_t vslide1up(vuint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1up_vx_u64m8_m))) +vuint64m8_t vslide1up(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m1))) +vint8m1_t vslide1down(vint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m1_m))) +vint8m1_t vslide1down(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m2))) +vint8m2_t vslide1down(vint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m2_m))) +vint8m2_t vslide1down(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m4))) +vint8m4_t vslide1down(vint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m4_m))) +vint8m4_t vslide1down(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m8))) +vint8m8_t vslide1down(vint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8m8_m))) +vint8m8_t vslide1down(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8mf2))) +vint8mf2_t vslide1down(vint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8mf2_m))) +vint8mf2_t vslide1down(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8mf4))) +vint8mf4_t vslide1down(vint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8mf4_m))) +vint8mf4_t 
vslide1down(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8mf8))) +vint8mf8_t vslide1down(vint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i8mf8_m))) +vint8mf8_t vslide1down(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m1))) +vint16m1_t vslide1down(vint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m1_m))) +vint16m1_t vslide1down(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m2))) +vint16m2_t vslide1down(vint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m2_m))) +vint16m2_t vslide1down(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m4))) +vint16m4_t vslide1down(vint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m4_m))) +vint16m4_t vslide1down(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m8))) +vint16m8_t vslide1down(vint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16m8_m))) +vint16m8_t vslide1down(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16mf2))) +vint16mf2_t vslide1down(vint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16mf2_m))) +vint16mf2_t vslide1down(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16mf4))) +vint16mf4_t vslide1down(vint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i16mf4_m))) +vint16mf4_t vslide1down(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m1))) +vint32m1_t vslide1down(vint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m1_m))) +vint32m1_t vslide1down(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m2))) +vint32m2_t vslide1down(vint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m2_m))) +vint32m2_t vslide1down(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m4))) +vint32m4_t vslide1down(vint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m4_m))) +vint32m4_t vslide1down(vbool8_t op0, vint32m4_t 
op1, vint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m8))) +vint32m8_t vslide1down(vint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32m8_m))) +vint32m8_t vslide1down(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32mf2))) +vint32mf2_t vslide1down(vint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i32mf2_m))) +vint32mf2_t vslide1down(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m1))) +vint64m1_t vslide1down(vint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m1_m))) +vint64m1_t vslide1down(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m2))) +vint64m2_t vslide1down(vint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m2_m))) +vint64m2_t vslide1down(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m4))) +vint64m4_t vslide1down(vint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m4_m))) +vint64m4_t vslide1down(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m8))) +vint64m8_t vslide1down(vint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_i64m8_m))) +vint64m8_t vslide1down(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m1))) +vuint8m1_t vslide1down(vuint8m1_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m1_m))) +vuint8m1_t vslide1down(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m2))) +vuint8m2_t vslide1down(vuint8m2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m2_m))) +vuint8m2_t vslide1down(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m4))) +vuint8m4_t vslide1down(vuint8m4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m4_m))) +vuint8m4_t vslide1down(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m8))) +vuint8m8_t vslide1down(vuint8m8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8m8_m))) +vuint8m8_t vslide1down(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, int8_t op3, size_t op4); + 
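+/* Editor's note, not part of the upstream header: each _m overload above
+ * prepends a mask and a maskedoff vector to the unmasked signature, e.g.
+ * vslide1down(mask, maskedoff, src, scalar, vl). Where mask[i] is set the
+ * slide result is written; where it is clear the element is presumably
+ * taken from maskedoff (the masked-off policy of this intrinsics version).
+ * A hedged sketch with hypothetical m, old, v, x and vl:
+ *
+ *   vbool32_t m = ...;                             // one bit per element
+ *   vint32m1_t r = vslide1down(m, old, v, x, vl);
+ *   // active lanes: r[i] = v[i+1], r[vl-1] = x; inactive: r[i] = old[i]
+ */
+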
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8mf2))) +vuint8mf2_t vslide1down(vuint8mf2_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8mf2_m))) +vuint8mf2_t vslide1down(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8mf4))) +vuint8mf4_t vslide1down(vuint8mf4_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8mf4_m))) +vuint8mf4_t vslide1down(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8mf8))) +vuint8mf8_t vslide1down(vuint8mf8_t op0, int8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u8mf8_m))) +vuint8mf8_t vslide1down(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, int8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m1))) +vuint16m1_t vslide1down(vuint16m1_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m1_m))) +vuint16m1_t vslide1down(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m2))) +vuint16m2_t vslide1down(vuint16m2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m2_m))) +vuint16m2_t vslide1down(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m4))) +vuint16m4_t vslide1down(vuint16m4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m4_m))) +vuint16m4_t vslide1down(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m8))) +vuint16m8_t vslide1down(vuint16m8_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16m8_m))) +vuint16m8_t vslide1down(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16mf2))) +vuint16mf2_t vslide1down(vuint16mf2_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16mf2_m))) +vuint16mf2_t vslide1down(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16mf4))) +vuint16mf4_t vslide1down(vuint16mf4_t op0, int16_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u16mf4_m))) +vuint16mf4_t vslide1down(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, int16_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m1))) +vuint32m1_t vslide1down(vuint32m1_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m1_m))) +vuint32m1_t vslide1down(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, int32_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m2))) +vuint32m2_t vslide1down(vuint32m2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m2_m))) +vuint32m2_t vslide1down(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m4))) +vuint32m4_t vslide1down(vuint32m4_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m4_m))) +vuint32m4_t vslide1down(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m8))) +vuint32m8_t vslide1down(vuint32m8_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32m8_m))) +vuint32m8_t vslide1down(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32mf2))) +vuint32mf2_t vslide1down(vuint32mf2_t op0, int32_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u32mf2_m))) +vuint32mf2_t vslide1down(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, int32_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m1))) +vuint64m1_t vslide1down(vuint64m1_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m1_m))) +vuint64m1_t vslide1down(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m2))) +vuint64m2_t vslide1down(vuint64m2_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m2_m))) +vuint64m2_t vslide1down(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m4))) +vuint64m4_t vslide1down(vuint64m4_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m4_m))) +vuint64m4_t vslide1down(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m8))) +vuint64m8_t vslide1down(vuint64m8_t op0, int64_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslide1down_vx_u64m8_m))) +vuint64m8_t vslide1down(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, int64_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m1))) +vint8m1_t vrgather(vint8m1_t op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m1_m))) +vint8m1_t vrgather(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m2))) +vint8m2_t vrgather(vint8m2_t op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m2_m))) +vint8m2_t vrgather(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vuint8m2_t op3, size_t op4); + 
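+/* Editor's note, not part of the upstream header: the vrgather overloads
+ * beginning above perform a register gather, result[i] = src[index[i]],
+ * where an out-of-range index (index[i] >= VLMAX) yields 0. The .vv forms
+ * take a vector of unsigned indices with the same element width as the
+ * data. A hedged sketch that reverses a vector, with hypothetical idx, v
+ * and vl:
+ *
+ *   // idx is a vuint32m1_t precomputed so that idx[i] = vl - 1 - i
+ *   vint32m1_t r = vrgather(v, idx, vl);    // r[i] = v[vl - 1 - i]
+ */
+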
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m4))) +vint8m4_t vrgather(vint8m4_t op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m4_m))) +vint8m4_t vrgather(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m8))) +vint8m8_t vrgather(vint8m8_t op0, vuint8m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8m8_m))) +vint8m8_t vrgather(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vuint8m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8mf2))) +vint8mf2_t vrgather(vint8mf2_t op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8mf2_m))) +vint8mf2_t vrgather(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8mf4))) +vint8mf4_t vrgather(vint8mf4_t op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8mf4_m))) +vint8mf4_t vrgather(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8mf8))) +vint8mf8_t vrgather(vint8mf8_t op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i8mf8_m))) +vint8mf8_t vrgather(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m1))) +vint16m1_t vrgather(vint16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m1_m))) +vint16m1_t vrgather(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m2))) +vint16m2_t vrgather(vint16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m2_m))) +vint16m2_t vrgather(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m4))) +vint16m4_t vrgather(vint16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m4_m))) +vint16m4_t vrgather(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m8))) +vint16m8_t vrgather(vint16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16m8_m))) +vint16m8_t vrgather(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16mf2))) +vint16mf2_t vrgather(vint16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16mf2_m))) +vint16mf2_t vrgather(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16mf4))) 
+vint16mf4_t vrgather(vint16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i16mf4_m))) +vint16mf4_t vrgather(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m1))) +vint32m1_t vrgather(vint32m1_t op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m1_m))) +vint32m1_t vrgather(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m2))) +vint32m2_t vrgather(vint32m2_t op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m2_m))) +vint32m2_t vrgather(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m4))) +vint32m4_t vrgather(vint32m4_t op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m4_m))) +vint32m4_t vrgather(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m8))) +vint32m8_t vrgather(vint32m8_t op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32m8_m))) +vint32m8_t vrgather(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32mf2))) +vint32mf2_t vrgather(vint32mf2_t op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i32mf2_m))) +vint32mf2_t vrgather(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m1))) +vint64m1_t vrgather(vint64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m1_m))) +vint64m1_t vrgather(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m2))) +vint64m2_t vrgather(vint64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m2_m))) +vint64m2_t vrgather(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m4))) +vint64m4_t vrgather(vint64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m4_m))) +vint64m4_t vrgather(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m8))) +vint64m8_t vrgather(vint64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_i64m8_m))) +vint64m8_t vrgather(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m1))) +vint8m1_t vrgather(vint8m1_t op0, size_t op1, size_t op2); + 
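+/* Editor's note, not part of the upstream header: the _vx overloads that
+ * begin above take a single size_t index in place of the index vector and
+ * so broadcast one source element, result[i] = src[x] for every active i
+ * (0 if x >= VLMAX). A hedged sketch with hypothetical v and vl:
+ *
+ *   vint32m1_t first = vrgather(v, (size_t)0, vl);  // splat element 0
+ */
+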
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m1_m))) +vint8m1_t vrgather(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m2))) +vint8m2_t vrgather(vint8m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m2_m))) +vint8m2_t vrgather(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m4))) +vint8m4_t vrgather(vint8m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m4_m))) +vint8m4_t vrgather(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m8))) +vint8m8_t vrgather(vint8m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8m8_m))) +vint8m8_t vrgather(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8mf2))) +vint8mf2_t vrgather(vint8mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8mf2_m))) +vint8mf2_t vrgather(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8mf4))) +vint8mf4_t vrgather(vint8mf4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8mf4_m))) +vint8mf4_t vrgather(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8mf8))) +vint8mf8_t vrgather(vint8mf8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i8mf8_m))) +vint8mf8_t vrgather(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m1))) +vint16m1_t vrgather(vint16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m1_m))) +vint16m1_t vrgather(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m2))) +vint16m2_t vrgather(vint16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m2_m))) +vint16m2_t vrgather(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m4))) +vint16m4_t vrgather(vint16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m4_m))) +vint16m4_t vrgather(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m8))) +vint16m8_t vrgather(vint16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16m8_m))) +vint16m8_t vrgather(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16mf2)))
+vint16mf2_t vrgather(vint16mf2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16mf2_m)))
+vint16mf2_t vrgather(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16mf4)))
+vint16mf4_t vrgather(vint16mf4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i16mf4_m)))
+vint16mf4_t vrgather(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m1)))
+vint32m1_t vrgather(vint32m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m1_m)))
+vint32m1_t vrgather(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m2)))
+vint32m2_t vrgather(vint32m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m2_m)))
+vint32m2_t vrgather(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m4)))
+vint32m4_t vrgather(vint32m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m4_m)))
+vint32m4_t vrgather(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m8)))
+vint32m8_t vrgather(vint32m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32m8_m)))
+vint32m8_t vrgather(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32mf2)))
+vint32mf2_t vrgather(vint32mf2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i32mf2_m)))
+vint32mf2_t vrgather(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m1)))
+vint64m1_t vrgather(vint64m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m1_m)))
+vint64m1_t vrgather(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m2)))
+vint64m2_t vrgather(vint64m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m2_m)))
+vint64m2_t vrgather(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m4)))
+vint64m4_t vrgather(vint64m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m4_m)))
+vint64m4_t vrgather(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m8)))
+vint64m8_t vrgather(vint64m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_i64m8_m)))
+vint64m8_t vrgather(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8m1)))
+vint8m1_t vrgatherei16(vint8m1_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8m1_m)))
+vint8m1_t vrgatherei16(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8m2)))
+vint8m2_t vrgatherei16(vint8m2_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8m2_m)))
+vint8m2_t vrgatherei16(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8m4)))
+vint8m4_t vrgatherei16(vint8m4_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8m4_m)))
+vint8m4_t vrgatherei16(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8mf2)))
+vint8mf2_t vrgatherei16(vint8mf2_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8mf2_m)))
+vint8mf2_t vrgatherei16(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8mf4)))
+vint8mf4_t vrgatherei16(vint8mf4_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8mf4_m)))
+vint8mf4_t vrgatherei16(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8mf8)))
+vint8mf8_t vrgatherei16(vint8mf8_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i8mf8_m)))
+vint8mf8_t vrgatherei16(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m1)))
+vint16m1_t vrgatherei16(vint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m1_m)))
+vint16m1_t vrgatherei16(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m2)))
+vint16m2_t vrgatherei16(vint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m2_m)))
+vint16m2_t vrgatherei16(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m4)))
+vint16m4_t vrgatherei16(vint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m4_m)))
+vint16m4_t vrgatherei16(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m8)))
+vint16m8_t vrgatherei16(vint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16m8_m)))
+vint16m8_t vrgatherei16(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16mf2)))
+vint16mf2_t vrgatherei16(vint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16mf2_m)))
+vint16mf2_t vrgatherei16(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16mf4)))
+vint16mf4_t vrgatherei16(vint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i16mf4_m)))
+vint16mf4_t vrgatherei16(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m1)))
+vint32m1_t vrgatherei16(vint32m1_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m1_m)))
+vint32m1_t vrgatherei16(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m2)))
+vint32m2_t vrgatherei16(vint32m2_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m2_m)))
+vint32m2_t vrgatherei16(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m4)))
+vint32m4_t vrgatherei16(vint32m4_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m4_m)))
+vint32m4_t vrgatherei16(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m8)))
+vint32m8_t vrgatherei16(vint32m8_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32m8_m)))
+vint32m8_t vrgatherei16(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32mf2)))
+vint32mf2_t vrgatherei16(vint32mf2_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i32mf2_m)))
+vint32mf2_t vrgatherei16(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m1)))
+vint64m1_t vrgatherei16(vint64m1_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m1_m)))
+vint64m1_t vrgatherei16(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m2)))
+vint64m2_t vrgatherei16(vint64m2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m2_m)))
+vint64m2_t vrgatherei16(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m4)))
+vint64m4_t vrgatherei16(vint64m4_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m4_m)))
+vint64m4_t vrgatherei16(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m8)))
+vint64m8_t vrgatherei16(vint64m8_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_i64m8_m)))
+vint64m8_t vrgatherei16(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m1)))
+vuint8m1_t vrgather(vuint8m1_t op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m1_m)))
+vuint8m1_t vrgather(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m2)))
+vuint8m2_t vrgather(vuint8m2_t op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m2_m)))
+vuint8m2_t vrgather(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m4)))
+vuint8m4_t vrgather(vuint8m4_t op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m4_m)))
+vuint8m4_t vrgather(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m8)))
+vuint8m8_t vrgather(vuint8m8_t op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8m8_m)))
+vuint8m8_t vrgather(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8mf2)))
+vuint8mf2_t vrgather(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8mf2_m)))
+vuint8mf2_t vrgather(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8mf4)))
+vuint8mf4_t vrgather(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8mf4_m)))
+vuint8mf4_t vrgather(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8mf8)))
+vuint8mf8_t vrgather(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u8mf8_m)))
+vuint8mf8_t vrgather(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m1)))
+vuint16m1_t vrgather(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m1_m)))
+vuint16m1_t vrgather(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m2)))
+vuint16m2_t vrgather(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m2_m)))
+vuint16m2_t vrgather(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m4)))
+vuint16m4_t vrgather(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m4_m)))
+vuint16m4_t vrgather(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m8)))
+vuint16m8_t vrgather(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16m8_m)))
+vuint16m8_t vrgather(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16mf2)))
+vuint16mf2_t vrgather(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16mf2_m)))
+vuint16mf2_t vrgather(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16mf4)))
+vuint16mf4_t vrgather(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u16mf4_m)))
+vuint16mf4_t vrgather(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m1)))
+vuint32m1_t vrgather(vuint32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m1_m)))
+vuint32m1_t vrgather(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m2)))
+vuint32m2_t vrgather(vuint32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m2_m)))
+vuint32m2_t vrgather(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m4)))
+vuint32m4_t vrgather(vuint32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m4_m)))
+vuint32m4_t vrgather(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m8)))
+vuint32m8_t vrgather(vuint32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32m8_m)))
+vuint32m8_t vrgather(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32mf2)))
+vuint32mf2_t vrgather(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u32mf2_m)))
+vuint32mf2_t vrgather(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m1)))
+vuint64m1_t vrgather(vuint64m1_t op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m1_m)))
+vuint64m1_t vrgather(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m2)))
+vuint64m2_t vrgather(vuint64m2_t op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m2_m)))
+vuint64m2_t vrgather(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m4)))
+vuint64m4_t vrgather(vuint64m4_t op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m4_m)))
+vuint64m4_t vrgather(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m8)))
+vuint64m8_t vrgather(vuint64m8_t op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_u64m8_m)))
+vuint64m8_t vrgather(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m1)))
+vuint8m1_t vrgather(vuint8m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m1_m)))
+vuint8m1_t vrgather(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m2)))
+vuint8m2_t vrgather(vuint8m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m2_m)))
+vuint8m2_t vrgather(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m4)))
+vuint8m4_t vrgather(vuint8m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m4_m)))
+vuint8m4_t vrgather(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m8)))
+vuint8m8_t vrgather(vuint8m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8m8_m)))
+vuint8m8_t vrgather(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8mf2)))
+vuint8mf2_t vrgather(vuint8mf2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8mf2_m)))
+vuint8mf2_t vrgather(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8mf4)))
+vuint8mf4_t vrgather(vuint8mf4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8mf4_m)))
+vuint8mf4_t vrgather(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8mf8)))
+vuint8mf8_t vrgather(vuint8mf8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u8mf8_m)))
+vuint8mf8_t vrgather(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m1)))
+vuint16m1_t vrgather(vuint16m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m1_m)))
+vuint16m1_t vrgather(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m2)))
+vuint16m2_t vrgather(vuint16m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m2_m)))
+vuint16m2_t vrgather(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m4)))
+vuint16m4_t vrgather(vuint16m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m4_m)))
+vuint16m4_t vrgather(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m8)))
+vuint16m8_t vrgather(vuint16m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16m8_m)))
+vuint16m8_t vrgather(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16mf2)))
+vuint16mf2_t vrgather(vuint16mf2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16mf2_m)))
+vuint16mf2_t vrgather(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16mf4)))
+vuint16mf4_t vrgather(vuint16mf4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u16mf4_m)))
+vuint16mf4_t vrgather(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m1)))
+vuint32m1_t vrgather(vuint32m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m1_m)))
+vuint32m1_t vrgather(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m2)))
+vuint32m2_t vrgather(vuint32m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m2_m)))
+vuint32m2_t vrgather(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m4)))
+vuint32m4_t vrgather(vuint32m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m4_m)))
+vuint32m4_t vrgather(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m8)))
+vuint32m8_t vrgather(vuint32m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32m8_m)))
+vuint32m8_t vrgather(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32mf2)))
+vuint32mf2_t vrgather(vuint32mf2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u32mf2_m)))
+vuint32mf2_t vrgather(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m1)))
+vuint64m1_t vrgather(vuint64m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m1_m)))
+vuint64m1_t vrgather(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m2)))
+vuint64m2_t vrgather(vuint64m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m2_m)))
+vuint64m2_t vrgather(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m4)))
+vuint64m4_t vrgather(vuint64m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m4_m)))
+vuint64m4_t vrgather(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m8)))
+vuint64m8_t vrgather(vuint64m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_u64m8_m)))
+vuint64m8_t vrgather(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8m1)))
+vuint8m1_t vrgatherei16(vuint8m1_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8m1_m)))
+vuint8m1_t vrgatherei16(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8m2)))
+vuint8m2_t vrgatherei16(vuint8m2_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8m2_m)))
+vuint8m2_t vrgatherei16(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8m4)))
+vuint8m4_t vrgatherei16(vuint8m4_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8m4_m)))
+vuint8m4_t vrgatherei16(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8mf2)))
+vuint8mf2_t vrgatherei16(vuint8mf2_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8mf2_m)))
+vuint8mf2_t vrgatherei16(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8mf4)))
+vuint8mf4_t vrgatherei16(vuint8mf4_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8mf4_m)))
+vuint8mf4_t vrgatherei16(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8mf8)))
+vuint8mf8_t vrgatherei16(vuint8mf8_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u8mf8_m)))
+vuint8mf8_t vrgatherei16(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m1)))
+vuint16m1_t vrgatherei16(vuint16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m1_m)))
+vuint16m1_t vrgatherei16(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m2)))
+vuint16m2_t vrgatherei16(vuint16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m2_m)))
+vuint16m2_t vrgatherei16(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m4)))
+vuint16m4_t vrgatherei16(vuint16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m4_m)))
+vuint16m4_t vrgatherei16(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m8)))
+vuint16m8_t vrgatherei16(vuint16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16m8_m)))
+vuint16m8_t vrgatherei16(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16mf2)))
+vuint16mf2_t vrgatherei16(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16mf2_m)))
+vuint16mf2_t vrgatherei16(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16mf4)))
+vuint16mf4_t vrgatherei16(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u16mf4_m)))
+vuint16mf4_t vrgatherei16(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m1)))
+vuint32m1_t vrgatherei16(vuint32m1_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m1_m)))
+vuint32m1_t vrgatherei16(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m2)))
+vuint32m2_t vrgatherei16(vuint32m2_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m2_m)))
+vuint32m2_t vrgatherei16(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m4)))
+vuint32m4_t vrgatherei16(vuint32m4_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m4_m)))
+vuint32m4_t vrgatherei16(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m8)))
+vuint32m8_t vrgatherei16(vuint32m8_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32m8_m)))
+vuint32m8_t vrgatherei16(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32mf2)))
+vuint32mf2_t vrgatherei16(vuint32mf2_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u32mf2_m)))
+vuint32mf2_t vrgatherei16(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m1)))
+vuint64m1_t vrgatherei16(vuint64m1_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m1_m)))
+vuint64m1_t vrgatherei16(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m2)))
+vuint64m2_t vrgatherei16(vuint64m2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m2_m)))
+vuint64m2_t vrgatherei16(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m4)))
+vuint64m4_t vrgatherei16(vuint64m4_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m4_m)))
+vuint64m4_t vrgatherei16(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m8)))
+vuint64m8_t vrgatherei16(vuint64m8_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_u64m8_m)))
+vuint64m8_t vrgatherei16(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8m1)))
+vint8m1_t vcompress(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8m2)))
+vint8m2_t vcompress(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8m4)))
+vint8m4_t vcompress(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8m8)))
+vint8m8_t vcompress(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8mf2)))
+vint8mf2_t vcompress(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8mf4)))
+vint8mf4_t vcompress(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i8mf8)))
+vint8mf8_t vcompress(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i16m1)))
+vint16m1_t vcompress(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i16m2)))
+vint16m2_t vcompress(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i16m4)))
+vint16m4_t vcompress(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i16m8)))
+vint16m8_t vcompress(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i16mf2)))
+vint16mf2_t vcompress(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i16mf4)))
+vint16mf4_t vcompress(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i32m1)))
+vint32m1_t vcompress(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i32m2)))
+vint32m2_t vcompress(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i32m4)))
+vint32m4_t vcompress(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i32m8)))
+vint32m8_t vcompress(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i32mf2)))
+vint32mf2_t vcompress(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i64m1)))
+vint64m1_t vcompress(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i64m2)))
+vint64m2_t vcompress(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i64m4)))
+vint64m4_t vcompress(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_i64m8)))
+vint64m8_t vcompress(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8m1)))
+vuint8m1_t vcompress(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8m2)))
+vuint8m2_t vcompress(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8m4)))
+vuint8m4_t vcompress(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8m8)))
+vuint8m8_t vcompress(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8mf2)))
+vuint8mf2_t vcompress(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8mf4)))
+vuint8mf4_t vcompress(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u8mf8)))
+vuint8mf8_t vcompress(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u16m1)))
+vuint16m1_t vcompress(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u16m2)))
+vuint16m2_t vcompress(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u16m4)))
+vuint16m4_t vcompress(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u16m8)))
+vuint16m8_t vcompress(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u16mf2)))
+vuint16mf2_t vcompress(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u16mf4)))
+vuint16mf4_t vcompress(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u32m1)))
+vuint32m1_t vcompress(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u32m2)))
+vuint32m2_t vcompress(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u32m4)))
+vuint32m4_t vcompress(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u32m8)))
+vuint32m8_t vcompress(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u32mf2)))
+vuint32mf2_t vcompress(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u64m1)))
+vuint64m1_t vcompress(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u64m2)))
+vuint64m2_t vcompress(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u64m4)))
+vuint64m4_t vcompress(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_u64m8)))
+vuint64m8_t vcompress(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i8m2_i8m1)))
+vint8m1_t vget_i8m1(vint8m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i8m4_i8m1)))
+vint8m1_t vget_i8m1(vint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i8m8_i8m1)))
+vint8m1_t vget_i8m1(vint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i16m2_i16m1)))
+vint16m1_t vget_i16m1(vint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i16m4_i16m1)))
+vint16m1_t vget_i16m1(vint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i16m8_i16m1)))
+vint16m1_t vget_i16m1(vint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i32m2_i32m1)))
+vint32m1_t vget_i32m1(vint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i32m4_i32m1)))
+vint32m1_t vget_i32m1(vint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i32m8_i32m1)))
+vint32m1_t vget_i32m1(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i64m2_i64m1)))
+vint64m1_t vget_i64m1(vint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i64m4_i64m1)))
+vint64m1_t vget_i64m1(vint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i64m8_i64m1)))
+vint64m1_t vget_i64m1(vint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u8m2_u8m1)))
+vuint8m1_t vget_u8m1(vuint8m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u8m4_u8m1)))
+vuint8m1_t vget_u8m1(vuint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u8m8_u8m1)))
+vuint8m1_t vget_u8m1(vuint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u16m2_u16m1)))
+vuint16m1_t vget_u16m1(vuint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u16m4_u16m1)))
+vuint16m1_t vget_u16m1(vuint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u16m8_u16m1)))
+vuint16m1_t vget_u16m1(vuint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u32m2_u32m1)))
+vuint32m1_t vget_u32m1(vuint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u32m4_u32m1)))
+vuint32m1_t vget_u32m1(vuint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u32m8_u32m1)))
+vuint32m1_t vget_u32m1(vuint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u64m2_u64m1)))
+vuint64m1_t vget_u64m1(vuint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u64m4_u64m1)))
+vuint64m1_t vget_u64m1(vuint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u64m8_u64m1)))
+vuint64m1_t vget_u64m1(vuint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i8m4_i8m2)))
+vint8m2_t vget_i8m2(vint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i8m8_i8m2)))
+vint8m2_t vget_i8m2(vint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i16m4_i16m2)))
+vint16m2_t vget_i16m2(vint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i16m8_i16m2)))
+vint16m2_t vget_i16m2(vint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i32m4_i32m2)))
+vint32m2_t vget_i32m2(vint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i32m8_i32m2)))
+vint32m2_t vget_i32m2(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i64m4_i64m2)))
+vint64m2_t vget_i64m2(vint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i64m8_i64m2)))
+vint64m2_t vget_i64m2(vint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u8m4_u8m2)))
+vuint8m2_t vget_u8m2(vuint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u8m8_u8m2)))
+vuint8m2_t vget_u8m2(vuint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u16m4_u16m2)))
+vuint16m2_t vget_u16m2(vuint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u16m8_u16m2)))
+vuint16m2_t vget_u16m2(vuint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u32m4_u32m2)))
+vuint32m2_t vget_u32m2(vuint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u32m8_u32m2)))
+vuint32m2_t vget_u32m2(vuint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u64m4_u64m2)))
+vuint64m2_t vget_u64m2(vuint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u64m8_u64m2)))
+vuint64m2_t vget_u64m2(vuint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i8m8_i8m4)))
+vint8m4_t vget_i8m4(vint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i16m8_i16m4)))
+vint16m4_t vget_i16m4(vint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i32m8_i32m4)))
+vint32m4_t vget_i32m4(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_i64m8_i64m4)))
+vint64m4_t vget_i64m4(vint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u8m8_u8m4)))
+vuint8m4_t vget_u8m4(vuint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u16m8_u16m4)))
+vuint16m4_t vget_u16m4(vuint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u32m8_u32m4)))
+vuint32m4_t vget_u32m4(vuint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_u64m8_u64m4)))
+vuint64m4_t vget_u64m4(vuint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i8m1_i8m2)))
+vint8m2_t vset(vint8m2_t op0, size_t op1, vint8m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i16m1_i16m2)))
+vint16m2_t vset(vint16m2_t op0, size_t op1, vint16m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i32m1_i32m2)))
+vint32m2_t vset(vint32m2_t op0, size_t op1, vint32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i64m1_i64m2)))
+vint64m2_t vset(vint64m2_t op0, size_t op1, vint64m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u8m1_u8m2)))
+vuint8m2_t vset(vuint8m2_t op0, size_t op1, vuint8m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u16m1_u16m2)))
+vuint16m2_t vset(vuint16m2_t op0, size_t op1, vuint16m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u32m1_u32m2)))
+vuint32m2_t vset(vuint32m2_t op0, size_t op1, vuint32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u64m1_u64m2)))
+vuint64m2_t vset(vuint64m2_t op0, size_t op1, vuint64m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i8m1_i8m4)))
+vint8m4_t vset(vint8m4_t op0, size_t op1, vint8m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i8m2_i8m4)))
+vint8m4_t vset(vint8m4_t op0, size_t op1, vint8m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i16m1_i16m4)))
+vint16m4_t vset(vint16m4_t op0, size_t op1, vint16m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i16m2_i16m4)))
+vint16m4_t vset(vint16m4_t op0, size_t op1, vint16m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i32m1_i32m4)))
+vint32m4_t vset(vint32m4_t op0, size_t op1, vint32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i32m2_i32m4)))
+vint32m4_t vset(vint32m4_t op0, size_t op1, vint32m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i64m1_i64m4)))
+vint64m4_t vset(vint64m4_t op0, size_t op1, vint64m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i64m2_i64m4)))
+vint64m4_t vset(vint64m4_t op0, size_t op1, vint64m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u8m1_u8m4)))
+vuint8m4_t vset(vuint8m4_t op0, size_t op1, vuint8m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u8m2_u8m4)))
+vuint8m4_t vset(vuint8m4_t op0, size_t op1, vuint8m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u16m1_u16m4)))
+vuint16m4_t vset(vuint16m4_t op0, size_t op1, vuint16m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u16m2_u16m4)))
+vuint16m4_t vset(vuint16m4_t op0, size_t op1, vuint16m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u32m1_u32m4)))
+vuint32m4_t vset(vuint32m4_t op0, size_t op1, vuint32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u32m2_u32m4)))
+vuint32m4_t vset(vuint32m4_t op0, size_t op1, vuint32m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u64m1_u64m4)))
+vuint64m4_t vset(vuint64m4_t op0, size_t op1, vuint64m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u64m2_u64m4)))
+vuint64m4_t vset(vuint64m4_t op0, size_t op1, vuint64m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i8m1_i8m8)))
+vint8m8_t vset(vint8m8_t op0, size_t op1, vint8m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i8m2_i8m8)))
+vint8m8_t vset(vint8m8_t op0, size_t op1, vint8m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i8m4_i8m8)))
+vint8m8_t vset(vint8m8_t op0, size_t op1, vint8m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i16m1_i16m8)))
+vint16m8_t vset(vint16m8_t op0, size_t op1, vint16m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i16m2_i16m8)))
+vint16m8_t vset(vint16m8_t op0, size_t op1, vint16m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i16m4_i16m8)))
+vint16m8_t vset(vint16m8_t op0, size_t op1, vint16m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i32m1_i32m8)))
+vint32m8_t vset(vint32m8_t op0, size_t op1, vint32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i32m2_i32m8)))
+vint32m8_t vset(vint32m8_t op0, size_t op1, vint32m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i32m4_i32m8)))
+vint32m8_t vset(vint32m8_t op0, size_t op1, vint32m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i64m1_i64m8)))
+vint64m8_t vset(vint64m8_t op0, size_t op1, vint64m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i64m2_i64m8)))
+vint64m8_t vset(vint64m8_t op0, size_t op1, vint64m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_i64m4_i64m8)))
+vint64m8_t vset(vint64m8_t op0, size_t op1, vint64m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u8m1_u8m8)))
+vuint8m8_t vset(vuint8m8_t op0, size_t op1, vuint8m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u8m2_u8m8)))
+vuint8m8_t vset(vuint8m8_t op0, size_t op1, vuint8m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u8m4_u8m8)))
+vuint8m8_t vset(vuint8m8_t op0, size_t op1, vuint8m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u16m1_u16m8)))
+vuint16m8_t vset(vuint16m8_t op0, size_t op1, vuint16m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u16m2_u16m8)))
+vuint16m8_t vset(vuint16m8_t op0, size_t op1, vuint16m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u16m4_u16m8)))
+vuint16m8_t vset(vuint16m8_t op0, size_t op1, vuint16m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u32m1_u32m8)))
+vuint32m8_t vset(vuint32m8_t op0, size_t op1, vuint32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u32m2_u32m8)))
+vuint32m8_t vset(vuint32m8_t op0, size_t op1, vuint32m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u32m4_u32m8)))
+vuint32m8_t vset(vuint32m8_t op0, size_t op1, vuint32m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u64m1_u64m8)))
+vuint64m8_t vset(vuint64m8_t op0, size_t op1, vuint64m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u64m2_u64m8)))
+vuint64m8_t vset(vuint64m8_t op0, size_t op1, vuint64m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_u64m4_u64m8)))
+vuint64m8_t vset(vuint64m8_t op0, size_t op1, vuint64m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m1)))
+void vsoxei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m1_m)))
+void vsoxei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m2)))
+void vsoxei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m2_m)))
+void vsoxei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m4)))
+void vsoxei8(int8_t * op0, vuint8m4_t op1, vint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m4_m)))
+void vsoxei8(vbool2_t op0, int8_t * op1, vuint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m8)))
+void vsoxei8(int8_t * op0, vuint8m8_t op1, vint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8m8_m)))
+void vsoxei8(vbool1_t op0, int8_t * op1, vuint8m8_t op2, vint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8mf2)))
+void vsoxei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8mf2_m)))
+void vsoxei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8mf4)))
+void vsoxei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8mf4_m)))
+void vsoxei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8mf8)))
+void vsoxei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i8mf8_m)))
+void vsoxei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m1)))
+void vsoxei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m1_m)))
+void vsoxei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m2)))
+void vsoxei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m2_m)))
+void vsoxei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m4)))
+void vsoxei8(uint8_t * op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m4_m)))
+void vsoxei8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m8)))
+void vsoxei8(uint8_t * op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8m8_m)))
+void vsoxei8(vbool1_t op0, uint8_t * op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8mf2)))
+void vsoxei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8mf2_m)))
+void vsoxei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8mf4)))
+void vsoxei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8mf4_m)))
+void vsoxei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8mf8)))
+void vsoxei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u8mf8_m)))
+void vsoxei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8m1)))
+void vsoxei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8m1_m)))
+void vsoxei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8m2)))
+void vsoxei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8m2_m)))
+void vsoxei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8m4)))
+void vsoxei16(int8_t * op0, vuint16m8_t op1, vint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8m4_m)))
+void vsoxei16(vbool2_t op0, int8_t * op1, vuint16m8_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8mf2)))
+void vsoxei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8mf2_m)))
+void vsoxei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8mf4)))
+void vsoxei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8mf4_m)))
+void vsoxei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8mf8)))
+void vsoxei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i8mf8_m)))
+void vsoxei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8m1)))
+void vsoxei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8m1_m)))
+void vsoxei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8m2)))
+void vsoxei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8m2_m)))
+void vsoxei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8m4)))
+void vsoxei16(uint8_t * op0, vuint16m8_t op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8m4_m)))
+void vsoxei16(vbool2_t op0, uint8_t * op1, vuint16m8_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8mf2)))
+void vsoxei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8mf2_m)))
+void vsoxei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8mf4)))
+void vsoxei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8mf4_m)))
+void vsoxei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8mf8)))
+void vsoxei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u8mf8_m)))
+void vsoxei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8m1)))
+void vsoxei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8m1_m)))
+void vsoxei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8m2)))
+void vsoxei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8m2_m)))
+void vsoxei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8mf2)))
+void vsoxei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8mf2_m)))
+void vsoxei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8mf4)))
+void vsoxei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8mf4_m)))
+void vsoxei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8mf8)))
+void vsoxei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i8mf8_m)))
+void vsoxei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8m1)))
+void vsoxei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8m1_m)))
+void vsoxei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8m2)))
+void vsoxei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8m2_m)))
+void vsoxei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8mf2)))
+void vsoxei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8mf2_m)))
+void vsoxei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8mf4)))
+void vsoxei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8mf4_m)))
+void vsoxei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8mf8)))
+void vsoxei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u8mf8_m)))
+void vsoxei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8m1)))
+void vsoxei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8m1_m)))
+void vsoxei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8mf2)))
+void vsoxei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8mf2_m)))
+void vsoxei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8mf4)))
+void vsoxei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8mf4_m)))
+void vsoxei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8mf8)))
+void vsoxei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i8mf8_m)))
+void vsoxei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8m1)))
+void vsoxei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8m1_m)))
+void vsoxei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8mf2)))
+void vsoxei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8mf2_m)))
+void vsoxei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8mf4)))
+void vsoxei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8mf4_m)))
+void vsoxei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8mf8)))
+void vsoxei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u8mf8_m)))
+void vsoxei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m1)))
+void vsoxei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m1_m)))
+void vsoxei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m2)))
+void vsoxei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m2_m)))
+void vsoxei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m4)))
+void vsoxei8(int16_t * op0, vuint8m2_t op1, vint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m4_m)))
+void vsoxei8(vbool4_t op0, int16_t * op1, vuint8m2_t op2, vint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m8)))
+void vsoxei8(int16_t * op0, vuint8m4_t op1, vint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16m8_m)))
+void vsoxei8(vbool2_t op0, int16_t * op1, vuint8m4_t op2, vint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16mf2)))
+void vsoxei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16mf2_m)))
+void vsoxei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16mf4)))
+void vsoxei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i16mf4_m)))
+void vsoxei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m1)))
+void vsoxei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m1_m)))
+void vsoxei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m2)))
+void vsoxei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m2_m)))
+void vsoxei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m4)))
+void vsoxei8(uint16_t * op0, vuint8m2_t op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m4_m)))
+void vsoxei8(vbool4_t op0, uint16_t * op1, vuint8m2_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m8))) +void vsoxei8(uint16_t * op0, vuint8m4_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16m8_m))) +void vsoxei8(vbool2_t op0, uint16_t * op1, vuint8m4_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16mf2))) +void vsoxei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16mf2_m))) +void vsoxei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16mf4))) +void vsoxei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u16mf4_m))) +void vsoxei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m1))) +void vsoxei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m1_m))) +void vsoxei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m2))) +void vsoxei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m2_m))) +void vsoxei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m4))) +void vsoxei16(int16_t * op0, vuint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m4_m))) +void vsoxei16(vbool4_t op0, int16_t * op1, vuint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m8))) +void vsoxei16(int16_t * op0, vuint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16m8_m))) +void vsoxei16(vbool2_t op0, int16_t * op1, vuint16m8_t op2, vint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16mf2))) +void vsoxei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16mf2_m))) +void vsoxei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16mf4))) +void vsoxei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i16mf4_m))) +void vsoxei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m1))) +void vsoxei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m1_m))) +void vsoxei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m2))) +void vsoxei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m2_m))) +void vsoxei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m4))) +void vsoxei16(uint16_t * op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m4_m))) +void vsoxei16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m8))) +void vsoxei16(uint16_t * op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16m8_m))) +void vsoxei16(vbool2_t op0, uint16_t * op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16mf2))) +void vsoxei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16mf2_m))) +void vsoxei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16mf4))) +void vsoxei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u16mf4_m))) +void vsoxei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16m1))) +void vsoxei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16m1_m))) +void vsoxei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16m2))) +void vsoxei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16m2_m))) +void vsoxei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16m4))) +void vsoxei32(int16_t * op0, vuint32m8_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16m4_m))) +void vsoxei32(vbool4_t op0, int16_t * op1, vuint32m8_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16mf2))) +void vsoxei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16mf2_m))) +void vsoxei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16mf4))) +void vsoxei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i16mf4_m))) +void vsoxei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16m1))) +void vsoxei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16m1_m))) +void vsoxei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16m2))) +void vsoxei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16m2_m))) +void vsoxei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16m4))) +void vsoxei32(uint16_t * op0, vuint32m8_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16m4_m))) +void vsoxei32(vbool4_t op0, uint16_t * op1, vuint32m8_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16mf2))) +void vsoxei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16mf2_m))) +void vsoxei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16mf4))) +void vsoxei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u16mf4_m))) +void vsoxei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16m1))) +void vsoxei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16m1_m))) +void vsoxei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16m2))) +void vsoxei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16m2_m))) +void vsoxei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16mf2))) +void vsoxei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16mf2_m))) +void vsoxei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16mf4))) +void vsoxei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i16mf4_m))) +void vsoxei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16m1))) +void vsoxei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16m1_m))) +void vsoxei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16m2))) +void vsoxei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16m2_m))) +void vsoxei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16mf2))) +void vsoxei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16mf2_m))) +void vsoxei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16mf4))) +void vsoxei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u16mf4_m))) +void vsoxei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m1))) +void vsoxei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m1_m))) +void vsoxei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m2))) +void vsoxei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m2_m))) +void vsoxei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m4))) +void vsoxei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m4_m))) +void vsoxei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m8))) +void vsoxei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32m8_m))) +void vsoxei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32mf2))) +void vsoxei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i32mf2_m))) +void vsoxei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m1))) +void vsoxei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m1_m))) +void vsoxei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m2))) +void vsoxei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m2_m))) +void vsoxei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m4))) +void 
vsoxei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m4_m))) +void vsoxei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m8))) +void vsoxei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32m8_m))) +void vsoxei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32mf2))) +void vsoxei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u32mf2_m))) +void vsoxei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m1))) +void vsoxei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m1_m))) +void vsoxei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m2))) +void vsoxei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m2_m))) +void vsoxei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m4))) +void vsoxei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m4_m))) +void vsoxei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m8))) +void vsoxei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32m8_m))) +void vsoxei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32mf2))) +void vsoxei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i32mf2_m))) +void vsoxei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m1))) +void vsoxei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m1_m))) +void vsoxei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m2))) +void vsoxei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m2_m))) +void vsoxei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m4))) +void vsoxei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t 
op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m4_m))) +void vsoxei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m8))) +void vsoxei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32m8_m))) +void vsoxei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32mf2))) +void vsoxei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u32mf2_m))) +void vsoxei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m1))) +void vsoxei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m1_m))) +void vsoxei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m2))) +void vsoxei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m2_m))) +void vsoxei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m4))) +void vsoxei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m4_m))) +void vsoxei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m8))) +void vsoxei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32m8_m))) +void vsoxei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32mf2))) +void vsoxei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i32mf2_m))) +void vsoxei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m1))) +void vsoxei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m1_m))) +void vsoxei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m2))) +void vsoxei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m2_m))) +void vsoxei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m4))) +void vsoxei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m4_m))) +void vsoxei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m8))) +void vsoxei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32m8_m))) +void vsoxei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32mf2))) +void vsoxei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u32mf2_m))) +void vsoxei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32m1))) +void vsoxei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32m1_m))) +void vsoxei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32m2))) +void vsoxei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32m2_m))) +void vsoxei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32m4))) +void vsoxei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32m4_m))) +void vsoxei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32mf2))) +void vsoxei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i32mf2_m))) +void vsoxei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32m1))) +void vsoxei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32m1_m))) +void vsoxei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32m2))) +void vsoxei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32m2_m))) +void vsoxei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32m4))) +void vsoxei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32m4_m))) +void vsoxei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32mf2))) +void vsoxei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u32mf2_m))) +void vsoxei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m1))) +void vsoxei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m1_m))) +void vsoxei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m2))) +void vsoxei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m2_m))) +void vsoxei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m4))) +void vsoxei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m4_m))) +void vsoxei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m8))) +void vsoxei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_i64m8_m))) +void vsoxei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m1))) +void vsoxei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m1_m))) +void vsoxei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m2))) +void vsoxei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m2_m))) +void vsoxei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m4))) +void vsoxei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m4_m))) +void vsoxei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m8))) +void vsoxei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_u64m8_m))) +void vsoxei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m1))) +void vsoxei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m1_m))) +void vsoxei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m2))) +void vsoxei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m2_m))) +void vsoxei16(vbool32_t 
op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m4))) +void vsoxei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m4_m))) +void vsoxei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m8))) +void vsoxei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_i64m8_m))) +void vsoxei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m1))) +void vsoxei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m1_m))) +void vsoxei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m2))) +void vsoxei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m2_m))) +void vsoxei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m4))) +void vsoxei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m4_m))) +void vsoxei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m8))) +void vsoxei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_u64m8_m))) +void vsoxei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m1))) +void vsoxei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m1_m))) +void vsoxei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m2))) +void vsoxei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m2_m))) +void vsoxei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m4))) +void vsoxei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m4_m))) +void vsoxei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m8))) +void vsoxei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_i64m8_m))) +void vsoxei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t 
op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m1))) +void vsoxei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m1_m))) +void vsoxei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m2))) +void vsoxei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m2_m))) +void vsoxei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m4))) +void vsoxei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m4_m))) +void vsoxei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m8))) +void vsoxei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_u64m8_m))) +void vsoxei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m1))) +void vsoxei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m1_m))) +void vsoxei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m2))) +void vsoxei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m2_m))) +void vsoxei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m4))) +void vsoxei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m4_m))) +void vsoxei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m8))) +void vsoxei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_i64m8_m))) +void vsoxei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m1))) +void vsoxei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m1_m))) +void vsoxei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m2))) +void vsoxei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m2_m))) +void vsoxei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m4))) +void vsoxei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m4_m))) +void vsoxei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m8))) +void vsoxei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_u64m8_m))) +void vsoxei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_i16m1_m))) +vint16m1_t vle16ff(vbool16_t op0, vint16m1_t op1, const int16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_i16m2_m))) +vint16m2_t vle16ff(vbool8_t op0, vint16m2_t op1, const int16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_i16m4_m))) +vint16m4_t vle16ff(vbool4_t op0, vint16m4_t op1, const int16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_i16m8_m))) +vint16m8_t vle16ff(vbool2_t op0, vint16m8_t op1, const int16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_i16mf2_m))) +vint16mf2_t vle16ff(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_i16mf4_m))) +vint16mf4_t vle16ff(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_u16m1_m))) +vuint16m1_t vle16ff(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_u16m2_m))) +vuint16m2_t vle16ff(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_u16m4_m))) +vuint16m4_t vle16ff(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_u16m8_m))) +vuint16m8_t vle16ff(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_u16mf2_m))) +vuint16mf2_t vle16ff(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_u16mf4_m))) +vuint16mf4_t vle16ff(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_i32m1_m))) +vint32m1_t vle32ff(vbool32_t op0, vint32m1_t op1, const int32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_i32m2_m))) +vint32m2_t vle32ff(vbool16_t op0, vint32m2_t op1, const int32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_i32m4_m))) +vint32m4_t vle32ff(vbool8_t op0, vint32m4_t op1, const int32_t * op2, size_t * op3, size_t op4); + 
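The run above completes the overloaded vsoxei{8,16,32,64} indexed-ordered scatter stores (unmasked, and masked with the vbool operand first) before the fault-only-first loads begin. A minimal usage sketch, editorial and not part of the generated header: it assumes a V-extension toolchain (e.g. -march=rv64gcv), uses the explicitly-typed vsetvl_e32m1/vle32_v_* intrinsics from the same header alongside the overloaded vsoxei32 declared above, and relies on the RVV rule that the index vector holds byte (not element) offsets. The helper name scatter_i32 is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Ordered scatter: *(int32_t *)((char *)dst + byte_offsets[i]) = src[i]
   for n elements; byte_offsets[] are byte offsets from dst. */
void scatter_i32(int32_t *dst, const uint32_t *byte_offsets,
                 const int32_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, byte_offsets += vl) {
    vl = vsetvl_e32m1(n);                        /* lanes this iteration */
    vuint32m1_t idx = vle32_v_u32m1(byte_offsets, vl);
    vint32m1_t val = vle32_v_i32m1(src, vl);
    vsoxei32(dst, idx, val, vl);                 /* overloaded form above */
  }
}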
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_i32m8_m))) +vint32m8_t vle32ff(vbool4_t op0, vint32m8_t op1, const int32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_i32mf2_m))) +vint32mf2_t vle32ff(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_u32m1_m))) +vuint32m1_t vle32ff(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_u32m2_m))) +vuint32m2_t vle32ff(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_u32m4_m))) +vuint32m4_t vle32ff(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_u32m8_m))) +vuint32m8_t vle32ff(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_u32mf2_m))) +vuint32mf2_t vle32ff(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_i64m1_m))) +vint64m1_t vle64ff(vbool64_t op0, vint64m1_t op1, const int64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_i64m2_m))) +vint64m2_t vle64ff(vbool32_t op0, vint64m2_t op1, const int64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_i64m4_m))) +vint64m4_t vle64ff(vbool16_t op0, vint64m4_t op1, const int64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_i64m8_m))) +vint64m8_t vle64ff(vbool8_t op0, vint64m8_t op1, const int64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_u64m1_m))) +vuint64m1_t vle64ff(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_u64m2_m))) +vuint64m2_t vle64ff(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_u64m4_m))) +vuint64m4_t vle64ff(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_u64m8_m))) +vuint64m8_t vle64ff(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8m1_m))) +vint8m1_t vle8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8m2_m))) +vint8m2_t vle8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8m4_m))) +vint8m4_t vle8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8m8_m))) +vint8m8_t vle8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8mf2_m))) +vint8mf2_t vle8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8mf4_m))) +vint8mf4_t vle8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_i8mf8_m))) +vint8mf8_t vle8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8m1_m))) +vuint8m1_t vle8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8m2_m))) +vuint8m2_t vle8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8m4_m))) +vuint8m4_t vle8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8m8_m))) +vuint8m8_t vle8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8mf2_m))) +vuint8mf2_t vle8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8mf4_m))) +vuint8mf4_t vle8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8_v_u8mf8_m))) +vuint8mf8_t vle8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8m1_m))) +vint8m1_t vle8ff(vbool8_t op0, vint8m1_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8m2_m))) +vint8m2_t vle8ff(vbool4_t op0, vint8m2_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8m4_m))) +vint8m4_t vle8ff(vbool2_t op0, vint8m4_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8m8_m))) +vint8m8_t vle8ff(vbool1_t op0, vint8m8_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8mf2_m))) +vint8mf2_t vle8ff(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8mf4_m))) +vint8mf4_t vle8ff(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_i8mf8_m))) +vint8mf8_t vle8ff(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8m1_m))) +vuint8m1_t vle8ff(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8m2_m))) +vuint8m2_t vle8ff(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8m4_m))) +vuint8m4_t vle8ff(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8m8_m))) +vuint8m8_t vle8ff(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8mf2_m))) +vuint8mf2_t vle8ff(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8mf4_m))) +vuint8mf4_t vle8ff(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle8ff_v_u8mf8_m))) +vuint8mf8_t vle8ff(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m1))) +vint8m1_t vneg(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m1_m))) +vint8m1_t vneg(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m2))) +vint8m2_t vneg(vint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m2_m))) +vint8m2_t vneg(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m4))) +vint8m4_t vneg(vint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m4_m))) +vint8m4_t vneg(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m8))) +vint8m8_t vneg(vint8m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8m8_m))) +vint8m8_t vneg(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8mf2))) +vint8mf2_t vneg(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8mf2_m))) +vint8mf2_t vneg(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8mf4))) +vint8mf4_t vneg(vint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8mf4_m))) +vint8mf4_t vneg(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8mf8))) +vint8mf8_t vneg(vint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i8mf8_m))) +vint8mf8_t vneg(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m1))) +vint16m1_t vneg(vint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m1_m))) +vint16m1_t vneg(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m2))) +vint16m2_t vneg(vint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m2_m))) +vint16m2_t vneg(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m4))) +vint16m4_t vneg(vint16m4_t op0, size_t op1); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m4_m))) +vint16m4_t vneg(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m8))) +vint16m8_t vneg(vint16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16m8_m))) +vint16m8_t vneg(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16mf2))) +vint16mf2_t vneg(vint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16mf2_m))) +vint16mf2_t vneg(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16mf4))) +vint16mf4_t vneg(vint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i16mf4_m))) +vint16mf4_t vneg(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m1))) +vint32m1_t vneg(vint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m1_m))) +vint32m1_t vneg(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m2))) +vint32m2_t vneg(vint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m2_m))) +vint32m2_t vneg(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m4))) +vint32m4_t vneg(vint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m4_m))) +vint32m4_t vneg(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m8))) +vint32m8_t vneg(vint32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32m8_m))) +vint32m8_t vneg(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32mf2))) +vint32mf2_t vneg(vint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i32mf2_m))) +vint32mf2_t vneg(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m1))) +vint64m1_t vneg(vint64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m1_m))) +vint64m1_t vneg(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m2))) +vint64m2_t vneg(vint64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m2_m))) +vint64m2_t vneg(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m4))) +vint64m4_t vneg(vint64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m4_m))) +vint64m4_t vneg(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m8))) +vint64m8_t vneg(vint64m8_t op0, size_t op1); + 
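As with the masked vle8/vle8ff loads above, the masked vneg overloads take the mask first and then a merge ("maskedoff") vector whose lanes flow through unchanged where the mask is clear; vneg itself is effectively a subtract-from-zero. A minimal sketch of a conditional negate built from those pieces, again editorial rather than part of the header: negate_where is an illustrative name, the toolchain assumptions are as in the previous sketch, and it relies on the overloaded vmsne comparison and vse32 store declared elsewhere in this same header.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* dst[i] = cond[i] ? -src[i] : src[i], for n elements. */
void negate_where(int32_t *dst, const int32_t *src, const uint8_t *cond,
                  size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, cond += vl, dst += vl) {
    vl = vsetvl_e32m1(n);
    vint32m1_t v = vle32_v_i32m1(src, vl);
    vuint8mf4_t c = vle8_v_u8mf4(cond, vl);   /* one flag byte per lane   */
    vbool32_t m = vmsne(c, 0, vl);            /* mask where cond[i] != 0  */
    /* Masked overload: op1 is the merge vector, so inactive lanes keep v. */
    vint32m1_t r = vneg(m, v, v, vl);
    vse32(dst, r, vl);
  }
}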
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vneg_v_i64m8_m)))
+vint64m8_t vneg(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m1)))
+vint8m1_t vnot(vint8m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m1_m)))
+vint8m1_t vnot(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m2)))
+vint8m2_t vnot(vint8m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m2_m)))
+vint8m2_t vnot(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m4)))
+vint8m4_t vnot(vint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m4_m)))
+vint8m4_t vnot(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m8)))
+vint8m8_t vnot(vint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8m8_m)))
+vint8m8_t vnot(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8mf2)))
+vint8mf2_t vnot(vint8mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8mf2_m)))
+vint8mf2_t vnot(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8mf4)))
+vint8mf4_t vnot(vint8mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8mf4_m)))
+vint8mf4_t vnot(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8mf8)))
+vint8mf8_t vnot(vint8mf8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i8mf8_m)))
+vint8mf8_t vnot(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m1)))
+vint16m1_t vnot(vint16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m1_m)))
+vint16m1_t vnot(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m2)))
+vint16m2_t vnot(vint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m2_m)))
+vint16m2_t vnot(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m4)))
+vint16m4_t vnot(vint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m4_m)))
+vint16m4_t vnot(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m8)))
+vint16m8_t vnot(vint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16m8_m)))
+vint16m8_t vnot(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16mf2)))
+vint16mf2_t vnot(vint16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16mf2_m)))
+vint16mf2_t vnot(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16mf4)))
+vint16mf4_t vnot(vint16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i16mf4_m)))
+vint16mf4_t vnot(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m1)))
+vint32m1_t vnot(vint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m1_m)))
+vint32m1_t vnot(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m2)))
+vint32m2_t vnot(vint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m2_m)))
+vint32m2_t vnot(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m4)))
+vint32m4_t vnot(vint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m4_m)))
+vint32m4_t vnot(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m8)))
+vint32m8_t vnot(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32m8_m)))
+vint32m8_t vnot(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32mf2)))
+vint32mf2_t vnot(vint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i32mf2_m)))
+vint32mf2_t vnot(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m1)))
+vint64m1_t vnot(vint64m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m1_m)))
+vint64m1_t vnot(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m2)))
+vint64m2_t vnot(vint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m2_m)))
+vint64m2_t vnot(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m4)))
+vint64m4_t vnot(vint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m4_m)))
+vint64m4_t vnot(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m8)))
+vint64m8_t vnot(vint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_i64m8_m)))
+vint64m8_t vnot(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8m1_m)))
+vint8m1_t vlse8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8m2_m)))
+vint8m2_t vlse8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8m4_m)))
+vint8m4_t vlse8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8m8_m)))
+vint8m8_t vlse8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8mf2_m)))
+vint8mf2_t vlse8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8mf4_m)))
+vint8mf4_t vlse8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_i8mf8_m)))
+vint8mf8_t vlse8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m1)))
+vuint8m1_t vnot(vuint8m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m1_m)))
+vuint8m1_t vnot(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m2)))
+vuint8m2_t vnot(vuint8m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m2_m)))
+vuint8m2_t vnot(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m4)))
+vuint8m4_t vnot(vuint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m4_m)))
+vuint8m4_t vnot(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m8)))
+vuint8m8_t vnot(vuint8m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8m8_m)))
+vuint8m8_t vnot(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8mf2)))
+vuint8mf2_t vnot(vuint8mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8mf2_m)))
+vuint8mf2_t vnot(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8mf4)))
+vuint8mf4_t vnot(vuint8mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8mf4_m)))
+vuint8mf4_t vnot(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8mf8)))
+vuint8mf8_t vnot(vuint8mf8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u8mf8_m)))
+vuint8mf8_t vnot(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m1)))
+vuint16m1_t vnot(vuint16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m1_m)))
+vuint16m1_t vnot(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m2)))
+vuint16m2_t vnot(vuint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m2_m)))
+vuint16m2_t vnot(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m4)))
+vuint16m4_t vnot(vuint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m4_m)))
+vuint16m4_t vnot(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m8)))
+vuint16m8_t vnot(vuint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16m8_m)))
+vuint16m8_t vnot(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16mf2)))
+vuint16mf2_t vnot(vuint16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16mf2_m)))
+vuint16mf2_t vnot(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16mf4)))
+vuint16mf4_t vnot(vuint16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u16mf4_m)))
+vuint16mf4_t vnot(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m1)))
+vuint32m1_t vnot(vuint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m1_m)))
+vuint32m1_t vnot(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m2)))
+vuint32m2_t vnot(vuint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m2_m)))
+vuint32m2_t vnot(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m4)))
+vuint32m4_t vnot(vuint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m4_m)))
+vuint32m4_t vnot(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m8)))
+vuint32m8_t vnot(vuint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32m8_m)))
+vuint32m8_t vnot(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32mf2)))
+vuint32mf2_t vnot(vuint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u32mf2_m)))
+vuint32mf2_t vnot(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m1)))
+vuint64m1_t vnot(vuint64m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m1_m)))
+vuint64m1_t vnot(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m2)))
+vuint64m2_t vnot(vuint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m2_m)))
+vuint64m2_t vnot(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m4)))
+vuint64m4_t vnot(vuint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m4_m)))
+vuint64m4_t vnot(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m8)))
+vuint64m8_t vnot(vuint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vnot_v_u64m8_m)))
+vuint64m8_t vnot(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b8)))
+vbool8_t vmmv(vbool8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b4)))
+vbool4_t vmmv(vbool4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b2)))
+vbool2_t vmmv(vbool2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b1)))
+vbool1_t vmmv(vbool1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b16)))
+vbool16_t vmmv(vbool16_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b32)))
+vbool32_t vmmv(vbool32_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmmv_m_b64)))
+vbool64_t vmmv(vbool64_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m2)))
+vuint16m2_t vwcvtu_x(vuint8m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m2_m)))
+vuint16m2_t vwcvtu_x(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m4)))
+vuint16m4_t vwcvtu_x(vuint8m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m4_m)))
+vuint16m4_t vwcvtu_x(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m8)))
+vuint16m8_t vwcvtu_x(vuint8m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m8_m)))
+vuint16m8_t vwcvtu_x(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m1)))
+vuint16m1_t vwcvtu_x(vuint8mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16m1_m)))
+vuint16m1_t vwcvtu_x(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16mf2)))
+vuint16mf2_t vwcvtu_x(vuint8mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16mf2_m)))
+vuint16mf2_t vwcvtu_x(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16mf4)))
+vuint16mf4_t vwcvtu_x(vuint8mf8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u16mf4_m)))
+vuint16mf4_t vwcvtu_x(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m2)))
+vuint32m2_t vwcvtu_x(vuint16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m2_m)))
+vuint32m2_t vwcvtu_x(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m4)))
+vuint32m4_t vwcvtu_x(vuint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m4_m)))
+vuint32m4_t vwcvtu_x(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m8)))
+vuint32m8_t vwcvtu_x(vuint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m8_m)))
+vuint32m8_t vwcvtu_x(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m1)))
+vuint32m1_t vwcvtu_x(vuint16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32m1_m)))
+vuint32m1_t vwcvtu_x(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32mf2)))
+vuint32mf2_t vwcvtu_x(vuint16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u32mf2_m)))
+vuint32mf2_t vwcvtu_x(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m2)))
+vuint64m2_t vwcvtu_x(vuint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m2_m)))
+vuint64m2_t vwcvtu_x(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m4)))
+vuint64m4_t vwcvtu_x(vuint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m4_m)))
+vuint64m4_t vwcvtu_x(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m8)))
+vuint64m8_t vwcvtu_x(vuint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m8_m)))
+vuint64m8_t vwcvtu_x(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m1)))
+vuint64m1_t vwcvtu_x(vuint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vwcvtu_x_x_v_u64m1_m)))
+vuint64m1_t vwcvtu_x(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8m1)))
+vint8m1_t vncvt_x(vint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8m1_m)))
+vint8m1_t vncvt_x(vbool8_t op0, vint8m1_t op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8m2)))
+vint8m2_t vncvt_x(vint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8m2_m)))
+vint8m2_t vncvt_x(vbool4_t op0, vint8m2_t op1, vint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8m4)))
+vint8m4_t vncvt_x(vint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8m4_m)))
+vint8m4_t vncvt_x(vbool2_t op0, vint8m4_t op1, vint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8mf2)))
+vint8mf2_t vncvt_x(vint16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8mf2_m)))
+vint8mf2_t vncvt_x(vbool16_t op0, vint8mf2_t op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8mf4)))
+vint8mf4_t vncvt_x(vint16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8mf4_m)))
+vint8mf4_t vncvt_x(vbool32_t op0, vint8mf4_t op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8mf8)))
+vint8mf8_t vncvt_x(vint16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i8mf8_m)))
+vint8mf8_t vncvt_x(vbool64_t op0, vint8mf8_t op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16m1)))
+vint16m1_t vncvt_x(vint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16m1_m)))
+vint16m1_t vncvt_x(vbool16_t op0, vint16m1_t op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16m2)))
+vint16m2_t vncvt_x(vint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16m2_m)))
+vint16m2_t vncvt_x(vbool8_t op0, vint16m2_t op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16m4)))
+vint16m4_t vncvt_x(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16m4_m)))
+vint16m4_t vncvt_x(vbool4_t op0, vint16m4_t op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16mf2)))
+vint16mf2_t vncvt_x(vint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16mf2_m)))
+vint16mf2_t vncvt_x(vbool32_t op0, vint16mf2_t op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16mf4)))
+vint16mf4_t vncvt_x(vint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i16mf4_m)))
+vint16mf4_t vncvt_x(vbool64_t op0, vint16mf4_t op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32m1)))
+vint32m1_t vncvt_x(vint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32m1_m)))
+vint32m1_t vncvt_x(vbool32_t op0, vint32m1_t op1, vint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32m2)))
+vint32m2_t vncvt_x(vint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32m2_m)))
+vint32m2_t vncvt_x(vbool16_t op0, vint32m2_t op1, vint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32m4)))
+vint32m4_t vncvt_x(vint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32m4_m)))
+vint32m4_t vncvt_x(vbool8_t op0, vint32m4_t op1, vint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32mf2)))
+vint32mf2_t vncvt_x(vint64m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vncvt_x_x_w_i32mf2_m)))
+vint32mf2_t vncvt_x(vbool64_t op0, vint32mf2_t op1, vint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_i16m1_m)))
+vint16m1_t vle16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_i16m2_m)))
+vint16m2_t vle16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_i16m4_m)))
+vint16m4_t vle16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_i16m8_m)))
+vint16m8_t vle16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_i16mf2_m)))
+vint16mf2_t vle16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_i16mf4_m)))
+vint16mf4_t vle16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_u16m1_m)))
+vuint16m1_t vle16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_u16m2_m)))
+vuint16m2_t vle16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_u16m4_m)))
+vuint16m4_t vle16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_u16m8_m)))
+vuint16m8_t vle16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_u16mf2_m)))
+vuint16mf2_t vle16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_u16mf4_m)))
+vuint16mf4_t vle16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_i32m1_m)))
+vint32m1_t vle32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_i32m2_m)))
+vint32m2_t vle32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_i32m4_m)))
+vint32m4_t vle32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_i32m8_m)))
+vint32m8_t vle32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_i32mf2_m)))
+vint32mf2_t vle32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_u32m1_m)))
+vuint32m1_t vle32(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_u32m2_m)))
+vuint32m2_t vle32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_u32m4_m)))
+vuint32m4_t vle32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_u32m8_m)))
+vuint32m8_t vle32(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_u32mf2_m)))
+vuint32mf2_t vle32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8m1_m)))
+vuint8m1_t vlse8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8m2_m)))
+vuint8m2_t vlse8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8m4_m)))
+vuint8m4_t vlse8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8m8_m)))
+vuint8m8_t vlse8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8mf2_m)))
+vuint8mf2_t vlse8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8mf4_m)))
+vuint8mf4_t vlse8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse8_v_u8mf8_m)))
+vuint8mf8_t vlse8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_i64m1_m)))
+vint64m1_t vle64(vbool64_t op0, vint64m1_t op1, const int64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_i64m2_m)))
+vint64m2_t vle64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_i64m4_m)))
+vint64m4_t vle64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_i64m8_m)))
+vint64m8_t vle64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_u64m1_m)))
+vuint64m1_t vle64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_u64m2_m)))
+vuint64m2_t vle64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_u64m4_m)))
+vuint64m4_t vle64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_u64m8_m)))
+vuint64m8_t vle64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m1)))
+void vse16(int16_t * op0, vint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m1_m)))
+void vse16(vbool16_t op0, int16_t * op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m2)))
+void vse16(int16_t * op0, vint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m2_m)))
+void vse16(vbool8_t op0, int16_t * op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m4)))
+void vse16(int16_t * op0, vint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m4_m)))
+void vse16(vbool4_t op0, int16_t * op1, vint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m8)))
+void vse16(int16_t * op0, vint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16m8_m)))
+void vse16(vbool2_t op0, int16_t * op1, vint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16mf2)))
+void vse16(int16_t * op0, vint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16mf2_m)))
+void vse16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16mf4)))
+void vse16(int16_t * op0, vint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_i16mf4_m)))
+void vse16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m1)))
+void vse16(uint16_t * op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m1_m)))
+void vse16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m2)))
+void vse16(uint16_t * op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m2_m)))
+void vse16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m4)))
+void vse16(uint16_t * op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m4_m)))
+void vse16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m8)))
+void vse16(uint16_t * op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16m8_m)))
+void vse16(vbool2_t op0, uint16_t * op1, vuint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16mf2)))
+void vse16(uint16_t * op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16mf2_m)))
+void vse16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16mf4)))
+void vse16(uint16_t * op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_u16mf4_m)))
+void vse16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m1)))
+void vse32(int32_t * op0, vint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m1_m)))
+void vse32(vbool32_t op0, int32_t * op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m2)))
+void vse32(int32_t * op0, vint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m2_m)))
+void vse32(vbool16_t op0, int32_t * op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m4)))
+void vse32(int32_t * op0, vint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m4_m)))
+void vse32(vbool8_t op0, int32_t * op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m8)))
+void vse32(int32_t * op0, vint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32m8_m)))
+void vse32(vbool4_t op0, int32_t * op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32mf2)))
+void vse32(int32_t * op0, vint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_i32mf2_m)))
+void vse32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m1)))
+void vse32(uint32_t * op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m1_m)))
+void vse32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m2)))
+void vse32(uint32_t * op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m2_m)))
+void vse32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m4)))
+void vse32(uint32_t * op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m4_m)))
+void vse32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m8)))
+void vse32(uint32_t * op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32m8_m)))
+void vse32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32mf2)))
+void vse32(uint32_t * op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_u32mf2_m)))
+void vse32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m1)))
+vint8m1_t vluxei8(const int8_t * op0, vuint8m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m1_m)))
+vint8m1_t vluxei8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m2)))
+vint8m2_t vluxei8(const int8_t * op0, vuint8m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m2_m)))
+vint8m2_t vluxei8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m4)))
+vint8m4_t vluxei8(const int8_t * op0, vuint8m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m4_m)))
+vint8m4_t vluxei8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m8)))
+vint8m8_t vluxei8(const int8_t * op0, vuint8m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8m8_m)))
+vint8m8_t vluxei8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, vuint8m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8mf2)))
+vint8mf2_t vluxei8(const int8_t * op0, vuint8mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8mf2_m)))
+vint8mf2_t vluxei8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8mf4)))
+vint8mf4_t vluxei8(const int8_t * op0, vuint8mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8mf4_m)))
+vint8mf4_t vluxei8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8mf8)))
+vint8mf8_t vluxei8(const int8_t * op0, vuint8mf8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_i8mf8_m)))
+vint8mf8_t vluxei8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m1)))
+void vse64(int64_t * op0, vint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m1_m)))
+void vse64(vbool64_t op0, int64_t * op1, vint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m2)))
+void vse64(int64_t * op0, vint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m2_m)))
+void vse64(vbool32_t op0, int64_t * op1, vint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m4)))
+void vse64(int64_t * op0, vint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m4_m)))
+void vse64(vbool16_t op0, int64_t * op1, vint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m8)))
+void vse64(int64_t * op0, vint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_i64m8_m)))
+void vse64(vbool8_t op0, int64_t * op1, vint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m1)))
+void vse64(uint64_t * op0, vuint64m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m1_m)))
+void vse64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m2)))
+void vse64(uint64_t * op0, vuint64m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m2_m)))
+void vse64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m4)))
+void vse64(uint64_t * op0, vuint64m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m4_m)))
+void vse64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m8)))
+void vse64(uint64_t * op0, vuint64m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_u64m8_m)))
+void vse64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_i16m1_m)))
+vint16m1_t vlse16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_i16m2_m)))
+vint16m2_t vlse16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_i16m4_m)))
+vint16m4_t vlse16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_i16m8_m)))
+vint16m8_t vlse16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_i16mf2_m)))
+vint16mf2_t vlse16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_i16mf4_m)))
+vint16mf4_t vlse16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_u16m1_m)))
+vuint16m1_t vlse16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_u16m2_m)))
+vuint16m2_t vlse16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_u16m4_m)))
+vuint16m4_t vlse16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_u16m8_m)))
+vuint16m8_t vlse16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_u16mf2_m)))
+vuint16mf2_t vlse16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_u16mf4_m)))
+vuint16mf4_t vlse16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_i32m1_m)))
+vint32m1_t vlse32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_i32m2_m)))
+vint32m2_t vlse32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_i32m4_m)))
+vint32m4_t vlse32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_i32m8_m)))
+vint32m8_t vlse32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_i32mf2_m)))
+vint32mf2_t vlse32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_u32m1_m)))
+vuint32m1_t vlse32(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_u32m2_m)))
+vuint32m2_t vlse32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_u32m4_m)))
+vuint32m4_t vlse32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_u32m8_m)))
+vuint32m8_t vlse32(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_u32mf2_m)))
+vuint32mf2_t vlse32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_i64m1_m)))
+vint64m1_t vlse64(vbool64_t op0, vint64m1_t op1, const int64_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_i64m2_m)))
+vint64m2_t vlse64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_i64m4_m)))
+vint64m4_t vlse64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_i64m8_m)))
+vint64m8_t vlse64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b8)))
+long vfirst(vbool8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b8_m)))
+long vfirst(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b4)))
+long vfirst(vbool4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b4_m)))
+long vfirst(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b2)))
+long vfirst(vbool2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b2_m)))
+long vfirst(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b1)))
+long vfirst(vbool1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b1_m)))
+long vfirst(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b16)))
+long vfirst(vbool16_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b16_m)))
+long vfirst(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b32)))
+long vfirst(vbool32_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b32_m)))
+long vfirst(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b64)))
+long vfirst(vbool64_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfirst_m_b64_m)))
+long vfirst(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf2_i8m1)))
+vint8m1_t vlmul_ext_i8m1(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf4_i8m1)))
+vint8m1_t vlmul_ext_i8m1(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf8_i8m1)))
+vint8m1_t vlmul_ext_i8m1(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf2_i16m1)))
+vint16m1_t vlmul_ext_i16m1(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf4_i16m1)))
+vint16m1_t vlmul_ext_i16m1(vint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32mf2_i32m1)))
+vint32m1_t vlmul_ext_i32m1(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf4_i8mf2)))
+vint8mf2_t vlmul_ext_i8mf2(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf8_i8mf2)))
+vint8mf2_t vlmul_ext_i8mf2(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf4_i16mf2)))
+vint16mf2_t vlmul_ext_i16mf2(vint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf8_i8mf4)))
+vint8mf4_t vlmul_ext_i8mf4(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8m1_i8m2)))
+vint8m2_t vlmul_ext_i8m2(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf2_i8m2)))
+vint8m2_t vlmul_ext_i8m2(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf4_i8m2)))
+vint8m2_t vlmul_ext_i8m2(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf8_i8m2)))
+vint8m2_t vlmul_ext_i8m2(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16m1_i16m2)))
+vint16m2_t vlmul_ext_i16m2(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf2_i16m2)))
+vint16m2_t vlmul_ext_i16m2(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf4_i16m2)))
+vint16m2_t vlmul_ext_i16m2(vint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32m1_i32m2)))
+vint32m2_t vlmul_ext_i32m2(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32mf2_i32m2)))
+vint32m2_t vlmul_ext_i32m2(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i64m1_i64m2)))
+vint64m2_t vlmul_ext_i64m2(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8m1_i8m4)))
+vint8m4_t vlmul_ext_i8m4(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8m2_i8m4)))
+vint8m4_t vlmul_ext_i8m4(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf2_i8m4)))
+vint8m4_t vlmul_ext_i8m4(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf4_i8m4)))
+vint8m4_t vlmul_ext_i8m4(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf8_i8m4)))
+vint8m4_t vlmul_ext_i8m4(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16m1_i16m4)))
+vint16m4_t vlmul_ext_i16m4(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16m2_i16m4)))
+vint16m4_t vlmul_ext_i16m4(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf2_i16m4)))
+vint16m4_t vlmul_ext_i16m4(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf4_i16m4)))
+vint16m4_t vlmul_ext_i16m4(vint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32m1_i32m4)))
+vint32m4_t vlmul_ext_i32m4(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32m2_i32m4)))
+vint32m4_t vlmul_ext_i32m4(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32mf2_i32m4)))
+vint32m4_t vlmul_ext_i32m4(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i64m1_i64m4)))
+vint64m4_t vlmul_ext_i64m4(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i64m2_i64m4)))
+vint64m4_t vlmul_ext_i64m4(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8m1_i8m8)))
+vint8m8_t vlmul_ext_i8m8(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8m2_i8m8)))
+vint8m8_t vlmul_ext_i8m8(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8m4_i8m8)))
+vint8m8_t vlmul_ext_i8m8(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf2_i8m8)))
+vint8m8_t vlmul_ext_i8m8(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf4_i8m8)))
+vint8m8_t vlmul_ext_i8m8(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i8mf8_i8m8)))
+vint8m8_t vlmul_ext_i8m8(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16m1_i16m8)))
+vint16m8_t vlmul_ext_i16m8(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16m2_i16m8)))
+vint16m8_t vlmul_ext_i16m8(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16m4_i16m8)))
+vint16m8_t vlmul_ext_i16m8(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf2_i16m8)))
+vint16m8_t vlmul_ext_i16m8(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i16mf4_i16m8)))
+vint16m8_t vlmul_ext_i16m8(vint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32m1_i32m8)))
+vint32m8_t vlmul_ext_i32m8(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32m2_i32m8)))
+vint32m8_t vlmul_ext_i32m8(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32m4_i32m8)))
+vint32m8_t vlmul_ext_i32m8(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i32mf2_i32m8)))
+vint32m8_t vlmul_ext_i32m8(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i64m1_i64m8)))
+vint64m8_t vlmul_ext_i64m8(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i64m2_i64m8)))
+vint64m8_t vlmul_ext_i64m8(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_i64m4_i64m8)))
+vint64m8_t vlmul_ext_i64m8(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf2_u8m1)))
+vuint8m1_t vlmul_ext_u8m1(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf4_u8m1)))
+vuint8m1_t vlmul_ext_u8m1(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf8_u8m1)))
+vuint8m1_t vlmul_ext_u8m1(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf2_u16m1)))
+vuint16m1_t vlmul_ext_u16m1(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf4_u16m1)))
+vuint16m1_t vlmul_ext_u16m1(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32mf2_u32m1)))
+vuint32m1_t vlmul_ext_u32m1(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf4_u8mf2)))
+vuint8mf2_t vlmul_ext_u8mf2(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf8_u8mf2)))
+vuint8mf2_t vlmul_ext_u8mf2(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf4_u16mf2)))
+vuint16mf2_t vlmul_ext_u16mf2(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf8_u8mf4)))
+vuint8mf4_t vlmul_ext_u8mf4(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8m1_u8m2)))
+vuint8m2_t vlmul_ext_u8m2(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf2_u8m2)))
+vuint8m2_t vlmul_ext_u8m2(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf4_u8m2)))
+vuint8m2_t vlmul_ext_u8m2(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf8_u8m2)))
+vuint8m2_t vlmul_ext_u8m2(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16m1_u16m2)))
+vuint16m2_t vlmul_ext_u16m2(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf2_u16m2)))
+vuint16m2_t vlmul_ext_u16m2(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf4_u16m2)))
+vuint16m2_t vlmul_ext_u16m2(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32m1_u32m2)))
+vuint32m2_t vlmul_ext_u32m2(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32mf2_u32m2)))
+vuint32m2_t vlmul_ext_u32m2(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u64m1_u64m2)))
+vuint64m2_t vlmul_ext_u64m2(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8m1_u8m4)))
+vuint8m4_t vlmul_ext_u8m4(vuint8m1_t op0);
+
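A similarly hedged sketch of the fault-only-first loads (vle8ff) and vfirst declared above, in a strlen-style scan; vsetvlmax_e8m1, the non-overloaded vle8ff_v_i8m1, and an overloaded vmseq are assumed from other parts of this header, and the sketch is illustrative only, not part of the patch:

#include <riscv_vector.h>
#include <stddef.h>

/* Illustrative only: vle8ff shrinks vl instead of trapping when the load
   would cross an unmapped page, so scanning past the string end is safe. */
size_t vec_strlen(const char *s) {
  size_t len = 0;
  for (;;) {
    size_t vl = vsetvlmax_e8m1();
    vint8m1_t v = vle8ff_v_i8m1((const int8_t *)s + len, &vl, vl);
    long pos = vfirst(vmseq(v, 0, vl), vl); /* first NUL in chunk, or -1 */
    if (pos >= 0)
      return len + (size_t)pos;
    len += vl;
  }
}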
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8m2_u8m4)))
+vuint8m4_t vlmul_ext_u8m4(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf2_u8m4)))
+vuint8m4_t vlmul_ext_u8m4(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf4_u8m4)))
+vuint8m4_t vlmul_ext_u8m4(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf8_u8m4)))
+vuint8m4_t vlmul_ext_u8m4(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16m1_u16m4)))
+vuint16m4_t vlmul_ext_u16m4(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16m2_u16m4)))
+vuint16m4_t vlmul_ext_u16m4(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf2_u16m4)))
+vuint16m4_t vlmul_ext_u16m4(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf4_u16m4)))
+vuint16m4_t vlmul_ext_u16m4(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32m1_u32m4)))
+vuint32m4_t vlmul_ext_u32m4(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32m2_u32m4)))
+vuint32m4_t vlmul_ext_u32m4(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32mf2_u32m4)))
+vuint32m4_t vlmul_ext_u32m4(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u64m1_u64m4)))
+vuint64m4_t vlmul_ext_u64m4(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u64m2_u64m4)))
+vuint64m4_t vlmul_ext_u64m4(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8m1_u8m8)))
+vuint8m8_t vlmul_ext_u8m8(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8m2_u8m8)))
+vuint8m8_t vlmul_ext_u8m8(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8m4_u8m8)))
+vuint8m8_t vlmul_ext_u8m8(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf2_u8m8)))
+vuint8m8_t vlmul_ext_u8m8(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf4_u8m8)))
+vuint8m8_t vlmul_ext_u8m8(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u8mf8_u8m8)))
+vuint8m8_t vlmul_ext_u8m8(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16m1_u16m8)))
+vuint16m8_t vlmul_ext_u16m8(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16m2_u16m8)))
+vuint16m8_t vlmul_ext_u16m8(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16m4_u16m8)))
+vuint16m8_t vlmul_ext_u16m8(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf2_u16m8)))
+vuint16m8_t vlmul_ext_u16m8(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u16mf4_u16m8)))
+vuint16m8_t vlmul_ext_u16m8(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32m1_u32m8)))
+vuint32m8_t vlmul_ext_u32m8(vuint32m1_t op0);
+
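/* Illustrative usage sketch, not part of this patch: the vlmul_ext_* overloads
 * above widen a value into a larger LMUL register group, leaving the element
 * values in the low part unchanged. Assumes a toolchain with the V extension
 * enabled (e.g. clang -march=rv64gcv) and #include <riscv_vector.h>. */
static inline vint8m4_t widen_to_m4(vint8mf4_t narrow) {
  /* Overload resolution selects the vint8mf4_t -> vint8m4_t alias declared above. */
  return vlmul_ext_i8m4(narrow);
}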
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32m2_u32m8)))
+vuint32m8_t vlmul_ext_u32m8(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32m4_u32m8)))
+vuint32m8_t vlmul_ext_u32m8(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u32mf2_u32m8)))
+vuint32m8_t vlmul_ext_u32m8(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u64m1_u64m8)))
+vuint64m8_t vlmul_ext_u64m8(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u64m2_u64m8)))
+vuint64m8_t vlmul_ext_u64m8(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_u64m4_u64m8)))
+vuint64m8_t vlmul_ext_u64m8(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m1_i8mf2)))
+vint8mf2_t vlmul_trunc_i8mf2(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m2_i8mf2)))
+vint8mf2_t vlmul_trunc_i8mf2(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m4_i8mf2)))
+vint8mf2_t vlmul_trunc_i8mf2(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m8_i8mf2)))
+vint8mf2_t vlmul_trunc_i8mf2(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m1_i16mf2)))
+vint16mf2_t vlmul_trunc_i16mf2(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m2_i16mf2)))
+vint16mf2_t vlmul_trunc_i16mf2(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m4_i16mf2)))
+vint16mf2_t vlmul_trunc_i16mf2(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m8_i16mf2)))
+vint16mf2_t vlmul_trunc_i16mf2(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m1_i32mf2)))
+vint32mf2_t vlmul_trunc_i32mf2(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m2_i32mf2)))
+vint32mf2_t vlmul_trunc_i32mf2(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m4_i32mf2)))
+vint32mf2_t vlmul_trunc_i32mf2(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m8_i32mf2)))
+vint32mf2_t vlmul_trunc_i32mf2(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m1_i8mf4)))
+vint8mf4_t vlmul_trunc_i8mf4(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m2_i8mf4)))
+vint8mf4_t vlmul_trunc_i8mf4(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m4_i8mf4)))
+vint8mf4_t vlmul_trunc_i8mf4(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m8_i8mf4)))
+vint8mf4_t vlmul_trunc_i8mf4(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8mf2_i8mf4)))
+vint8mf4_t vlmul_trunc_i8mf4(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m1_i16mf4)))
+vint16mf4_t vlmul_trunc_i16mf4(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m2_i16mf4)))
+vint16mf4_t vlmul_trunc_i16mf4(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m4_i16mf4)))
+vint16mf4_t vlmul_trunc_i16mf4(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m8_i16mf4)))
+vint16mf4_t vlmul_trunc_i16mf4(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16mf2_i16mf4)))
+vint16mf4_t vlmul_trunc_i16mf4(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m1_i8mf8)))
+vint8mf8_t vlmul_trunc_i8mf8(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m2_i8mf8)))
+vint8mf8_t vlmul_trunc_i8mf8(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m4_i8mf8)))
+vint8mf8_t vlmul_trunc_i8mf8(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m8_i8mf8)))
+vint8mf8_t vlmul_trunc_i8mf8(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8mf2_i8mf8)))
+vint8mf8_t vlmul_trunc_i8mf8(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8mf4_i8mf8)))
+vint8mf8_t vlmul_trunc_i8mf8(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m2_i8m1)))
+vint8m1_t vlmul_trunc_i8m1(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m4_i8m1)))
+vint8m1_t vlmul_trunc_i8m1(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m8_i8m1)))
+vint8m1_t vlmul_trunc_i8m1(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m2_i16m1)))
+vint16m1_t vlmul_trunc_i16m1(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m4_i16m1)))
+vint16m1_t vlmul_trunc_i16m1(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m8_i16m1)))
+vint16m1_t vlmul_trunc_i16m1(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m2_i32m1)))
+vint32m1_t vlmul_trunc_i32m1(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m4_i32m1)))
+vint32m1_t vlmul_trunc_i32m1(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m8_i32m1)))
+vint32m1_t vlmul_trunc_i32m1(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i64m2_i64m1)))
+vint64m1_t vlmul_trunc_i64m1(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i64m4_i64m1)))
+vint64m1_t vlmul_trunc_i64m1(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i64m8_i64m1)))
+vint64m1_t vlmul_trunc_i64m1(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m4_i8m2)))
+vint8m2_t vlmul_trunc_i8m2(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m8_i8m2)))
+vint8m2_t vlmul_trunc_i8m2(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m4_i16m2)))
+vint16m2_t vlmul_trunc_i16m2(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m8_i16m2)))
+vint16m2_t vlmul_trunc_i16m2(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m4_i32m2)))
+vint32m2_t vlmul_trunc_i32m2(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m8_i32m2)))
+vint32m2_t vlmul_trunc_i32m2(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i64m4_i64m2)))
+vint64m2_t vlmul_trunc_i64m2(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i64m8_i64m2)))
+vint64m2_t vlmul_trunc_i64m2(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i8m8_i8m4)))
+vint8m4_t vlmul_trunc_i8m4(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i16m8_i16m4)))
+vint16m4_t vlmul_trunc_i16m4(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i32m8_i32m4)))
+vint32m4_t vlmul_trunc_i32m4(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_i64m8_i64m4)))
+vint64m4_t vlmul_trunc_i64m4(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m1_u8mf2)))
+vuint8mf2_t vlmul_trunc_u8mf2(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m2_u8mf2)))
+vuint8mf2_t vlmul_trunc_u8mf2(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m4_u8mf2)))
+vuint8mf2_t vlmul_trunc_u8mf2(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m8_u8mf2)))
+vuint8mf2_t vlmul_trunc_u8mf2(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m1_u16mf2)))
+vuint16mf2_t vlmul_trunc_u16mf2(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m2_u16mf2)))
+vuint16mf2_t vlmul_trunc_u16mf2(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m4_u16mf2)))
+vuint16mf2_t vlmul_trunc_u16mf2(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m8_u16mf2)))
+vuint16mf2_t vlmul_trunc_u16mf2(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m1_u32mf2)))
+vuint32mf2_t vlmul_trunc_u32mf2(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m2_u32mf2)))
+vuint32mf2_t vlmul_trunc_u32mf2(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m4_u32mf2)))
+vuint32mf2_t vlmul_trunc_u32mf2(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m8_u32mf2)))
+vuint32mf2_t vlmul_trunc_u32mf2(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m1_u8mf4)))
+vuint8mf4_t vlmul_trunc_u8mf4(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m2_u8mf4)))
+vuint8mf4_t vlmul_trunc_u8mf4(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m4_u8mf4)))
+vuint8mf4_t vlmul_trunc_u8mf4(vuint8m4_t op0);
+
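/* Illustrative usage sketch, not part of this patch: vlmul_trunc_* is the
 * inverse direction of vlmul_ext_*, keeping only the lowest-numbered part of
 * a larger register group. Same build assumptions as the sketch above. */
static inline vint8mf2_t narrow_to_mf2(vint8m8_t wide) {
  return vlmul_trunc_i8mf2(wide); /* vint8m8_t -> vint8mf2_t overload above */
}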
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m8_u8mf4)))
+vuint8mf4_t vlmul_trunc_u8mf4(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8mf2_u8mf4)))
+vuint8mf4_t vlmul_trunc_u8mf4(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m1_u16mf4)))
+vuint16mf4_t vlmul_trunc_u16mf4(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m2_u16mf4)))
+vuint16mf4_t vlmul_trunc_u16mf4(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m4_u16mf4)))
+vuint16mf4_t vlmul_trunc_u16mf4(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m8_u16mf4)))
+vuint16mf4_t vlmul_trunc_u16mf4(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16mf2_u16mf4)))
+vuint16mf4_t vlmul_trunc_u16mf4(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m1_u8mf8)))
+vuint8mf8_t vlmul_trunc_u8mf8(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m2_u8mf8)))
+vuint8mf8_t vlmul_trunc_u8mf8(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m4_u8mf8)))
+vuint8mf8_t vlmul_trunc_u8mf8(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m8_u8mf8)))
+vuint8mf8_t vlmul_trunc_u8mf8(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8mf2_u8mf8)))
+vuint8mf8_t vlmul_trunc_u8mf8(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8mf4_u8mf8)))
+vuint8mf8_t vlmul_trunc_u8mf8(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m2_u8m1)))
+vuint8m1_t vlmul_trunc_u8m1(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m4_u8m1)))
+vuint8m1_t vlmul_trunc_u8m1(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m8_u8m1)))
+vuint8m1_t vlmul_trunc_u8m1(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m2_u16m1)))
+vuint16m1_t vlmul_trunc_u16m1(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m4_u16m1)))
+vuint16m1_t vlmul_trunc_u16m1(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m8_u16m1)))
+vuint16m1_t vlmul_trunc_u16m1(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m2_u32m1)))
+vuint32m1_t vlmul_trunc_u32m1(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m4_u32m1)))
+vuint32m1_t vlmul_trunc_u32m1(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m8_u32m1)))
+vuint32m1_t vlmul_trunc_u32m1(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u64m2_u64m1)))
+vuint64m1_t vlmul_trunc_u64m1(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u64m4_u64m1)))
+vuint64m1_t vlmul_trunc_u64m1(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u64m8_u64m1)))
+vuint64m1_t vlmul_trunc_u64m1(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m4_u8m2)))
+vuint8m2_t vlmul_trunc_u8m2(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m8_u8m2)))
+vuint8m2_t vlmul_trunc_u8m2(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m4_u16m2)))
+vuint16m2_t vlmul_trunc_u16m2(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m8_u16m2)))
+vuint16m2_t vlmul_trunc_u16m2(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m4_u32m2)))
+vuint32m2_t vlmul_trunc_u32m2(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m8_u32m2)))
+vuint32m2_t vlmul_trunc_u32m2(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u64m4_u64m2)))
+vuint64m2_t vlmul_trunc_u64m2(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u64m8_u64m2)))
+vuint64m2_t vlmul_trunc_u64m2(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u8m8_u8m4)))
+vuint8m4_t vlmul_trunc_u8m4(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u16m8_u16m4)))
+vuint16m4_t vlmul_trunc_u16m4(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u32m8_u32m4)))
+vuint32m4_t vlmul_trunc_u32m4(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_u64m8_u64m4)))
+vuint64m4_t vlmul_trunc_u64m4(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b8)))
+vbool8_t vmand(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b4)))
+vbool4_t vmand(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b2)))
+vbool2_t vmand(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b1)))
+vbool1_t vmand(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b16)))
+vbool16_t vmand(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b32)))
+vbool32_t vmand(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmand_mm_b64)))
+vbool64_t vmand(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b8)))
+vbool8_t vmandnot(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b4)))
+vbool4_t vmandnot(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b2)))
+vbool2_t vmandnot(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b1)))
+vbool1_t vmandnot(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b16)))
+vbool16_t vmandnot(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b32)))
+vbool32_t vmandnot(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmandnot_mm_b64)))
+vbool64_t vmandnot(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b8)))
+vbool8_t vmnand(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b4)))
+vbool4_t vmnand(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b2)))
+vbool2_t vmnand(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b1)))
+vbool1_t vmnand(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b16)))
+vbool16_t vmnand(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b32)))
+vbool32_t vmnand(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnand_mm_b64)))
+vbool64_t vmnand(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b8)))
+vbool8_t vmnor(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b4)))
+vbool4_t vmnor(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b2)))
+vbool2_t vmnor(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b1)))
+vbool1_t vmnor(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b16)))
+vbool16_t vmnor(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b32)))
+vbool32_t vmnor(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmnor_mm_b64)))
+vbool64_t vmnor(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b8)))
+vbool8_t vmor(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b4)))
+vbool4_t vmor(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b2)))
+vbool2_t vmor(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b1)))
+vbool1_t vmor(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b16)))
+vbool16_t vmor(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b32)))
+vbool32_t vmor(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmor_mm_b64)))
+vbool64_t vmor(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b8)))
+vbool8_t vmornot(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b4)))
+vbool4_t vmornot(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b2)))
+vbool2_t vmornot(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b1)))
+vbool1_t vmornot(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b16)))
+vbool16_t vmornot(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b32)))
+vbool32_t vmornot(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmornot_mm_b64)))
+vbool64_t vmornot(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b8)))
+vbool8_t vmsbf(vbool8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b8_m)))
+vbool8_t vmsbf(vbool8_t op0, vbool8_t op1, vbool8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b4)))
+vbool4_t vmsbf(vbool4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b4_m)))
+vbool4_t vmsbf(vbool4_t op0, vbool4_t op1, vbool4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b2)))
+vbool2_t vmsbf(vbool2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b2_m)))
+vbool2_t vmsbf(vbool2_t op0, vbool2_t op1, vbool2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b1)))
+vbool1_t vmsbf(vbool1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b1_m)))
+vbool1_t vmsbf(vbool1_t op0, vbool1_t op1, vbool1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b16)))
+vbool16_t vmsbf(vbool16_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b16_m)))
+vbool16_t vmsbf(vbool16_t op0, vbool16_t op1, vbool16_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b32)))
+vbool32_t vmsbf(vbool32_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b32_m)))
+vbool32_t vmsbf(vbool32_t op0, vbool32_t op1, vbool32_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b64)))
+vbool64_t vmsbf(vbool64_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsbf_m_b64_m)))
+vbool64_t vmsbf(vbool64_t op0, vbool64_t op1, vbool64_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b8)))
+vbool8_t vmsif(vbool8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b8_m)))
+vbool8_t vmsif(vbool8_t op0, vbool8_t op1, vbool8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b4)))
+vbool4_t vmsif(vbool4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b4_m)))
+vbool4_t vmsif(vbool4_t op0, vbool4_t op1, vbool4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b2)))
+vbool2_t vmsif(vbool2_t op0, size_t op1);
+
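/* Illustrative usage sketch, not part of this patch: the vmand/vmnand/vmnor/
 * vmor overloads above combine whole mask registers, and vmsbf produces a mask
 * of the bits strictly before the first set bit; the trailing size_t operand
 * is the active vector length vl. These are the clang 14 spellings (newer
 * intrinsic versions rename vmandnot/vmornot to vmandn/vmorn). */
static inline vbool8_t before_first_common(vbool8_t a, vbool8_t b, size_t vl) {
  vbool8_t common = vmand(a, b, vl); /* a & b, elementwise on mask bits */
  return vmsbf(common, vl);          /* 1s before the first set bit of common */
}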
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b2_m)))
+vbool2_t vmsif(vbool2_t op0, vbool2_t op1, vbool2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b1)))
+vbool1_t vmsif(vbool1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b1_m)))
+vbool1_t vmsif(vbool1_t op0, vbool1_t op1, vbool1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b16)))
+vbool16_t vmsif(vbool16_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b16_m)))
+vbool16_t vmsif(vbool16_t op0, vbool16_t op1, vbool16_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b32)))
+vbool32_t vmsif(vbool32_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b32_m)))
+vbool32_t vmsif(vbool32_t op0, vbool32_t op1, vbool32_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b64)))
+vbool64_t vmsif(vbool64_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsif_m_b64_m)))
+vbool64_t vmsif(vbool64_t op0, vbool64_t op1, vbool64_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b8)))
+vbool8_t vmsof(vbool8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b8_m)))
+vbool8_t vmsof(vbool8_t op0, vbool8_t op1, vbool8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b4)))
+vbool4_t vmsof(vbool4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b4_m)))
+vbool4_t vmsof(vbool4_t op0, vbool4_t op1, vbool4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b2)))
+vbool2_t vmsof(vbool2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b2_m)))
+vbool2_t vmsof(vbool2_t op0, vbool2_t op1, vbool2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b1)))
+vbool1_t vmsof(vbool1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b1_m)))
+vbool1_t vmsof(vbool1_t op0, vbool1_t op1, vbool1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b16)))
+vbool16_t vmsof(vbool16_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b16_m)))
+vbool16_t vmsof(vbool16_t op0, vbool16_t op1, vbool16_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b32)))
+vbool32_t vmsof(vbool32_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b32_m)))
+vbool32_t vmsof(vbool32_t op0, vbool32_t op1, vbool32_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b64)))
+vbool64_t vmsof(vbool64_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmsof_m_b64_m)))
+vbool64_t vmsof(vbool64_t op0, vbool64_t op1, vbool64_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b8)))
+vbool8_t vmxnor(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b4)))
+vbool4_t vmxnor(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b2)))
+vbool2_t vmxnor(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b1)))
+vbool1_t vmxnor(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b16)))
+vbool16_t vmxnor(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b32)))
+vbool32_t vmxnor(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxnor_mm_b64)))
+vbool64_t vmxnor(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b8)))
+vbool8_t vmxor(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b4)))
+vbool4_t vmxor(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b2)))
+vbool2_t vmxor(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b1)))
+vbool1_t vmxor(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b16)))
+vbool16_t vmxor(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b32)))
+vbool32_t vmxor(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmxor_mm_b64)))
+vbool64_t vmxor(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b8)))
+unsigned long vpopc(vbool8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b8_m)))
+unsigned long vpopc(vbool8_t op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b4)))
+unsigned long vpopc(vbool4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b4_m)))
+unsigned long vpopc(vbool4_t op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b2)))
+unsigned long vpopc(vbool2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b2_m)))
+unsigned long vpopc(vbool2_t op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b1)))
+unsigned long vpopc(vbool1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b1_m)))
+unsigned long vpopc(vbool1_t op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b16)))
+unsigned long vpopc(vbool16_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b16_m)))
+unsigned long vpopc(vbool16_t op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b32)))
+unsigned long vpopc(vbool32_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b32_m)))
+unsigned long vpopc(vbool32_t op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b64)))
+unsigned long vpopc(vbool64_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vpopc_m_b64_m)))
+unsigned long vpopc(vbool64_t op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m1_i16m1)))
+vint16m1_t vreinterpret_i16m1(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m2_i16m2)))
+vint16m2_t vreinterpret_i16m2(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m4_i16m4)))
+vint16m4_t vreinterpret_i16m4(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m8_i16m8)))
+vint16m8_t vreinterpret_i16m8(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8mf2_i16mf2)))
+vint16mf2_t vreinterpret_i16mf2(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8mf4_i16mf4)))
+vint16mf4_t vreinterpret_i16mf4(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m1_i16m1)))
+vint16m1_t vreinterpret_i16m1(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m2_i16m2)))
+vint16m2_t vreinterpret_i16m2(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m4_i16m4)))
+vint16m4_t vreinterpret_i16m4(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m8_i16m8)))
+vint16m8_t vreinterpret_i16m8(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32mf2_i16mf2)))
+vint16mf2_t vreinterpret_i16mf2(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m1_i16m1)))
+vint16m1_t vreinterpret_i16m1(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m2_i16m2)))
+vint16m2_t vreinterpret_i16m2(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m4_i16m4)))
+vint16m4_t vreinterpret_i16m4(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m8_i16m8)))
+vint16m8_t vreinterpret_i16m8(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m1_i32m1)))
+vint32m1_t vreinterpret_i32m1(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m2_i32m2)))
+vint32m2_t vreinterpret_i32m2(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m4_i32m4)))
+vint32m4_t vreinterpret_i32m4(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m8_i32m8)))
+vint32m8_t vreinterpret_i32m8(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8mf2_i32mf2)))
+vint32mf2_t vreinterpret_i32mf2(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m1_i32m1)))
+vint32m1_t vreinterpret_i32m1(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m2_i32m2)))
+vint32m2_t vreinterpret_i32m2(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m4_i32m4)))
+vint32m4_t vreinterpret_i32m4(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m8_i32m8)))
+vint32m8_t vreinterpret_i32m8(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf2_i32mf2)))
+vint32mf2_t vreinterpret_i32mf2(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m1_i32m1)))
+vint32m1_t vreinterpret_i32m1(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m2_i32m2)))
+vint32m2_t vreinterpret_i32m2(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m4_i32m4)))
+vint32m4_t vreinterpret_i32m4(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m8_i32m8)))
+vint32m8_t vreinterpret_i32m8(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m1_i64m1)))
+vint64m1_t vreinterpret_i64m1(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m2_i64m2)))
+vint64m2_t vreinterpret_i64m2(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m4_i64m4)))
+vint64m4_t vreinterpret_i64m4(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m8_i64m8)))
+vint64m8_t vreinterpret_i64m8(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m1_i64m1)))
+vint64m1_t vreinterpret_i64m1(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m2_i64m2)))
+vint64m2_t vreinterpret_i64m2(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m4_i64m4)))
+vint64m4_t vreinterpret_i64m4(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m8_i64m8)))
+vint64m8_t vreinterpret_i64m8(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m1_i64m1)))
+vint64m1_t vreinterpret_i64m1(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m2_i64m2)))
+vint64m2_t vreinterpret_i64m2(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m4_i64m4)))
+vint64m4_t vreinterpret_i64m4(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m8_i64m8)))
+vint64m8_t vreinterpret_i64m8(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m1_i8m1)))
+vint8m1_t vreinterpret_i8m1(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m2_i8m2)))
+vint8m2_t vreinterpret_i8m2(vint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m4_i8m4)))
+vint8m4_t vreinterpret_i8m4(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m8_i8m8)))
+vint8m8_t vreinterpret_i8m8(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf2_i8mf2)))
+vint8mf2_t vreinterpret_i8mf2(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf4_i8mf4)))
+vint8mf4_t vreinterpret_i8mf4(vint16mf4_t op0);
+
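/* Illustrative usage sketch, not part of this patch: vreinterpret_* is a
 * bit-pattern cast between vector types of the same total register size
 * (different element widths or signedness); no element conversion happens. */
static inline vint16m1_t bytes_as_i16(vint8m1_t v) {
  return vreinterpret_i16m1(v); /* i8m1 -> i16m1 overload declared above */
}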
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m1_i8m1)))
+vint8m1_t vreinterpret_i8m1(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m2_i8m2)))
+vint8m2_t vreinterpret_i8m2(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m4_i8m4)))
+vint8m4_t vreinterpret_i8m4(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m8_i8m8)))
+vint8m8_t vreinterpret_i8m8(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32mf2_i8mf2)))
+vint8mf2_t vreinterpret_i8mf2(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m1_i8m1)))
+vint8m1_t vreinterpret_i8m1(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m2_i8m2)))
+vint8m2_t vreinterpret_i8m2(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m4_i8m4)))
+vint8m4_t vreinterpret_i8m4(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m8_i8m8)))
+vint8m8_t vreinterpret_i8m8(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m1_i8m1)))
+vint8m1_t vreinterpret_i8m1(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m2_i8m2)))
+vint8m2_t vreinterpret_i8m2(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m4_i8m4)))
+vint8m4_t vreinterpret_i8m4(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m8_i8m8)))
+vint8m8_t vreinterpret_i8m8(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8mf2_i8mf2)))
+vint8mf2_t vreinterpret_i8mf2(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8mf4_i8mf4)))
+vint8mf4_t vreinterpret_i8mf4(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8mf8_i8mf8)))
+vint8mf8_t vreinterpret_i8mf8(vuint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m1_i16m1)))
+vint16m1_t vreinterpret_i16m1(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m2_i16m2)))
+vint16m2_t vreinterpret_i16m2(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m4_i16m4)))
+vint16m4_t vreinterpret_i16m4(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m8_i16m8)))
+vint16m8_t vreinterpret_i16m8(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf2_i16mf2)))
+vint16mf2_t vreinterpret_i16mf2(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf4_i16mf4)))
+vint16mf4_t vreinterpret_i16mf4(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m1_i32m1)))
+vint32m1_t vreinterpret_i32m1(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m2_i32m2)))
+vint32m2_t vreinterpret_i32m2(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m4_i32m4)))
+vint32m4_t vreinterpret_i32m4(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m8_i32m8)))
+vint32m8_t vreinterpret_i32m8(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32mf2_i32mf2)))
+vint32mf2_t vreinterpret_i32mf2(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m1_i64m1)))
+vint64m1_t vreinterpret_i64m1(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m2_i64m2)))
+vint64m2_t vreinterpret_i64m2(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m4_i64m4)))
+vint64m4_t vreinterpret_i64m4(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m8_i64m8)))
+vint64m8_t vreinterpret_i64m8(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m1_u16m1)))
+vuint16m1_t vreinterpret_u16m1(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m2_u16m2)))
+vuint16m2_t vreinterpret_u16m2(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m4_u16m4)))
+vuint16m4_t vreinterpret_u16m4(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m8_u16m8)))
+vuint16m8_t vreinterpret_u16m8(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8mf2_u16mf2)))
+vuint16mf2_t vreinterpret_u16mf2(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8mf4_u16mf4)))
+vuint16mf4_t vreinterpret_u16mf4(vuint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m1_u16m1)))
+vuint16m1_t vreinterpret_u16m1(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m2_u16m2)))
+vuint16m2_t vreinterpret_u16m2(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m4_u16m4)))
+vuint16m4_t vreinterpret_u16m4(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m8_u16m8)))
+vuint16m8_t vreinterpret_u16m8(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32mf2_u16mf2)))
+vuint16mf2_t vreinterpret_u16mf2(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m1_u16m1)))
+vuint16m1_t vreinterpret_u16m1(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m2_u16m2)))
+vuint16m2_t vreinterpret_u16m2(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m4_u16m4)))
+vuint16m4_t vreinterpret_u16m4(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m8_u16m8)))
+vuint16m8_t vreinterpret_u16m8(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m1_u32m1)))
+vuint32m1_t vreinterpret_u32m1(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m2_u32m2)))
+vuint32m2_t vreinterpret_u32m2(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m4_u32m4)))
+vuint32m4_t vreinterpret_u32m4(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m8_u32m8)))
+vuint32m8_t vreinterpret_u32m8(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8mf2_u32mf2)))
+vuint32mf2_t vreinterpret_u32mf2(vuint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m1_u32m1)))
+vuint32m1_t vreinterpret_u32m1(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m2_u32m2)))
+vuint32m2_t vreinterpret_u32m2(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m4_u32m4)))
+vuint32m4_t vreinterpret_u32m4(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m8_u32m8)))
+vuint32m8_t vreinterpret_u32m8(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf2_u32mf2)))
+vuint32mf2_t vreinterpret_u32mf2(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m1_u32m1)))
+vuint32m1_t vreinterpret_u32m1(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m2_u32m2)))
+vuint32m2_t vreinterpret_u32m2(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m4_u32m4)))
+vuint32m4_t vreinterpret_u32m4(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m8_u32m8)))
+vuint32m8_t vreinterpret_u32m8(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m1_u64m1)))
+vuint64m1_t vreinterpret_u64m1(vuint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m2_u64m2)))
+vuint64m2_t vreinterpret_u64m2(vuint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m4_u64m4)))
+vuint64m4_t vreinterpret_u64m4(vuint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u8m8_u64m8)))
+vuint64m8_t vreinterpret_u64m8(vuint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m1_u64m1)))
+vuint64m1_t vreinterpret_u64m1(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m2_u64m2)))
+vuint64m2_t vreinterpret_u64m2(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m4_u64m4)))
+vuint64m4_t vreinterpret_u64m4(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m8_u64m8)))
+vuint64m8_t vreinterpret_u64m8(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m1_u64m1)))
+vuint64m1_t vreinterpret_u64m1(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m2_u64m2)))
+vuint64m2_t vreinterpret_u64m2(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m4_u64m4)))
+vuint64m4_t vreinterpret_u64m4(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m8_u64m8)))
+vuint64m8_t vreinterpret_u64m8(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m1_u8m1)))
+vuint8m1_t vreinterpret_u8m1(vuint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m2_u8m2)))
+vuint8m2_t vreinterpret_u8m2(vuint16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m4_u8m4)))
+vuint8m4_t vreinterpret_u8m4(vuint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m8_u8m8)))
+vuint8m8_t vreinterpret_u8m8(vuint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf2_u8mf2)))
+vuint8mf2_t vreinterpret_u8mf2(vuint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf4_u8mf4)))
+vuint8mf4_t vreinterpret_u8mf4(vuint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m1_u8m1)))
+vuint8m1_t vreinterpret_u8m1(vuint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m2_u8m2)))
+vuint8m2_t vreinterpret_u8m2(vuint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m4_u8m4)))
+vuint8m4_t vreinterpret_u8m4(vuint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m8_u8m8)))
+vuint8m8_t vreinterpret_u8m8(vuint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32mf2_u8mf2)))
+vuint8mf2_t vreinterpret_u8mf2(vuint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m1_u8m1)))
+vuint8m1_t vreinterpret_u8m1(vuint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m2_u8m2)))
+vuint8m2_t vreinterpret_u8m2(vuint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m4_u8m4)))
+vuint8m4_t vreinterpret_u8m4(vuint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m8_u8m8)))
+vuint8m8_t vreinterpret_u8m8(vuint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m1_u8m1)))
+vuint8m1_t vreinterpret_u8m1(vint8m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m2_u8m2)))
+vuint8m2_t vreinterpret_u8m2(vint8m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m4_u8m4)))
+vuint8m4_t vreinterpret_u8m4(vint8m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8m8_u8m8)))
+vuint8m8_t vreinterpret_u8m8(vint8m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8mf2_u8mf2)))
+vuint8mf2_t vreinterpret_u8mf2(vint8mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8mf4_u8mf4)))
+vuint8mf4_t vreinterpret_u8mf4(vint8mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i8mf8_u8mf8)))
+vuint8mf8_t vreinterpret_u8mf8(vint8mf8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m1_u16m1)))
+vuint16m1_t vreinterpret_u16m1(vint16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m2_u16m2)))
+vuint16m2_t vreinterpret_u16m2(vint16m2_t op0);
+
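/* Illustrative usage sketch, not part of this patch: vpopc (declared above)
 * counts the set bits of a mask among the first vl elements; the masked
 * overloads take an extra mask operand. vmseq is declared elsewhere in this
 * header; this helper is hypothetical. */
static inline unsigned long count_zero_lanes(vint32m1_t v, size_t vl) {
  vbool32_t is_zero = vmseq(v, 0, vl); /* lanes whose value equals zero */
  return vpopc(is_zero, vl);
}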
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m4_u16m4)))
+vuint16m4_t vreinterpret_u16m4(vint16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m8_u16m8)))
+vuint16m8_t vreinterpret_u16m8(vint16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf2_u16mf2)))
+vuint16mf2_t vreinterpret_u16mf2(vint16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf4_u16mf4)))
+vuint16mf4_t vreinterpret_u16mf4(vint16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m1_u32m1)))
+vuint32m1_t vreinterpret_u32m1(vint32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m2_u32m2)))
+vuint32m2_t vreinterpret_u32m2(vint32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m4_u32m4)))
+vuint32m4_t vreinterpret_u32m4(vint32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m8_u32m8)))
+vuint32m8_t vreinterpret_u32m8(vint32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32mf2_u32mf2)))
+vuint32mf2_t vreinterpret_u32mf2(vint32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m1_u64m1)))
+vuint64m1_t vreinterpret_u64m1(vint64m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m2_u64m2)))
+vuint64m2_t vreinterpret_u64m2(vint64m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m4_u64m4)))
+vuint64m4_t vreinterpret_u64m4(vint64m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m8_u64m8)))
+vuint64m8_t vreinterpret_u64m8(vint64m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b8)))
+void vse1(uint8_t * op0, vbool8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b4)))
+void vse1(uint8_t * op0, vbool4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b2)))
+void vse1(uint8_t * op0, vbool2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b1)))
+void vse1(uint8_t * op0, vbool1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b16)))
+void vse1(uint8_t * op0, vbool16_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b32)))
+void vse1(uint8_t * op0, vbool32_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse1_v_b64)))
+void vse1(uint8_t * op0, vbool64_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16mf4)))
+vint16mf4_t vsext_vf2(vint8mf8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16mf4_m)))
+vint16mf4_t vsext_vf2(vbool64_t op0, vint16mf4_t op1, vint8mf8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16mf2)))
+vint16mf2_t vsext_vf2(vint8mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16mf2_m)))
+vint16mf2_t vsext_vf2(vbool32_t op0, vint16mf2_t op1, vint8mf4_t op2, size_t op3);
+
__attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m1))) +vint16m1_t vsext_vf2(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m1_m))) +vint16m1_t vsext_vf2(vbool16_t op0, vint16m1_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m2))) +vint16m2_t vsext_vf2(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m2_m))) +vint16m2_t vsext_vf2(vbool8_t op0, vint16m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m4))) +vint16m4_t vsext_vf2(vint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m4_m))) +vint16m4_t vsext_vf2(vbool4_t op0, vint16m4_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m8))) +vint16m8_t vsext_vf2(vint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i16m8_m))) +vint16m8_t vsext_vf2(vbool2_t op0, vint16m8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32mf2))) +vint32mf2_t vsext_vf2(vint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32mf2_m))) +vint32mf2_t vsext_vf2(vbool64_t op0, vint32mf2_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m1))) +vint32m1_t vsext_vf2(vint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m1_m))) +vint32m1_t vsext_vf2(vbool32_t op0, vint32m1_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m2))) +vint32m2_t vsext_vf2(vint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m2_m))) +vint32m2_t vsext_vf2(vbool16_t op0, vint32m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m4))) +vint32m4_t vsext_vf2(vint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m4_m))) +vint32m4_t vsext_vf2(vbool8_t op0, vint32m4_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m8))) +vint32m8_t vsext_vf2(vint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i32m8_m))) +vint32m8_t vsext_vf2(vbool4_t op0, vint32m8_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m1))) +vint64m1_t vsext_vf2(vint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m1_m))) +vint64m1_t vsext_vf2(vbool64_t op0, vint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m2))) +vint64m2_t vsext_vf2(vint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m2_m))) +vint64m2_t vsext_vf2(vbool32_t op0, vint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m4))) +vint64m4_t vsext_vf2(vint32m2_t op0, size_t op1); + 
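Each unmasked vsext_vf2 overload above sign-extends every lane to twice the element width, so LMUL doubles along with SEW (an i8 m1 source yields an i16 m2 result); the _m forms interleaved with them take a mask and a maskedoff operand first. A hedged sketch under the same toolchain assumptions as the previous example:

#include <riscv_vector.h>

/* Illustrative only: unmasked widening of int8 lanes to int16 lanes.
   vl counts elements, as in every v0.10 intrinsic. */
vint16m2_t widen_i8(vint8m1_t src, size_t vl) {
  return vsext_vf2(src, vl);
}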
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m4_m))) +vint64m4_t vsext_vf2(vbool16_t op0, vint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m8))) +vint64m8_t vsext_vf2(vint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf2_i64m8_m))) +vint64m8_t vsext_vf2(vbool8_t op0, vint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32mf2))) +vint32mf2_t vsext_vf4(vint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32mf2_m))) +vint32mf2_t vsext_vf4(vbool64_t op0, vint32mf2_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m1))) +vint32m1_t vsext_vf4(vint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m1_m))) +vint32m1_t vsext_vf4(vbool32_t op0, vint32m1_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m2))) +vint32m2_t vsext_vf4(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m2_m))) +vint32m2_t vsext_vf4(vbool16_t op0, vint32m2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m4))) +vint32m4_t vsext_vf4(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m4_m))) +vint32m4_t vsext_vf4(vbool8_t op0, vint32m4_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m8))) +vint32m8_t vsext_vf4(vint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i32m8_m))) +vint32m8_t vsext_vf4(vbool4_t op0, vint32m8_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m1))) +vint64m1_t vsext_vf4(vint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m1_m))) +vint64m1_t vsext_vf4(vbool64_t op0, vint64m1_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m2))) +vint64m2_t vsext_vf4(vint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m2_m))) +vint64m2_t vsext_vf4(vbool32_t op0, vint64m2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m4))) +vint64m4_t vsext_vf4(vint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m4_m))) +vint64m4_t vsext_vf4(vbool16_t op0, vint64m4_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m8))) +vint64m8_t vsext_vf4(vint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf4_i64m8_m))) +vint64m8_t vsext_vf4(vbool8_t op0, vint64m8_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m1))) +vint64m1_t vsext_vf8(vint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m1_m))) +vint64m1_t vsext_vf8(vbool64_t op0, 
vint64m1_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m2))) +vint64m2_t vsext_vf8(vint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m2_m))) +vint64m2_t vsext_vf8(vbool32_t op0, vint64m2_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m4))) +vint64m4_t vsext_vf8(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m4_m))) +vint64m4_t vsext_vf8(vbool16_t op0, vint64m4_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m8))) +vint64m8_t vsext_vf8(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsext_vf8_i64m8_m))) +vint64m8_t vsext_vf8(vbool8_t op0, vint64m8_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16mf4))) +vuint16mf4_t vzext_vf2(vuint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16mf4_m))) +vuint16mf4_t vzext_vf2(vbool64_t op0, vuint16mf4_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16mf2))) +vuint16mf2_t vzext_vf2(vuint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16mf2_m))) +vuint16mf2_t vzext_vf2(vbool32_t op0, vuint16mf2_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m1))) +vuint16m1_t vzext_vf2(vuint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m1_m))) +vuint16m1_t vzext_vf2(vbool16_t op0, vuint16m1_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m2))) +vuint16m2_t vzext_vf2(vuint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m2_m))) +vuint16m2_t vzext_vf2(vbool8_t op0, vuint16m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m4))) +vuint16m4_t vzext_vf2(vuint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m4_m))) +vuint16m4_t vzext_vf2(vbool4_t op0, vuint16m4_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m8))) +vuint16m8_t vzext_vf2(vuint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u16m8_m))) +vuint16m8_t vzext_vf2(vbool2_t op0, vuint16m8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32mf2))) +vuint32mf2_t vzext_vf2(vuint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32mf2_m))) +vuint32mf2_t vzext_vf2(vbool64_t op0, vuint32mf2_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m1))) +vuint32m1_t vzext_vf2(vuint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m1_m))) +vuint32m1_t vzext_vf2(vbool32_t op0, vuint32m1_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m2))) +vuint32m2_t vzext_vf2(vuint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m2_m))) +vuint32m2_t vzext_vf2(vbool16_t op0, vuint32m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m4))) +vuint32m4_t vzext_vf2(vuint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m4_m))) +vuint32m4_t vzext_vf2(vbool8_t op0, vuint32m4_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m8))) +vuint32m8_t vzext_vf2(vuint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u32m8_m))) +vuint32m8_t vzext_vf2(vbool4_t op0, vuint32m8_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m1))) +vuint64m1_t vzext_vf2(vuint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m1_m))) +vuint64m1_t vzext_vf2(vbool64_t op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m2))) +vuint64m2_t vzext_vf2(vuint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m2_m))) +vuint64m2_t vzext_vf2(vbool32_t op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m4))) +vuint64m4_t vzext_vf2(vuint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m4_m))) +vuint64m4_t vzext_vf2(vbool16_t op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m8))) +vuint64m8_t vzext_vf2(vuint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf2_u64m8_m))) +vuint64m8_t vzext_vf2(vbool8_t op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32mf2))) +vuint32mf2_t vzext_vf4(vuint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32mf2_m))) +vuint32mf2_t vzext_vf4(vbool64_t op0, vuint32mf2_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m1))) +vuint32m1_t vzext_vf4(vuint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m1_m))) +vuint32m1_t vzext_vf4(vbool32_t op0, vuint32m1_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m2))) +vuint32m2_t vzext_vf4(vuint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m2_m))) +vuint32m2_t vzext_vf4(vbool16_t op0, vuint32m2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m4))) +vuint32m4_t vzext_vf4(vuint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m4_m))) +vuint32m4_t vzext_vf4(vbool8_t op0, vuint32m4_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m8))) 
+vuint32m8_t vzext_vf4(vuint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u32m8_m))) +vuint32m8_t vzext_vf4(vbool4_t op0, vuint32m8_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m1))) +vuint64m1_t vzext_vf4(vuint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m1_m))) +vuint64m1_t vzext_vf4(vbool64_t op0, vuint64m1_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m2))) +vuint64m2_t vzext_vf4(vuint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m2_m))) +vuint64m2_t vzext_vf4(vbool32_t op0, vuint64m2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m4))) +vuint64m4_t vzext_vf4(vuint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m4_m))) +vuint64m4_t vzext_vf4(vbool16_t op0, vuint64m4_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m8))) +vuint64m8_t vzext_vf4(vuint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf4_u64m8_m))) +vuint64m8_t vzext_vf4(vbool8_t op0, vuint64m8_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m1))) +vuint64m1_t vzext_vf8(vuint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m1_m))) +vuint64m1_t vzext_vf8(vbool64_t op0, vuint64m1_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m2))) +vuint64m2_t vzext_vf8(vuint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m2_m))) +vuint64m2_t vzext_vf8(vbool32_t op0, vuint64m2_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m4))) +vuint64m4_t vzext_vf8(vuint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m4_m))) +vuint64m4_t vzext_vf8(vbool16_t op0, vuint64m4_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m8))) +vuint64m8_t vzext_vf8(vuint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vzext_vf8_u64m8_m))) +vuint64m8_t vzext_vf8(vbool8_t op0, vuint64m8_t op1, vuint8m1_t op2, size_t op3); + +#if defined(__riscv_f) +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m1))) +void vsse32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m1_m))) +void vsse32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m2))) +void vsse32(float * op0, ptrdiff_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m2_m))) +void vsse32(vbool16_t op0, float * op1, ptrdiff_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m4))) +void vsse32(float * 
op0, ptrdiff_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m4_m))) +void vsse32(vbool8_t op0, float * op1, ptrdiff_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m8))) +void vsse32(float * op0, ptrdiff_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32m8_m))) +void vsse32(vbool4_t op0, float * op1, ptrdiff_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32mf2))) +void vsse32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse32_v_f32mf2_m))) +void vsse32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m1))) +vfloat32m1_t vluxei8(const float * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m1_m))) +vfloat32m1_t vluxei8(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m2))) +vfloat32m2_t vluxei8(const float * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m2_m))) +vfloat32m2_t vluxei8(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m4))) +vfloat32m4_t vluxei8(const float * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m4_m))) +vfloat32m4_t vluxei8(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m8))) +vfloat32m8_t vluxei8(const float * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32m8_m))) +vfloat32m8_t vluxei8(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32mf2))) +vfloat32mf2_t vluxei8(const float * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f32mf2_m))) +vfloat32mf2_t vluxei8(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m1))) +vfloat32m1_t vluxei16(const float * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m1_m))) +vfloat32m1_t vluxei16(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m2))) +vfloat32m2_t vluxei16(const float * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m2_m))) +vfloat32m2_t vluxei16(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m4))) +vfloat32m4_t vluxei16(const float * op0, vuint16m2_t op1, size_t op2); + 
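The vsse32 overloads earlier in this __riscv_f block store one 32-bit element every op1 bytes, and the vluxei* overloads (continuing below) gather elements through per-lane byte offsets of the stated index width. A small combined sketch, with our own illustrative names for the parameters, under the same clang 14 / v0.10 assumptions:

#include <riscv_vector.h>

/* Illustrative only: gather floats via 8-bit byte offsets, then write
   the result back one element every stride_bytes. */
void gather_then_stride_store(float *dst, const float *src,
                              vuint8mf4_t byte_offsets,
                              ptrdiff_t stride_bytes, size_t vl) {
  vfloat32m1_t v = vluxei8(src, byte_offsets, vl); /* unordered indexed load */
  vsse32(dst, stride_bytes, v, vl);                /* strided store */
}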
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m4_m))) +vfloat32m4_t vluxei16(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m8))) +vfloat32m8_t vluxei16(const float * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32m8_m))) +vfloat32m8_t vluxei16(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32mf2))) +vfloat32mf2_t vluxei16(const float * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f32mf2_m))) +vfloat32mf2_t vluxei16(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m1))) +vfloat32m1_t vluxei32(const float * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m1_m))) +vfloat32m1_t vluxei32(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m2))) +vfloat32m2_t vluxei32(const float * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m2_m))) +vfloat32m2_t vluxei32(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m4))) +vfloat32m4_t vluxei32(const float * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m4_m))) +vfloat32m4_t vluxei32(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m8))) +vfloat32m8_t vluxei32(const float * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32m8_m))) +vfloat32m8_t vluxei32(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32mf2))) +vfloat32mf2_t vluxei32(const float * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f32mf2_m))) +vfloat32mf2_t vluxei32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32m1))) +vfloat32m1_t vluxei64(const float * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32m1_m))) +vfloat32m1_t vluxei64(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32m2))) +vfloat32m2_t vluxei64(const float * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32m2_m))) +vfloat32m2_t vluxei64(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32m4))) +vfloat32m4_t vluxei64(const float * op0, 
vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32m4_m))) +vfloat32m4_t vluxei64(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32mf2))) +vfloat32mf2_t vluxei64(const float * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f32mf2_m))) +vfloat32mf2_t vluxei64(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m1))) +vfloat32m1_t vloxei8(const float * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m1_m))) +vfloat32m1_t vloxei8(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m2))) +vfloat32m2_t vloxei8(const float * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m2_m))) +vfloat32m2_t vloxei8(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m4))) +vfloat32m4_t vloxei8(const float * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m4_m))) +vfloat32m4_t vloxei8(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m8))) +vfloat32m8_t vloxei8(const float * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32m8_m))) +vfloat32m8_t vloxei8(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32mf2))) +vfloat32mf2_t vloxei8(const float * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f32mf2_m))) +vfloat32mf2_t vloxei8(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m1))) +vfloat32m1_t vloxei16(const float * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m1_m))) +vfloat32m1_t vloxei16(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m2))) +vfloat32m2_t vloxei16(const float * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m2_m))) +vfloat32m2_t vloxei16(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m4))) +vfloat32m4_t vloxei16(const float * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m4_m))) +vfloat32m4_t vloxei16(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m8))) +vfloat32m8_t vloxei16(const float * op0, 
vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32m8_m))) +vfloat32m8_t vloxei16(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32mf2))) +vfloat32mf2_t vloxei16(const float * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f32mf2_m))) +vfloat32mf2_t vloxei16(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m1))) +vfloat32m1_t vloxei32(const float * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m1_m))) +vfloat32m1_t vloxei32(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m2))) +vfloat32m2_t vloxei32(const float * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m2_m))) +vfloat32m2_t vloxei32(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m4))) +vfloat32m4_t vloxei32(const float * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m4_m))) +vfloat32m4_t vloxei32(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m8))) +vfloat32m8_t vloxei32(const float * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32m8_m))) +vfloat32m8_t vloxei32(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32mf2))) +vfloat32mf2_t vloxei32(const float * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f32mf2_m))) +vfloat32mf2_t vloxei32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32m1))) +vfloat32m1_t vloxei64(const float * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32m1_m))) +vfloat32m1_t vloxei64(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32m2))) +vfloat32m2_t vloxei64(const float * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32m2_m))) +vfloat32m2_t vloxei64(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32m4))) +vfloat32m4_t vloxei64(const float * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32m4_m))) +vfloat32m4_t vloxei64(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32mf2))) +vfloat32mf2_t 
vloxei64(const float * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f32mf2_m))) +vfloat32mf2_t vloxei64(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f32m1))) +vfloat32m1_t vmv_v(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f32m2))) +vfloat32m2_t vmv_v(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f32m4))) +vfloat32m4_t vmv_v(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f32m8))) +vfloat32m8_t vmv_v(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f32mf2))) +vfloat32mf2_t vmv_v(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m1))) +vfloat32m1_t vfadd(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m1_m))) +vfloat32m1_t vfadd(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m2))) +vfloat32m2_t vfadd(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m2_m))) +vfloat32m2_t vfadd(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m4))) +vfloat32m4_t vfadd(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m4_m))) +vfloat32m4_t vfadd(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m8))) +vfloat32m8_t vfadd(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32m8_m))) +vfloat32m8_t vfadd(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32mf2))) +vfloat32mf2_t vfadd(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f32mf2_m))) +vfloat32mf2_t vfadd(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m1))) +vfloat32m1_t vfadd(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m1_m))) +vfloat32m1_t vfadd(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m2))) +vfloat32m2_t vfadd(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m2_m))) +vfloat32m2_t vfadd(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m4))) +vfloat32m4_t vfadd(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m4_m))) +vfloat32m4_t vfadd(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m8))) +vfloat32m8_t vfadd(vfloat32m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32m8_m))) +vfloat32m8_t vfadd(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32mf2))) +vfloat32mf2_t vfadd(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f32mf2_m))) +vfloat32mf2_t vfadd(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m1))) +vfloat32m1_t vfsub(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m1_m))) +vfloat32m1_t vfsub(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m2))) +vfloat32m2_t vfsub(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m2_m))) +vfloat32m2_t vfsub(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m4))) +vfloat32m4_t vfsub(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m4_m))) +vfloat32m4_t vfsub(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m8))) +vfloat32m8_t vfsub(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32m8_m))) +vfloat32m8_t vfsub(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32mf2))) +vfloat32mf2_t vfsub(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f32mf2_m))) +vfloat32mf2_t vfsub(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m1))) +vfloat32m1_t vfsub(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m1_m))) +vfloat32m1_t vfsub(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m2))) +vfloat32m2_t vfsub(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m2_m))) +vfloat32m2_t vfsub(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m4))) +vfloat32m4_t vfsub(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m4_m))) +vfloat32m4_t vfsub(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, 
float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m8))) +vfloat32m8_t vfsub(vfloat32m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32m8_m))) +vfloat32m8_t vfsub(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32mf2))) +vfloat32mf2_t vfsub(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f32mf2_m))) +vfloat32mf2_t vfsub(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m1))) +vfloat32m1_t vfrsub(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m1_m))) +vfloat32m1_t vfrsub(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m2))) +vfloat32m2_t vfrsub(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m2_m))) +vfloat32m2_t vfrsub(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m4))) +vfloat32m4_t vfrsub(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m4_m))) +vfloat32m4_t vfrsub(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m8))) +vfloat32m8_t vfrsub(vfloat32m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32m8_m))) +vfloat32m8_t vfrsub(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32mf2))) +vfloat32mf2_t vfrsub(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f32mf2_m))) +vfloat32mf2_t vfrsub(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m1))) +vfloat32m1_t vfmul(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m1_m))) +vfloat32m1_t vfmul(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m2))) +vfloat32m2_t vfmul(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m2_m))) +vfloat32m2_t vfmul(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m4))) +vfloat32m4_t vfmul(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m4_m))) +vfloat32m4_t vfmul(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m8))) +vfloat32m8_t vfmul(vfloat32m8_t op0, 
vfloat32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32m8_m))) +vfloat32m8_t vfmul(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32mf2))) +vfloat32mf2_t vfmul(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f32mf2_m))) +vfloat32mf2_t vfmul(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m1))) +vfloat32m1_t vfmul(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m1_m))) +vfloat32m1_t vfmul(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m2))) +vfloat32m2_t vfmul(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m2_m))) +vfloat32m2_t vfmul(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m4))) +vfloat32m4_t vfmul(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m4_m))) +vfloat32m4_t vfmul(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m8))) +vfloat32m8_t vfmul(vfloat32m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32m8_m))) +vfloat32m8_t vfmul(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32mf2))) +vfloat32mf2_t vfmul(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f32mf2_m))) +vfloat32mf2_t vfmul(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m1))) +vfloat32m1_t vfdiv(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m1_m))) +vfloat32m1_t vfdiv(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m2))) +vfloat32m2_t vfdiv(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m2_m))) +vfloat32m2_t vfdiv(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m4))) +vfloat32m4_t vfdiv(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m4_m))) +vfloat32m4_t vfdiv(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m8))) +vfloat32m8_t vfdiv(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32m8_m))) +vfloat32m8_t 
vfdiv(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32mf2))) +vfloat32mf2_t vfdiv(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f32mf2_m))) +vfloat32mf2_t vfdiv(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m1))) +vfloat32m1_t vfdiv(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m1_m))) +vfloat32m1_t vfdiv(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m2))) +vfloat32m2_t vfdiv(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m2_m))) +vfloat32m2_t vfdiv(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m4))) +vfloat32m4_t vfdiv(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m4_m))) +vfloat32m4_t vfdiv(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m8))) +vfloat32m8_t vfdiv(vfloat32m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32m8_m))) +vfloat32m8_t vfdiv(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32mf2))) +vfloat32mf2_t vfdiv(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f32mf2_m))) +vfloat32mf2_t vfdiv(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m1))) +vfloat32m1_t vfrdiv(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m1_m))) +vfloat32m1_t vfrdiv(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m2))) +vfloat32m2_t vfrdiv(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m2_m))) +vfloat32m2_t vfrdiv(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m4))) +vfloat32m4_t vfrdiv(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m4_m))) +vfloat32m4_t vfrdiv(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m8))) +vfloat32m8_t vfrdiv(vfloat32m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32m8_m))) +vfloat32m8_t vfrdiv(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32mf2))) 
+vfloat32mf2_t vfrdiv(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f32mf2_m))) +vfloat32mf2_t vfrdiv(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m1))) +vfloat32m1_t vfmacc(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m1_m))) +vfloat32m1_t vfmacc(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m2))) +vfloat32m2_t vfmacc(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m2_m))) +vfloat32m2_t vfmacc(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m4))) +vfloat32m4_t vfmacc(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m4_m))) +vfloat32m4_t vfmacc(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m8))) +vfloat32m8_t vfmacc(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32m8_m))) +vfloat32m8_t vfmacc(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32mf2))) +vfloat32mf2_t vfmacc(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f32mf2_m))) +vfloat32mf2_t vfmacc(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m1))) +vfloat32m1_t vfmacc(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m1_m))) +vfloat32m1_t vfmacc(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m2))) +vfloat32m2_t vfmacc(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m2_m))) +vfloat32m2_t vfmacc(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m4))) +vfloat32m4_t vfmacc(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m4_m))) +vfloat32m4_t vfmacc(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m8))) +vfloat32m8_t vfmacc(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32m8_m))) +vfloat32m8_t vfmacc(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32mf2))) +vfloat32mf2_t vfmacc(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f32mf2_m))) +vfloat32mf2_t vfmacc(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m1))) +vfloat32m1_t vfnmacc(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m1_m))) +vfloat32m1_t vfnmacc(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m2))) +vfloat32m2_t vfnmacc(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m2_m))) +vfloat32m2_t vfnmacc(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m4))) +vfloat32m4_t vfnmacc(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m4_m))) +vfloat32m4_t vfnmacc(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m8))) +vfloat32m8_t vfnmacc(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32m8_m))) +vfloat32m8_t vfnmacc(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32mf2))) +vfloat32mf2_t vfnmacc(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f32mf2_m))) +vfloat32mf2_t vfnmacc(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m1))) +vfloat32m1_t vfnmacc(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m1_m))) +vfloat32m1_t vfnmacc(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m2))) +vfloat32m2_t vfnmacc(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m2_m))) +vfloat32m2_t vfnmacc(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m4))) +vfloat32m4_t vfnmacc(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m4_m))) +vfloat32m4_t vfnmacc(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m8))) +vfloat32m8_t vfnmacc(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32m8_m))) 
+vfloat32m8_t vfnmacc(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32mf2))) +vfloat32mf2_t vfnmacc(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f32mf2_m))) +vfloat32mf2_t vfnmacc(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m1))) +vfloat32m1_t vfmsac(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m1_m))) +vfloat32m1_t vfmsac(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m2))) +vfloat32m2_t vfmsac(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m2_m))) +vfloat32m2_t vfmsac(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m4))) +vfloat32m4_t vfmsac(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m4_m))) +vfloat32m4_t vfmsac(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m8))) +vfloat32m8_t vfmsac(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32m8_m))) +vfloat32m8_t vfmsac(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32mf2))) +vfloat32mf2_t vfmsac(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f32mf2_m))) +vfloat32mf2_t vfmsac(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m1))) +vfloat32m1_t vfmsac(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m1_m))) +vfloat32m1_t vfmsac(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m2))) +vfloat32m2_t vfmsac(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m2_m))) +vfloat32m2_t vfmsac(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m4))) +vfloat32m4_t vfmsac(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m4_m))) +vfloat32m4_t vfmsac(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m8))) +vfloat32m8_t vfmsac(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3); + 
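The multiply-accumulate overloads in this stretch differ only in how the product and the accumulator are signed: per lane, vfmacc computes acc + a*b, vfmsac computes a*b - acc, and the vfnmacc/vfnmsac forms negate the product term. A minimal vector-scalar sketch, same assumptions as the earlier examples:

#include <riscv_vector.h>

/* Illustrative only: fused per-lane acc + a*x through the vfmacc
   vector-scalar overload declared above (op0 is the accumulator). */
vfloat32m1_t axpy(vfloat32m1_t acc, float a, vfloat32m1_t x, size_t vl) {
  return vfmacc(acc, a, x, vl);
}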
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32m8_m)))
+vfloat32m8_t vfmsac(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32mf2)))
+vfloat32mf2_t vfmsac(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f32mf2_m)))
+vfloat32mf2_t vfmsac(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m1)))
+vfloat32m1_t vfnmsac(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m1_m)))
+vfloat32m1_t vfnmsac(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m2)))
+vfloat32m2_t vfnmsac(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m2_m)))
+vfloat32m2_t vfnmsac(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m4)))
+vfloat32m4_t vfnmsac(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m4_m)))
+vfloat32m4_t vfnmsac(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m8)))
+vfloat32m8_t vfnmsac(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32m8_m)))
+vfloat32m8_t vfnmsac(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32mf2)))
+vfloat32mf2_t vfnmsac(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f32mf2_m)))
+vfloat32mf2_t vfnmsac(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m1)))
+vfloat32m1_t vfnmsac(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m1_m)))
+vfloat32m1_t vfnmsac(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m2)))
+vfloat32m2_t vfnmsac(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m2_m)))
+vfloat32m2_t vfnmsac(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m4)))
+vfloat32m4_t vfnmsac(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m4_m)))
+vfloat32m4_t vfnmsac(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m8)))
+vfloat32m8_t vfnmsac(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32m8_m)))
+vfloat32m8_t vfnmsac(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32mf2)))
+vfloat32mf2_t vfnmsac(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f32mf2_m)))
+vfloat32mf2_t vfnmsac(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m1)))
+vfloat32m1_t vfmadd(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m1_m)))
+vfloat32m1_t vfmadd(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m2)))
+vfloat32m2_t vfmadd(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m2_m)))
+vfloat32m2_t vfmadd(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m4)))
+vfloat32m4_t vfmadd(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m4_m)))
+vfloat32m4_t vfmadd(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m8)))
+vfloat32m8_t vfmadd(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32m8_m)))
+vfloat32m8_t vfmadd(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32mf2)))
+vfloat32mf2_t vfmadd(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f32mf2_m)))
+vfloat32mf2_t vfmadd(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m1)))
+vfloat32m1_t vfmadd(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m1_m)))
+vfloat32m1_t vfmadd(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m2)))
+vfloat32m2_t vfmadd(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m2_m)))
+vfloat32m2_t vfmadd(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m4)))
+vfloat32m4_t vfmadd(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m4_m)))
+vfloat32m4_t vfmadd(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m8)))
+vfloat32m8_t vfmadd(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32m8_m)))
+vfloat32m8_t vfmadd(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32mf2)))
+vfloat32mf2_t vfmadd(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f32mf2_m)))
+vfloat32mf2_t vfmadd(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m1)))
+vfloat32m1_t vfnmadd(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m1_m)))
+vfloat32m1_t vfnmadd(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m2)))
+vfloat32m2_t vfnmadd(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m2_m)))
+vfloat32m2_t vfnmadd(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m4)))
+vfloat32m4_t vfnmadd(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m4_m)))
+vfloat32m4_t vfnmadd(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m8)))
+vfloat32m8_t vfnmadd(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32m8_m)))
+vfloat32m8_t vfnmadd(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32mf2)))
+vfloat32mf2_t vfnmadd(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f32mf2_m)))
+vfloat32mf2_t vfnmadd(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m1)))
+vfloat32m1_t vfnmadd(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m1_m)))
+vfloat32m1_t vfnmadd(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m2)))
+vfloat32m2_t vfnmadd(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m2_m)))
+vfloat32m2_t vfnmadd(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m4)))
+vfloat32m4_t vfnmadd(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m4_m)))
+vfloat32m4_t vfnmadd(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m8)))
+vfloat32m8_t vfnmadd(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32m8_m)))
+vfloat32m8_t vfnmadd(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32mf2)))
+vfloat32mf2_t vfnmadd(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f32mf2_m)))
+vfloat32mf2_t vfnmadd(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m1)))
+vfloat32m1_t vfmsub(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m1_m)))
+vfloat32m1_t vfmsub(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m2)))
+vfloat32m2_t vfmsub(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m2_m)))
+vfloat32m2_t vfmsub(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m4)))
+vfloat32m4_t vfmsub(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m4_m)))
+vfloat32m4_t vfmsub(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m8)))
+vfloat32m8_t vfmsub(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32m8_m)))
+vfloat32m8_t vfmsub(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32mf2)))
+vfloat32mf2_t vfmsub(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f32mf2_m)))
+vfloat32mf2_t vfmsub(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m1)))
+vfloat32m1_t vfmsub(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m1_m)))
+vfloat32m1_t vfmsub(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m2)))
+vfloat32m2_t vfmsub(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m2_m)))
+vfloat32m2_t vfmsub(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m4)))
+vfloat32m4_t vfmsub(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m4_m)))
+vfloat32m4_t vfmsub(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m8)))
+vfloat32m8_t vfmsub(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32m8_m)))
+vfloat32m8_t vfmsub(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32mf2)))
+vfloat32mf2_t vfmsub(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f32mf2_m)))
+vfloat32mf2_t vfmsub(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m1)))
+vfloat32m1_t vfnmsub(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m1_m)))
+vfloat32m1_t vfnmsub(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m2)))
+vfloat32m2_t vfnmsub(vfloat32m2_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m2_m)))
+vfloat32m2_t vfnmsub(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m4)))
+vfloat32m4_t vfnmsub(vfloat32m4_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m4_m)))
+vfloat32m4_t vfnmsub(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m8)))
+vfloat32m8_t vfnmsub(vfloat32m8_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32m8_m)))
+vfloat32m8_t vfnmsub(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32mf2)))
+vfloat32mf2_t vfnmsub(vfloat32mf2_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f32mf2_m)))
+vfloat32mf2_t vfnmsub(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m1)))
+vfloat32m1_t vfnmsub(vfloat32m1_t op0, float op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m1_m)))
+vfloat32m1_t vfnmsub(vbool32_t op0, vfloat32m1_t op1, float op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m2)))
+vfloat32m2_t vfnmsub(vfloat32m2_t op0, float op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m2_m)))
+vfloat32m2_t vfnmsub(vbool16_t op0, vfloat32m2_t op1, float op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m4)))
+vfloat32m4_t vfnmsub(vfloat32m4_t op0, float op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m4_m)))
+vfloat32m4_t vfnmsub(vbool8_t op0, vfloat32m4_t op1, float op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m8)))
+vfloat32m8_t vfnmsub(vfloat32m8_t op0, float op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32m8_m)))
+vfloat32m8_t vfnmsub(vbool4_t op0, vfloat32m8_t op1, float op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32mf2)))
+vfloat32mf2_t vfnmsub(vfloat32mf2_t op0, float op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f32mf2_m)))
+vfloat32mf2_t vfnmsub(vbool64_t op0, vfloat32mf2_t op1, float op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m1)))
+vfloat32m1_t vfmin(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m1_m)))
+vfloat32m1_t vfmin(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m2)))
+vfloat32m2_t vfmin(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m2_m)))
+vfloat32m2_t vfmin(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m4)))
+vfloat32m4_t vfmin(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m4_m)))
+vfloat32m4_t vfmin(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m8)))
+vfloat32m8_t vfmin(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32m8_m)))
+vfloat32m8_t vfmin(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32mf2)))
+vfloat32mf2_t vfmin(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f32mf2_m)))
+vfloat32mf2_t vfmin(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m1)))
+vfloat32m1_t vfmin(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m1_m)))
+vfloat32m1_t vfmin(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m2)))
+vfloat32m2_t vfmin(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m2_m)))
+vfloat32m2_t vfmin(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m4)))
+vfloat32m4_t vfmin(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m4_m)))
+vfloat32m4_t vfmin(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m8)))
+vfloat32m8_t vfmin(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32m8_m)))
+vfloat32m8_t vfmin(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32mf2)))
+vfloat32mf2_t vfmin(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f32mf2_m)))
+vfloat32mf2_t vfmin(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m1)))
+vfloat32m1_t vfmax(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m1_m)))
+vfloat32m1_t vfmax(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m2)))
+vfloat32m2_t vfmax(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m2_m)))
+vfloat32m2_t vfmax(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m4)))
+vfloat32m4_t vfmax(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m4_m)))
+vfloat32m4_t vfmax(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m8)))
+vfloat32m8_t vfmax(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32m8_m)))
+vfloat32m8_t vfmax(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32mf2)))
+vfloat32mf2_t vfmax(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f32mf2_m)))
+vfloat32mf2_t vfmax(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m1)))
+vfloat32m1_t vfmax(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m1_m)))
+vfloat32m1_t vfmax(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m2)))
+vfloat32m2_t vfmax(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m2_m)))
+vfloat32m2_t vfmax(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m4)))
+vfloat32m4_t vfmax(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m4_m)))
+vfloat32m4_t vfmax(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m8)))
+vfloat32m8_t vfmax(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32m8_m)))
+vfloat32m8_t vfmax(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32mf2)))
+vfloat32mf2_t vfmax(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f32mf2_m)))
+vfloat32mf2_t vfmax(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m1)))
+vfloat32m1_t vfsgnj(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m1_m)))
+vfloat32m1_t vfsgnj(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m2)))
+vfloat32m2_t vfsgnj(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m2_m)))
+vfloat32m2_t vfsgnj(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m4)))
+vfloat32m4_t vfsgnj(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m4_m)))
+vfloat32m4_t vfsgnj(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m8)))
+vfloat32m8_t vfsgnj(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32m8_m)))
+vfloat32m8_t vfsgnj(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32mf2)))
+vfloat32mf2_t vfsgnj(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f32mf2_m)))
+vfloat32mf2_t vfsgnj(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m1)))
+vfloat32m1_t vfsgnj(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m1_m)))
+vfloat32m1_t vfsgnj(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m2)))
+vfloat32m2_t vfsgnj(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m2_m)))
+vfloat32m2_t vfsgnj(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m4)))
+vfloat32m4_t vfsgnj(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m4_m)))
+vfloat32m4_t vfsgnj(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m8)))
+vfloat32m8_t vfsgnj(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32m8_m)))
+vfloat32m8_t vfsgnj(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32mf2)))
+vfloat32mf2_t vfsgnj(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f32mf2_m)))
+vfloat32mf2_t vfsgnj(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m1)))
+vfloat32m1_t vfsgnjn(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m1_m)))
+vfloat32m1_t vfsgnjn(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m2)))
+vfloat32m2_t vfsgnjn(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m2_m)))
+vfloat32m2_t vfsgnjn(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m4)))
+vfloat32m4_t vfsgnjn(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m4_m)))
+vfloat32m4_t vfsgnjn(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m8)))
+vfloat32m8_t vfsgnjn(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32m8_m)))
+vfloat32m8_t vfsgnjn(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32mf2)))
+vfloat32mf2_t vfsgnjn(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f32mf2_m)))
+vfloat32mf2_t vfsgnjn(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m1)))
+vfloat32m1_t vfsgnjn(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m1_m)))
+vfloat32m1_t vfsgnjn(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m2)))
+vfloat32m2_t vfsgnjn(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m2_m)))
+vfloat32m2_t vfsgnjn(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m4)))
+vfloat32m4_t vfsgnjn(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m4_m)))
+vfloat32m4_t vfsgnjn(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m8)))
+vfloat32m8_t vfsgnjn(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32m8_m)))
+vfloat32m8_t vfsgnjn(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32mf2)))
+vfloat32mf2_t vfsgnjn(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f32mf2_m)))
+vfloat32mf2_t vfsgnjn(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m1)))
+vfloat32m1_t vfsgnjx(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m1_m)))
+vfloat32m1_t vfsgnjx(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m2)))
+vfloat32m2_t vfsgnjx(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m2_m)))
+vfloat32m2_t vfsgnjx(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m4)))
+vfloat32m4_t vfsgnjx(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m4_m)))
+vfloat32m4_t vfsgnjx(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m8)))
+vfloat32m8_t vfsgnjx(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32m8_m)))
+vfloat32m8_t vfsgnjx(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32mf2)))
+vfloat32mf2_t vfsgnjx(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f32mf2_m)))
+vfloat32mf2_t vfsgnjx(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m1)))
+vfloat32m1_t vfsgnjx(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m1_m)))
+vfloat32m1_t vfsgnjx(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m2)))
+vfloat32m2_t vfsgnjx(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m2_m)))
+vfloat32m2_t vfsgnjx(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m4)))
+vfloat32m4_t vfsgnjx(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m4_m)))
+vfloat32m4_t vfsgnjx(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m8)))
+vfloat32m8_t vfsgnjx(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32m8_m)))
+vfloat32m8_t vfsgnjx(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32mf2)))
+vfloat32mf2_t vfsgnjx(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f32mf2_m)))
+vfloat32mf2_t vfsgnjx(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m1)))
+vfloat32m1_t vfabs(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m1_m)))
+vfloat32m1_t vfabs(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m2)))
+vfloat32m2_t vfabs(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m2_m)))
+vfloat32m2_t vfabs(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m4)))
+vfloat32m4_t vfabs(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m4_m)))
+vfloat32m4_t vfabs(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m8)))
+vfloat32m8_t vfabs(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32m8_m)))
+vfloat32m8_t vfabs(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32mf2)))
+vfloat32mf2_t vfabs(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f32mf2_m)))
+vfloat32mf2_t vfabs(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m1_b32)))
+vbool32_t vmfeq(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m1_b32_m)))
+vbool32_t vmfeq(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m2_b16)))
+vbool16_t vmfeq(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m2_b16_m)))
+vbool16_t vmfeq(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m4_b8)))
+vbool8_t vmfeq(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m4_b8_m)))
+vbool8_t vmfeq(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m8_b4)))
+vbool4_t vmfeq(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32m8_b4_m)))
+vbool4_t vmfeq(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32mf2_b64)))
+vbool64_t vmfeq(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f32mf2_b64_m)))
+vbool64_t vmfeq(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m1_b32)))
+vbool32_t vmfeq(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m1_b32_m)))
+vbool32_t vmfeq(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m2_b16)))
+vbool16_t vmfeq(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m2_b16_m)))
+vbool16_t vmfeq(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m4_b8)))
+vbool8_t vmfeq(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m4_b8_m)))
+vbool8_t vmfeq(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m8_b4)))
+vbool4_t vmfeq(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32m8_b4_m)))
+vbool4_t vmfeq(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32mf2_b64)))
+vbool64_t vmfeq(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f32mf2_b64_m)))
+vbool64_t vmfeq(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m1_b32)))
+vbool32_t vmfne(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m1_b32_m)))
+vbool32_t vmfne(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m2_b16)))
+vbool16_t vmfne(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m2_b16_m)))
+vbool16_t vmfne(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m4_b8)))
+vbool8_t vmfne(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m4_b8_m)))
+vbool8_t vmfne(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m8_b4)))
+vbool4_t vmfne(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32m8_b4_m)))
+vbool4_t vmfne(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
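The vmfeq/vmfne overloads above return the LMUL-matched mask types (vbool32_t for f32m1, and so on), which feed the masked variants of the value operations. A minimal sketch, assuming the same hypothetical setup as the earlier example; vmfeq(x, x, vl) is the usual "not NaN" test, and the masked vfabs overload takes the fallback vector as its masked-off operand:

// |x| where x is not NaN; NaN lanes take the value from fallback.
static inline vfloat32m1_t abs_or(vfloat32m1_t fallback, vfloat32m1_t x,
                                  size_t vl) {
  vbool32_t ok = vmfeq(x, x, vl);     // x == x fails only for NaN lanes
  return vfabs(ok, fallback, x, vl);  // masked-off lanes keep fallback
}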
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32mf2_b64)))
+vbool64_t vmfne(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f32mf2_b64_m)))
+vbool64_t vmfne(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m1_b32)))
+vbool32_t vmfne(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m1_b32_m)))
+vbool32_t vmfne(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m2_b16)))
+vbool16_t vmfne(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m2_b16_m)))
+vbool16_t vmfne(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m4_b8)))
+vbool8_t vmfne(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m4_b8_m)))
+vbool8_t vmfne(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m8_b4)))
+vbool4_t vmfne(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32m8_b4_m)))
+vbool4_t vmfne(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32mf2_b64)))
+vbool64_t vmfne(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f32mf2_b64_m)))
+vbool64_t vmfne(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m1_b32)))
+vbool32_t vmflt(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m1_b32_m)))
+vbool32_t vmflt(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m2_b16)))
+vbool16_t vmflt(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m2_b16_m)))
+vbool16_t vmflt(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m4_b8)))
+vbool8_t vmflt(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m4_b8_m)))
+vbool8_t vmflt(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m8_b4)))
+vbool4_t vmflt(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32m8_b4_m)))
+vbool4_t vmflt(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32mf2_b64)))
+vbool64_t vmflt(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f32mf2_b64_m)))
+vbool64_t vmflt(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m1_b32)))
+vbool32_t vmflt(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m1_b32_m)))
+vbool32_t vmflt(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m2_b16)))
+vbool16_t vmflt(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m2_b16_m)))
+vbool16_t vmflt(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m4_b8)))
+vbool8_t vmflt(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m4_b8_m)))
+vbool8_t vmflt(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m8_b4)))
+vbool4_t vmflt(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32m8_b4_m)))
+vbool4_t vmflt(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32mf2_b64)))
+vbool64_t vmflt(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f32mf2_b64_m)))
+vbool64_t vmflt(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m1_b32)))
+vbool32_t vmfle(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m1_b32_m)))
+vbool32_t vmfle(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m2_b16)))
+vbool16_t vmfle(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m2_b16_m)))
+vbool16_t vmfle(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m4_b8)))
+vbool8_t vmfle(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m4_b8_m)))
+vbool8_t vmfle(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m8_b4)))
+vbool4_t vmfle(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32m8_b4_m)))
+vbool4_t vmfle(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32mf2_b64)))
+vbool64_t vmfle(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f32mf2_b64_m)))
+vbool64_t vmfle(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m1_b32)))
+vbool32_t vmfle(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m1_b32_m)))
+vbool32_t vmfle(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m2_b16)))
+vbool16_t vmfle(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m2_b16_m)))
+vbool16_t vmfle(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m4_b8)))
+vbool8_t vmfle(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m4_b8_m)))
+vbool8_t vmfle(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m8_b4)))
+vbool4_t vmfle(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32m8_b4_m)))
+vbool4_t vmfle(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32mf2_b64)))
+vbool64_t vmfle(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f32mf2_b64_m)))
+vbool64_t vmfle(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m1_b32)))
+vbool32_t vmfgt(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m1_b32_m)))
+vbool32_t vmfgt(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m2_b16)))
+vbool16_t vmfgt(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m2_b16_m)))
+vbool16_t vmfgt(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m4_b8)))
+vbool8_t vmfgt(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m4_b8_m)))
+vbool8_t vmfgt(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m8_b4)))
+vbool4_t vmfgt(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32m8_b4_m)))
+vbool4_t vmfgt(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32mf2_b64)))
+vbool64_t vmfgt(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f32mf2_b64_m)))
+vbool64_t vmfgt(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m1_b32)))
+vbool32_t vmfgt(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m1_b32_m)))
+vbool32_t vmfgt(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m2_b16)))
+vbool16_t vmfgt(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m2_b16_m)))
+vbool16_t vmfgt(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m4_b8)))
+vbool8_t vmfgt(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m4_b8_m)))
+vbool8_t vmfgt(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m8_b4)))
+vbool4_t vmfgt(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32m8_b4_m)))
+vbool4_t vmfgt(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32mf2_b64)))
+vbool64_t vmfgt(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f32mf2_b64_m)))
+vbool64_t vmfgt(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m1_b32)))
+vbool32_t vmfge(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m1_b32_m)))
+vbool32_t vmfge(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m2_b16)))
+vbool16_t vmfge(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m2_b16_m)))
+vbool16_t vmfge(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m4_b8)))
+vbool8_t vmfge(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m4_b8_m)))
+vbool8_t vmfge(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m8_b4)))
+vbool4_t vmfge(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32m8_b4_m)))
+vbool4_t vmfge(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32mf2_b64)))
+vbool64_t vmfge(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f32mf2_b64_m)))
+vbool64_t vmfge(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m1_b32)))
+vbool32_t vmfge(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m1_b32_m)))
+vbool32_t vmfge(vbool32_t op0, vbool32_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m2_b16)))
+vbool16_t vmfge(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m2_b16_m)))
+vbool16_t vmfge(vbool16_t op0, vbool16_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m4_b8)))
+vbool8_t vmfge(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m4_b8_m)))
+vbool8_t vmfge(vbool8_t op0, vbool8_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m8_b4)))
+vbool4_t vmfge(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32m8_b4_m)))
+vbool4_t vmfge(vbool4_t op0, vbool4_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32mf2_b64)))
+vbool64_t vmfge(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f32mf2_b64_m)))
+vbool64_t vmfge(vbool64_t op0, vbool64_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f32m1)))
+vfloat32m1_t vmerge(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f32m2)))
+vfloat32m2_t vmerge(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f32m4)))
+vfloat32m4_t vmerge(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f32m8)))
+vfloat32m8_t vmerge(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f32mf2)))
+vfloat32mf2_t vmerge(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f32m1)))
+vfloat32m1_t vfmerge(vbool32_t op0, vfloat32m1_t op1, float op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f32m2)))
+vfloat32m2_t vfmerge(vbool16_t op0, vfloat32m2_t op1, float op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f32m4)))
+vfloat32m4_t vfmerge(vbool8_t op0, vfloat32m4_t op1, float op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f32m8)))
+vfloat32m8_t vfmerge(vbool4_t op0, vfloat32m8_t op1, float op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f32mf2)))
+vfloat32mf2_t vfmerge(vbool64_t op0, vfloat32mf2_t op1, float op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m1_f32m1)))
+vfloat32m1_t vfredmax(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m1_f32m1_m)))
+vfloat32m1_t vfredmax(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m2_f32m1)))
+vfloat32m1_t vfredmax(vfloat32m1_t op0, vfloat32m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m2_f32m1_m)))
+vfloat32m1_t vfredmax(vbool16_t op0, vfloat32m1_t op1, vfloat32m2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m4_f32m1)))
+vfloat32m1_t vfredmax(vfloat32m1_t op0, vfloat32m4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m4_f32m1_m)))
+vfloat32m1_t vfredmax(vbool8_t op0, vfloat32m1_t op1, vfloat32m4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m8_f32m1)))
+vfloat32m1_t vfredmax(vfloat32m1_t op0, vfloat32m8_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32m8_f32m1_m)))
+vfloat32m1_t vfredmax(vbool4_t op0, vfloat32m1_t op1, vfloat32m8_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32mf2_f32m1)))
+vfloat32m1_t vfredmax(vfloat32m1_t op0, vfloat32mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f32mf2_f32m1_m)))
+vfloat32m1_t vfredmax(vbool64_t op0, vfloat32m1_t op1, vfloat32mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m1_f32m1)))
+vfloat32m1_t vfredmin(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m1_f32m1_m)))
+vfloat32m1_t vfredmin(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m2_f32m1)))
+vfloat32m1_t vfredmin(vfloat32m1_t op0, vfloat32m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m2_f32m1_m)))
+vfloat32m1_t vfredmin(vbool16_t op0, vfloat32m1_t op1, vfloat32m2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m4_f32m1)))
+vfloat32m1_t vfredmin(vfloat32m1_t op0, vfloat32m4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m4_f32m1_m)))
+vfloat32m1_t vfredmin(vbool8_t op0, vfloat32m1_t op1, vfloat32m4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m8_f32m1)))
+vfloat32m1_t vfredmin(vfloat32m1_t op0, vfloat32m8_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32m8_f32m1_m)))
+vfloat32m1_t vfredmin(vbool4_t op0, vfloat32m1_t op1, vfloat32m8_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32mf2_f32m1)))
+vfloat32m1_t vfredmin(vfloat32m1_t op0, vfloat32mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f32mf2_f32m1_m)))
+vfloat32m1_t vfredmin(vbool64_t op0, vfloat32m1_t op1, vfloat32mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m1_f32m1)))
+vfloat32m1_t vfredsum(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m1_f32m1_m)))
+vfloat32m1_t vfredsum(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m2_f32m1)))
+vfloat32m1_t vfredsum(vfloat32m1_t op0, vfloat32m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m2_f32m1_m)))
+vfloat32m1_t vfredsum(vbool16_t op0, vfloat32m1_t op1, vfloat32m2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m4_f32m1)))
+vfloat32m1_t vfredsum(vfloat32m1_t op0, vfloat32m4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m4_f32m1_m)))
+vfloat32m1_t vfredsum(vbool8_t op0, vfloat32m1_t op1, vfloat32m4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m8_f32m1)))
+vfloat32m1_t vfredsum(vfloat32m1_t op0, vfloat32m8_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32m8_f32m1_m)))
+vfloat32m1_t vfredsum(vbool4_t op0, vfloat32m1_t op1, vfloat32m8_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32mf2_f32m1)))
+vfloat32m1_t vfredsum(vfloat32m1_t op0, vfloat32mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f32mf2_f32m1_m)))
+vfloat32m1_t vfredsum(vbool64_t op0, vfloat32m1_t op1, vfloat32mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m1_f32m1)))
+vfloat32m1_t vfredosum(vfloat32m1_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m1_f32m1_m)))
+vfloat32m1_t vfredosum(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m2_f32m1)))
+vfloat32m1_t vfredosum(vfloat32m1_t op0, vfloat32m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m2_f32m1_m)))
+vfloat32m1_t vfredosum(vbool16_t op0, vfloat32m1_t op1, vfloat32m2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m4_f32m1)))
+vfloat32m1_t vfredosum(vfloat32m1_t op0, vfloat32m4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m4_f32m1_m)))
+vfloat32m1_t vfredosum(vbool8_t op0, vfloat32m1_t op1, vfloat32m4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m8_f32m1)))
+vfloat32m1_t vfredosum(vfloat32m1_t op0, vfloat32m8_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32m8_f32m1_m)))
+vfloat32m1_t vfredosum(vbool4_t op0, vfloat32m1_t op1, vfloat32m8_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32mf2_f32m1)))
+vfloat32m1_t vfredosum(vfloat32m1_t op0, vfloat32mf2_t op1, vfloat32m1_t op2, size_t op3);
+
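+/* Editor's note: illustrative sketch (comment only) for the reduction
+ * overloads above: vfredsum folds every lane of op1 into lane 0 of the
+ * result, seeded by lane 0 of op2. vfmv_f, declared further below, reads the
+ * scalar back; vsetvl_e32m1, vle32_v_f32m1 and vfmv_v_f_f32m1 are assumed to
+ * be declared elsewhere in this header.
+ *
+ *   size_t vl = vsetvl_e32m1(n);
+ *   vfloat32m1_t v    = vle32_v_f32m1(a, vl);
+ *   vfloat32m1_t zero = vfmv_v_f_f32m1(0.0f, vl);   // seed = 0.0f in lane 0
+ *   vfloat32m1_t acc  = vfredsum(zero, v, zero, vl);
+ *   float sum = vfmv_f(acc);                        // lane 0 holds the sum
+ */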
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f32mf2_f32m1_m)))
+vfloat32m1_t vfredosum(vbool64_t op0, vfloat32m1_t op1, vfloat32mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f32m1_f32)))
+float vfmv_f(vfloat32m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f32m2_f32)))
+float vfmv_f(vfloat32m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f32m4_f32)))
+float vfmv_f(vfloat32m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f32m8_f32)))
+float vfmv_f(vfloat32m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f32mf2_f32)))
+float vfmv_f(vfloat32mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f32m1)))
+vfloat32m1_t vfmv_s(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f32m2)))
+vfloat32m2_t vfmv_s(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f32m4)))
+vfloat32m4_t vfmv_s(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f32m8)))
+vfloat32m8_t vfmv_s(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f32mf2)))
+vfloat32mf2_t vfmv_s(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m1)))
+vfloat32m1_t vslideup(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m1_m)))
+vfloat32m1_t vslideup(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m2)))
+vfloat32m2_t vslideup(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m2_m)))
+vfloat32m2_t vslideup(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m4)))
+vfloat32m4_t vslideup(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m4_m)))
+vfloat32m4_t vslideup(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m8)))
+vfloat32m8_t vslideup(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32m8_m)))
+vfloat32m8_t vslideup(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32mf2)))
+vfloat32mf2_t vslideup(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f32mf2_m)))
+vfloat32mf2_t vslideup(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m1)))
+vfloat32m1_t vslidedown(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m1_m)))
+vfloat32m1_t vslidedown(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m2)))
+vfloat32m2_t vslidedown(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m2_m)))
+vfloat32m2_t vslidedown(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m4)))
+vfloat32m4_t vslidedown(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m4_m)))
+vfloat32m4_t vslidedown(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m8)))
+vfloat32m8_t vslidedown(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32m8_m)))
+vfloat32m8_t vslidedown(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32mf2)))
+vfloat32mf2_t vslidedown(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f32mf2_m)))
+vfloat32mf2_t vslidedown(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m1)))
+vfloat32m1_t vfslide1up(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m1_m)))
+vfloat32m1_t vfslide1up(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m2)))
+vfloat32m2_t vfslide1up(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m2_m)))
+vfloat32m2_t vfslide1up(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m4)))
+vfloat32m4_t vfslide1up(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m4_m)))
+vfloat32m4_t vfslide1up(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m8)))
+vfloat32m8_t vfslide1up(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32m8_m)))
+vfloat32m8_t vfslide1up(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32mf2)))
+vfloat32mf2_t vfslide1up(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f32mf2_m)))
+vfloat32mf2_t vfslide1up(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m1)))
+vfloat32m1_t vfslide1down(vfloat32m1_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m1_m)))
+vfloat32m1_t vfslide1down(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m2)))
+vfloat32m2_t vfslide1down(vfloat32m2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m2_m)))
+vfloat32m2_t vfslide1down(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m4)))
+vfloat32m4_t vfslide1down(vfloat32m4_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m4_m)))
+vfloat32m4_t vfslide1down(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m8)))
+vfloat32m8_t vfslide1down(vfloat32m8_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32m8_m)))
+vfloat32m8_t vfslide1down(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32mf2)))
+vfloat32mf2_t vfslide1down(vfloat32mf2_t op0, float op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f32mf2_m)))
+vfloat32mf2_t vfslide1down(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m1)))
+vfloat32m1_t vrgather(vfloat32m1_t op0, vuint32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m1_m)))
+vfloat32m1_t vrgather(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m2)))
+vfloat32m2_t vrgather(vfloat32m2_t op0, vuint32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m2_m)))
+vfloat32m2_t vrgather(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m4)))
+vfloat32m4_t vrgather(vfloat32m4_t op0, vuint32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m4_m)))
+vfloat32m4_t vrgather(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m8)))
+vfloat32m8_t vrgather(vfloat32m8_t op0, vuint32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32m8_m)))
+vfloat32m8_t vrgather(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32mf2)))
+vfloat32mf2_t vrgather(vfloat32mf2_t op0, vuint32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f32mf2_m)))
+vfloat32mf2_t vrgather(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m1)))
+vfloat32m1_t vrgather(vfloat32m1_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m1_m)))
+vfloat32m1_t vrgather(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m2)))
+vfloat32m2_t vrgather(vfloat32m2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m2_m)))
+vfloat32m2_t vrgather(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m4)))
+vfloat32m4_t vrgather(vfloat32m4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m4_m)))
+vfloat32m4_t vrgather(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m8)))
+vfloat32m8_t vrgather(vfloat32m8_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32m8_m)))
+vfloat32m8_t vrgather(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32mf2)))
+vfloat32mf2_t vrgather(vfloat32mf2_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f32mf2_m)))
+vfloat32mf2_t vrgather(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m1)))
+vfloat32m1_t vrgatherei16(vfloat32m1_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m1_m)))
+vfloat32m1_t vrgatherei16(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m2)))
+vfloat32m2_t vrgatherei16(vfloat32m2_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m2_m)))
+vfloat32m2_t vrgatherei16(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m4)))
+vfloat32m4_t vrgatherei16(vfloat32m4_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m4_m)))
+vfloat32m4_t vrgatherei16(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m8)))
+vfloat32m8_t vrgatherei16(vfloat32m8_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32m8_m)))
+vfloat32m8_t vrgatherei16(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32mf2)))
+vfloat32mf2_t vrgatherei16(vfloat32mf2_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f32mf2_m)))
+vfloat32mf2_t vrgatherei16(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f32m1)))
+vfloat32m1_t vcompress(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f32m2)))
+vfloat32m2_t vcompress(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f32m4)))
+vfloat32m4_t vcompress(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f32m8)))
+vfloat32m8_t vcompress(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f32mf2)))
+vfloat32mf2_t vcompress(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f32m2_f32m1)))
+vfloat32m1_t vget_f32m1(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f32m4_f32m1)))
+vfloat32m1_t vget_f32m1(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f32m8_f32m1)))
+vfloat32m1_t vget_f32m1(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f32m4_f32m2)))
+vfloat32m2_t vget_f32m2(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f32m8_f32m2)))
+vfloat32m2_t vget_f32m2(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f32m8_f32m4)))
+vfloat32m4_t vget_f32m4(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f32m1_f32m2)))
+vfloat32m2_t vset(vfloat32m2_t op0, size_t op1, vfloat32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f32m1_f32m4)))
+vfloat32m4_t vset(vfloat32m4_t op0, size_t op1, vfloat32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f32m2_f32m4)))
+vfloat32m4_t vset(vfloat32m4_t op0, size_t op1, vfloat32m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m1)))
+void vsuxei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m1_m)))
+void vsuxei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m2)))
+void vsuxei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m2_m)))
+void vsuxei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m4)))
+void vsuxei8(float * op0, vuint8m1_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m4_m)))
+void vsuxei8(vbool8_t op0, float * op1, vuint8m1_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m8)))
+void vsuxei8(float * op0, vuint8m2_t op1, vfloat32m8_t op2, size_t op3);
+
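+/* Editor's note: illustrative sketch (comment only) for the permute
+ * overloads above: vrgather computes res[i] = src[idx[i]] and vcompress
+ * packs mask-selected lanes toward lane 0. vid_v_u32m1 and the overloaded
+ * vrsub are assumed to be declared elsewhere in this header.
+ *
+ *   vuint32m1_t idx = vid_v_u32m1(vl);               // 0, 1, 2, ...
+ *   idx = vrsub(idx, (uint32_t)(vl - 1), vl);        // vl-1, vl-2, ..., 0
+ *   vfloat32m1_t rev = vrgather(v, idx, vl);         // reverse active lanes
+ *
+ *   vfloat32m1_t packed = vcompress(mask, v, v, vl); // selected lanes first
+ */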
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32m8_m)))
+void vsuxei8(vbool4_t op0, float * op1, vuint8m2_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32mf2)))
+void vsuxei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f32mf2_m)))
+void vsuxei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f32m1_f32m8)))
+vfloat32m8_t vset(vfloat32m8_t op0, size_t op1, vfloat32m1_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f32m2_f32m8)))
+vfloat32m8_t vset(vfloat32m8_t op0, size_t op1, vfloat32m2_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f32m4_f32m8)))
+vfloat32m8_t vset(vfloat32m8_t op0, size_t op1, vfloat32m4_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m1)))
+void vsuxei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m1_m)))
+void vsuxei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m2)))
+void vsuxei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m2_m)))
+void vsuxei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m4)))
+void vsuxei16(float * op0, vuint16m2_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m4_m)))
+void vsuxei16(vbool8_t op0, float * op1, vuint16m2_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m8)))
+void vsuxei16(float * op0, vuint16m4_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32m8_m)))
+void vsuxei16(vbool4_t op0, float * op1, vuint16m4_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32mf2)))
+void vsuxei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f32mf2_m)))
+void vsuxei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m1)))
+void vsuxei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m1_m)))
+void vsuxei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m2)))
+void vsuxei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m2_m)))
+void vsuxei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m4)))
+void vsuxei32(float * op0, vuint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m4_m)))
+void vsuxei32(vbool8_t op0, float * op1, vuint32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m8)))
+void vsuxei32(float * op0, vuint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32m8_m)))
+void vsuxei32(vbool4_t op0, float * op1, vuint32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32mf2)))
+void vsuxei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f32mf2_m)))
+void vsuxei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32m1)))
+void vsuxei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32m1_m)))
+void vsuxei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32m2)))
+void vsuxei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32m2_m)))
+void vsuxei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32m4)))
+void vsuxei64(float * op0, vuint64m8_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32m4_m)))
+void vsuxei64(vbool8_t op0, float * op1, vuint64m8_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32mf2)))
+void vsuxei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f32mf2_m)))
+void vsuxei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m1)))
+void vsoxei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m1_m)))
+void vsoxei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m2)))
+void vsoxei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m2_m)))
+void vsoxei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m4)))
+void vsoxei8(float * op0, vuint8m1_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m4_m)))
+void vsoxei8(vbool8_t op0, float * op1, vuint8m1_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m8)))
+void vsoxei8(float * op0, vuint8m2_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32m8_m)))
+void vsoxei8(vbool4_t op0, float * op1, vuint8m2_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32mf2)))
+void vsoxei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f32mf2_m)))
+void vsoxei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m1)))
+void vsoxei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m1_m)))
+void vsoxei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m2)))
+void vsoxei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m2_m)))
+void vsoxei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m4)))
+void vsoxei16(float * op0, vuint16m2_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m4_m)))
+void vsoxei16(vbool8_t op0, float * op1, vuint16m2_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m8)))
+void vsoxei16(float * op0, vuint16m4_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32m8_m)))
+void vsoxei16(vbool4_t op0, float * op1, vuint16m4_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32mf2)))
+void vsoxei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f32mf2_m)))
+void vsoxei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m1)))
+void vsoxei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m1_m)))
+void vsoxei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m2)))
+void vsoxei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m2_m)))
+void vsoxei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m4)))
+void vsoxei32(float * op0, vuint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m4_m)))
+void vsoxei32(vbool8_t op0, float * op1, vuint32m4_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m8)))
+void vsoxei32(float * op0, vuint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32m8_m)))
+void vsoxei32(vbool4_t op0, float * op1, vuint32m8_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32mf2)))
+void vsoxei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f32mf2_m)))
+void vsoxei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32m1)))
+void vsoxei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32m1_m)))
+void vsoxei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32m2)))
+void vsoxei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32m2_m)))
+void vsoxei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32m4)))
+void vsoxei64(float * op0, vuint64m8_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32m4_m)))
+void vsoxei64(vbool8_t op0, float * op1, vuint64m8_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32mf2)))
+void vsoxei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f32mf2_m)))
+void vsoxei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_f32m1_m)))
+vfloat32m1_t vle32ff(vbool32_t op0, vfloat32m1_t op1, const float * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_f32m2_m)))
+vfloat32m2_t vle32ff(vbool16_t op0, vfloat32m2_t op1, const float * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_f32m4_m)))
+vfloat32m4_t vle32ff(vbool8_t op0, vfloat32m4_t op1, const float * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_f32m8_m)))
+vfloat32m8_t vle32ff(vbool4_t op0, vfloat32m8_t op1, const float * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32ff_v_f32mf2_m)))
+vfloat32mf2_t vle32ff(vbool64_t op0, vfloat32mf2_t op1, const float * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m1)))
+vfloat32m1_t vfneg(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m1_m)))
+vfloat32m1_t vfneg(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m2)))
+vfloat32m2_t vfneg(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m2_m)))
+vfloat32m2_t vfneg(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m4)))
+vfloat32m4_t vfneg(vfloat32m4_t op0, size_t op1);
+
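+/* Editor's note: illustrative sketch (comment only). vsuxei32/vsoxei32 above
+ * scatter float lanes to base[offset[i]] (unordered vs. ordered), where the
+ * index vector holds byte offsets; vle32ff is the fault-only-first load,
+ * which may deliver fewer than vl lanes and reports the count through its
+ * size_t* argument. vle32_v_u32m1, the overloaded vsll, and the unmasked
+ * vle32ff_v_f32m1 are assumed to be declared elsewhere in this header.
+ *
+ *   vuint32m1_t off = vsll(vle32_v_u32m1(perm, vl), 2, vl); // idx * 4 bytes
+ *   vsuxei32(out, off, v, vl);                              // out[perm[i]] = v[i]
+ *
+ *   size_t got;
+ *   vfloat32m1_t w = vle32ff_v_f32m1(p, &got, vl);          // got <= vl
+ */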
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m4_m)))
+vfloat32m4_t vfneg(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m8)))
+vfloat32m8_t vfneg(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32m8_m)))
+vfloat32m8_t vfneg(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32mf2)))
+vfloat32mf2_t vfneg(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f32mf2_m)))
+vfloat32mf2_t vfneg(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_f32m1_m)))
+vfloat32m1_t vle32(vbool32_t op0, vfloat32m1_t op1, const float * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_f32m2_m)))
+vfloat32m2_t vle32(vbool16_t op0, vfloat32m2_t op1, const float * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_f32m4_m)))
+vfloat32m4_t vle32(vbool8_t op0, vfloat32m4_t op1, const float * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_f32m8_m)))
+vfloat32m8_t vle32(vbool4_t op0, vfloat32m8_t op1, const float * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle32_v_f32mf2_m)))
+vfloat32mf2_t vle32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m1)))
+void vse32(float * op0, vfloat32m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m1_m)))
+void vse32(vbool32_t op0, float * op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m2)))
+void vse32(float * op0, vfloat32m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m2_m)))
+void vse32(vbool16_t op0, float * op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m4)))
+void vse32(float * op0, vfloat32m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m4_m)))
+void vse32(vbool8_t op0, float * op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m8)))
+void vse32(float * op0, vfloat32m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32m8_m)))
+void vse32(vbool4_t op0, float * op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32mf2)))
+void vse32(float * op0, vfloat32mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse32_v_f32mf2_m)))
+void vse32(vbool64_t op0, float * op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_f32m1_m)))
+vfloat32m1_t vlse32(vbool32_t op0, vfloat32m1_t op1, const float * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_f32m2_m)))
+vfloat32m2_t vlse32(vbool16_t op0, vfloat32m2_t op1, const float * op2, ptrdiff_t op3, size_t op4);
+
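+/* Editor's note: illustrative sketch (comment only) for the unit-stride and
+ * strided accesses above: vse32 stores one strip contiguously, while vlse32
+ * reads one float every `stride` bytes, e.g. one column of a row-major
+ * matrix per strip. The unmasked, non-overloaded vlse32_v_f32m1 is assumed
+ * to be declared elsewhere in this header.
+ *
+ *   ptrdiff_t stride = (ptrdiff_t)(cols * sizeof(float));
+ *   size_t i = 0;
+ *   while (i < rows) {
+ *     size_t vl = vsetvl_e32m1(rows - i);
+ *     vfloat32m1_t col = vlse32_v_f32m1(&m[i * cols + c], stride, vl);
+ *     vse32(&out[i], col, vl);
+ *     i += vl;
+ *   }
+ */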
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_f32m4_m)))
+vfloat32m4_t vlse32(vbool8_t op0, vfloat32m4_t op1, const float * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_f32m8_m)))
+vfloat32m8_t vlse32(vbool4_t op0, vfloat32m8_t op1, const float * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse32_v_f32mf2_m)))
+vfloat32mf2_t vlse32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m1)))
+vuint32m1_t vfclass(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m1_m)))
+vuint32m1_t vfclass(vbool32_t op0, vuint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m2)))
+vuint32m2_t vfclass(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m2_m)))
+vuint32m2_t vfclass(vbool16_t op0, vuint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m4)))
+vuint32m4_t vfclass(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m4_m)))
+vuint32m4_t vfclass(vbool8_t op0, vuint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m8)))
+vuint32m8_t vfclass(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32m8_m)))
+vuint32m8_t vfclass(vbool4_t op0, vuint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32mf2)))
+vuint32mf2_t vfclass(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u32mf2_m)))
+vuint32mf2_t vfclass(vbool64_t op0, vuint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m1)))
+vfloat32m1_t vfcvt_f(vint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m1_m)))
+vfloat32m1_t vfcvt_f(vbool32_t op0, vfloat32m1_t op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m2)))
+vfloat32m2_t vfcvt_f(vint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m2_m)))
+vfloat32m2_t vfcvt_f(vbool16_t op0, vfloat32m2_t op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m4)))
+vfloat32m4_t vfcvt_f(vint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m4_m)))
+vfloat32m4_t vfcvt_f(vbool8_t op0, vfloat32m4_t op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m8)))
+vfloat32m8_t vfcvt_f(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32m8_m)))
+vfloat32m8_t vfcvt_f(vbool4_t op0, vfloat32m8_t op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32mf2)))
+vfloat32mf2_t vfcvt_f(vint32mf2_t op0, size_t op1);
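+/* Editor's note: illustrative sketch (comment only). vfclass above returns a
+ * per-lane category bitmap (bit 0 = -inf, bit 3 = -0, bit 4 = +0, bit 7 =
+ * +inf, bit 8 = signaling NaN, bit 9 = quiet NaN), and vfcvt_f converts
+ * integer lanes to float in the same register group.
+ *
+ *   vuint32m1_t cls = vfclass(v, vl);    // lanes with (cls & 0x300) are NaN
+ *   vfloat32m1_t f  = vfcvt_f(iv, vl);   // int32 lanes -> float lanes
+ */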
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f32mf2_m)))
+vfloat32mf2_t vfcvt_f(vbool64_t op0, vfloat32mf2_t op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m1)))
+vfloat32m1_t vfcvt_f(vuint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m1_m)))
+vfloat32m1_t vfcvt_f(vbool32_t op0, vfloat32m1_t op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m2)))
+vfloat32m2_t vfcvt_f(vuint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m2_m)))
+vfloat32m2_t vfcvt_f(vbool16_t op0, vfloat32m2_t op1, vuint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m4)))
+vfloat32m4_t vfcvt_f(vuint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m4_m)))
+vfloat32m4_t vfcvt_f(vbool8_t op0, vfloat32m4_t op1, vuint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m8)))
+vfloat32m8_t vfcvt_f(vuint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32m8_m)))
+vfloat32m8_t vfcvt_f(vbool4_t op0, vfloat32m8_t op1, vuint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32mf2)))
+vfloat32mf2_t vfcvt_f(vuint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f32mf2_m)))
+vfloat32mf2_t vfcvt_f(vbool64_t op0, vfloat32mf2_t op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m1)))
+vint32m1_t vfcvt_rtz_x(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m1_m)))
+vint32m1_t vfcvt_rtz_x(vbool32_t op0, vint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m2)))
+vint32m2_t vfcvt_rtz_x(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m2_m)))
+vint32m2_t vfcvt_rtz_x(vbool16_t op0, vint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m4)))
+vint32m4_t vfcvt_rtz_x(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m4_m)))
+vint32m4_t vfcvt_rtz_x(vbool8_t op0, vint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m8)))
+vint32m8_t vfcvt_rtz_x(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32m8_m)))
+vint32m8_t vfcvt_rtz_x(vbool4_t op0, vint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32mf2)))
+vint32mf2_t vfcvt_rtz_x(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i32mf2_m)))
+vint32mf2_t vfcvt_rtz_x(vbool64_t op0, vint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m1)))
+vuint32m1_t vfcvt_rtz_xu(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m1_m)))
+vuint32m1_t vfcvt_rtz_xu(vbool32_t op0, vuint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m2)))
+vuint32m2_t vfcvt_rtz_xu(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m2_m)))
+vuint32m2_t vfcvt_rtz_xu(vbool16_t op0, vuint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m4)))
+vuint32m4_t vfcvt_rtz_xu(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m4_m)))
+vuint32m4_t vfcvt_rtz_xu(vbool8_t op0, vuint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m8)))
+vuint32m8_t vfcvt_rtz_xu(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32m8_m)))
+vuint32m8_t vfcvt_rtz_xu(vbool4_t op0, vuint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32mf2)))
+vuint32mf2_t vfcvt_rtz_xu(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u32mf2_m)))
+vuint32mf2_t vfcvt_rtz_xu(vbool64_t op0, vuint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m1)))
+vint32m1_t vfcvt_x(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m1_m)))
+vint32m1_t vfcvt_x(vbool32_t op0, vint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m2)))
+vint32m2_t vfcvt_x(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m2_m)))
+vint32m2_t vfcvt_x(vbool16_t op0, vint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m4)))
+vint32m4_t vfcvt_x(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m4_m)))
+vint32m4_t vfcvt_x(vbool8_t op0, vint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m8)))
+vint32m8_t vfcvt_x(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32m8_m)))
+vint32m8_t vfcvt_x(vbool4_t op0, vint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32mf2)))
+vint32mf2_t vfcvt_x(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i32mf2_m)))
+vint32mf2_t vfcvt_x(vbool64_t op0, vint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m1)))
+vuint32m1_t vfcvt_xu(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m1_m)))
+vuint32m1_t vfcvt_xu(vbool32_t op0, vuint32m1_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m2)))
+vuint32m2_t vfcvt_xu(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m2_m)))
+vuint32m2_t vfcvt_xu(vbool16_t op0, vuint32m2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m4)))
+vuint32m4_t vfcvt_xu(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m4_m)))
+vuint32m4_t vfcvt_xu(vbool8_t op0, vuint32m4_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m8)))
+vuint32m8_t vfcvt_xu(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32m8_m)))
+vuint32m8_t vfcvt_xu(vbool4_t op0, vuint32m8_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32mf2)))
+vuint32mf2_t vfcvt_xu(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u32mf2_m)))
+vuint32mf2_t vfcvt_xu(vbool64_t op0, vuint32mf2_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32mf2)))
+vfloat32mf2_t vfncvt_f(vint64m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32mf2_m)))
+vfloat32mf2_t vfncvt_f(vbool64_t op0, vfloat32mf2_t op1, vint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32m1)))
+vfloat32m1_t vfncvt_f(vint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32m1_m)))
+vfloat32m1_t vfncvt_f(vbool32_t op0, vfloat32m1_t op1, vint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32m2)))
+vfloat32m2_t vfncvt_f(vint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32m2_m)))
+vfloat32m2_t vfncvt_f(vbool16_t op0, vfloat32m2_t op1, vint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32m4)))
+vfloat32m4_t vfncvt_f(vint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f32m4_m)))
+vfloat32m4_t vfncvt_f(vbool8_t op0, vfloat32m4_t op1, vint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32mf2)))
+vfloat32mf2_t vfncvt_f(vuint64m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32mf2_m)))
+vfloat32mf2_t vfncvt_f(vbool64_t op0, vfloat32mf2_t op1, vuint64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32m1)))
+vfloat32m1_t vfncvt_f(vuint64m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32m1_m)))
+vfloat32m1_t vfncvt_f(vbool32_t op0, vfloat32m1_t op1, vuint64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32m2)))
+vfloat32m2_t vfncvt_f(vuint64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32m2_m)))
+vfloat32m2_t vfncvt_f(vbool16_t op0, vfloat32m2_t op1, vuint64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32m4)))
+vfloat32m4_t vfncvt_f(vuint64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f32m4_m)))
+vfloat32m4_t vfncvt_f(vbool8_t op0, vfloat32m4_t op1, vuint64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16mf4)))
+vint16mf4_t vfncvt_rtz_x(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16mf4_m)))
+vint16mf4_t vfncvt_rtz_x(vbool64_t op0, vint16mf4_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16mf2)))
+vint16mf2_t vfncvt_rtz_x(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16mf2_m)))
+vint16mf2_t vfncvt_rtz_x(vbool32_t op0, vint16mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16m1)))
+vint16m1_t vfncvt_rtz_x(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16m1_m)))
+vint16m1_t vfncvt_rtz_x(vbool16_t op0, vint16m1_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16m2)))
+vint16m2_t vfncvt_rtz_x(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16m2_m)))
+vint16m2_t vfncvt_rtz_x(vbool8_t op0, vint16m2_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16m4)))
+vint16m4_t vfncvt_rtz_x(vfloat32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i16m4_m)))
+vint16m4_t vfncvt_rtz_x(vbool4_t op0, vint16m4_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf4)))
+vuint16mf4_t vfncvt_rtz_xu(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf4_m)))
+vuint16mf4_t vfncvt_rtz_xu(vbool64_t op0, vuint16mf4_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf2)))
+vuint16mf2_t vfncvt_rtz_xu(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16mf2_m)))
+vuint16mf2_t vfncvt_rtz_xu(vbool32_t op0, vuint16mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16m1)))
+vuint16m1_t vfncvt_rtz_xu(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16m1_m)))
+vuint16m1_t vfncvt_rtz_xu(vbool16_t op0, vuint16m1_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16m2)))
+vuint16m2_t vfncvt_rtz_xu(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16m2_m)))
+vuint16m2_t vfncvt_rtz_xu(vbool8_t op0, vuint16m2_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16m4)))
+vuint16m4_t vfncvt_rtz_xu(vfloat32m8_t op0, size_t op1);
+
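+/* Editor's note: illustrative sketch (comment only) for the conversion
+ * overloads above: vfcvt_x rounds float lanes to int32 using the current
+ * rounding mode, the _rtz_ forms truncate toward zero, and the vfncvt_*
+ * forms also narrow f32 lanes to 16-bit lanes (note the EMUL change, e.g.
+ * f32m1 -> i16mf2).
+ *
+ *   vint32m1_t  r   = vfcvt_x(v, vl);        // round to nearest int32
+ *   vint32m1_t  t   = vfcvt_rtz_x(v, vl);    // truncate toward zero
+ *   vint16mf2_t n16 = vfncvt_rtz_x(v, vl);   // truncate and narrow
+ */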
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u16m4_m))) +vuint16m4_t vfncvt_rtz_xu(vbool4_t op0, vuint16m4_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16mf4))) +vint16mf4_t vfncvt_x(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16mf4_m))) +vint16mf4_t vfncvt_x(vbool64_t op0, vint16mf4_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16mf2))) +vint16mf2_t vfncvt_x(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16mf2_m))) +vint16mf2_t vfncvt_x(vbool32_t op0, vint16mf2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16m1))) +vint16m1_t vfncvt_x(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16m1_m))) +vint16m1_t vfncvt_x(vbool16_t op0, vint16m1_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16m2))) +vint16m2_t vfncvt_x(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16m2_m))) +vint16m2_t vfncvt_x(vbool8_t op0, vint16m2_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16m4))) +vint16m4_t vfncvt_x(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i16m4_m))) +vint16m4_t vfncvt_x(vbool4_t op0, vint16m4_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16mf4))) +vuint16mf4_t vfncvt_xu(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16mf4_m))) +vuint16mf4_t vfncvt_xu(vbool64_t op0, vuint16mf4_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16mf2))) +vuint16mf2_t vfncvt_xu(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16mf2_m))) +vuint16mf2_t vfncvt_xu(vbool32_t op0, vuint16mf2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16m1))) +vuint16m1_t vfncvt_xu(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16m1_m))) +vuint16m1_t vfncvt_xu(vbool16_t op0, vuint16m1_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16m2))) +vuint16m2_t vfncvt_xu(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16m2_m))) +vuint16m2_t vfncvt_xu(vbool8_t op0, vuint16m2_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16m4))) +vuint16m4_t vfncvt_xu(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u16m4_m))) +vuint16m4_t vfncvt_xu(vbool4_t op0, vuint16m4_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m1))) +vfloat32m1_t vfrec7(vfloat32m1_t 
op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m1_m))) +vfloat32m1_t vfrec7(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m2))) +vfloat32m2_t vfrec7(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m2_m))) +vfloat32m2_t vfrec7(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m4))) +vfloat32m4_t vfrec7(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m4_m))) +vfloat32m4_t vfrec7(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m8))) +vfloat32m8_t vfrec7(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32m8_m))) +vfloat32m8_t vfrec7(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32mf2))) +vfloat32mf2_t vfrec7(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f32mf2_m))) +vfloat32mf2_t vfrec7(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m1))) +vfloat32m1_t vfrsqrt7(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m1_m))) +vfloat32m1_t vfrsqrt7(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m2))) +vfloat32m2_t vfrsqrt7(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m2_m))) +vfloat32m2_t vfrsqrt7(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m4))) +vfloat32m4_t vfrsqrt7(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m4_m))) +vfloat32m4_t vfrsqrt7(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m8))) +vfloat32m8_t vfrsqrt7(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32m8_m))) +vfloat32m8_t vfrsqrt7(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32mf2))) +vfloat32mf2_t vfrsqrt7(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f32mf2_m))) +vfloat32mf2_t vfrsqrt7(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m1))) +vfloat32m1_t vfsqrt(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m1_m))) +vfloat32m1_t vfsqrt(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m2))) +vfloat32m2_t vfsqrt(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m2_m))) +vfloat32m2_t vfsqrt(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m4))) +vfloat32m4_t vfsqrt(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m4_m))) +vfloat32m4_t vfsqrt(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m8))) +vfloat32m8_t vfsqrt(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32m8_m))) +vfloat32m8_t vfsqrt(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32mf2))) +vfloat32mf2_t vfsqrt(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f32mf2_m))) +vfloat32mf2_t vfsqrt(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32mf2))) +vfloat32mf2_t vfwcvt_f(vint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32mf2_m))) +vfloat32mf2_t vfwcvt_f(vbool64_t op0, vfloat32mf2_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m1))) +vfloat32m1_t vfwcvt_f(vint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m1_m))) +vfloat32m1_t vfwcvt_f(vbool32_t op0, vfloat32m1_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m2))) +vfloat32m2_t vfwcvt_f(vint16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m2_m))) +vfloat32m2_t vfwcvt_f(vbool16_t op0, vfloat32m2_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m4))) +vfloat32m4_t vfwcvt_f(vint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m4_m))) +vfloat32m4_t vfwcvt_f(vbool8_t op0, vfloat32m4_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m8))) +vfloat32m8_t vfwcvt_f(vint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f32m8_m))) +vfloat32m8_t vfwcvt_f(vbool4_t op0, vfloat32m8_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32mf2))) +vfloat32mf2_t vfwcvt_f(vuint16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32mf2_m))) +vfloat32mf2_t vfwcvt_f(vbool64_t op0, vfloat32mf2_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m1))) +vfloat32m1_t vfwcvt_f(vuint16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m1_m))) +vfloat32m1_t vfwcvt_f(vbool32_t op0, vfloat32m1_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m2))) +vfloat32m2_t vfwcvt_f(vuint16m1_t op0, size_t op1); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m2_m))) +vfloat32m2_t vfwcvt_f(vbool16_t op0, vfloat32m2_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m4))) +vfloat32m4_t vfwcvt_f(vuint16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m4_m))) +vfloat32m4_t vfwcvt_f(vbool8_t op0, vfloat32m4_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m8))) +vfloat32m8_t vfwcvt_f(vuint16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f32m8_m))) +vfloat32m8_t vfwcvt_f(vbool4_t op0, vfloat32m8_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m1))) +vint64m1_t vfwcvt_rtz_x(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m1_m))) +vint64m1_t vfwcvt_rtz_x(vbool64_t op0, vint64m1_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m2))) +vint64m2_t vfwcvt_rtz_x(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m2_m))) +vint64m2_t vfwcvt_rtz_x(vbool32_t op0, vint64m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m4))) +vint64m4_t vfwcvt_rtz_x(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m4_m))) +vint64m4_t vfwcvt_rtz_x(vbool16_t op0, vint64m4_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m8))) +vint64m8_t vfwcvt_rtz_x(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i64m8_m))) +vint64m8_t vfwcvt_rtz_x(vbool8_t op0, vint64m8_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m1))) +vuint64m1_t vfwcvt_rtz_xu(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m1_m))) +vuint64m1_t vfwcvt_rtz_xu(vbool64_t op0, vuint64m1_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m2))) +vuint64m2_t vfwcvt_rtz_xu(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m2_m))) +vuint64m2_t vfwcvt_rtz_xu(vbool32_t op0, vuint64m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m4))) +vuint64m4_t vfwcvt_rtz_xu(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m4_m))) +vuint64m4_t vfwcvt_rtz_xu(vbool16_t op0, vuint64m4_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m8))) +vuint64m8_t vfwcvt_rtz_xu(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u64m8_m))) +vuint64m8_t vfwcvt_rtz_xu(vbool8_t op0, vuint64m8_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m1))) +vint64m1_t vfwcvt_x(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m1_m))) +vint64m1_t vfwcvt_x(vbool64_t op0, vint64m1_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m2))) +vint64m2_t vfwcvt_x(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m2_m))) +vint64m2_t vfwcvt_x(vbool32_t op0, vint64m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m4))) +vint64m4_t vfwcvt_x(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m4_m))) +vint64m4_t vfwcvt_x(vbool16_t op0, vint64m4_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m8))) +vint64m8_t vfwcvt_x(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i64m8_m))) +vint64m8_t vfwcvt_x(vbool8_t op0, vint64m8_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m1))) +vuint64m1_t vfwcvt_xu(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m1_m))) +vuint64m1_t vfwcvt_xu(vbool64_t op0, vuint64m1_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m2))) +vuint64m2_t vfwcvt_xu(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m2_m))) +vuint64m2_t vfwcvt_xu(vbool32_t op0, vuint64m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m4))) +vuint64m4_t vfwcvt_xu(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m4_m))) +vuint64m4_t vfwcvt_xu(vbool16_t op0, vuint64m4_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m8))) +vuint64m8_t vfwcvt_xu(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u64m8_m))) +vuint64m8_t vfwcvt_xu(vbool8_t op0, vuint64m8_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32mf2_f32m1))) +vfloat32m1_t vlmul_ext_f32m1(vfloat32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32m1_f32m2))) +vfloat32m2_t vlmul_ext_f32m2(vfloat32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32mf2_f32m2))) +vfloat32m2_t vlmul_ext_f32m2(vfloat32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32m1_f32m4))) +vfloat32m4_t vlmul_ext_f32m4(vfloat32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32m2_f32m4))) +vfloat32m4_t vlmul_ext_f32m4(vfloat32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32mf2_f32m4))) +vfloat32m4_t vlmul_ext_f32m4(vfloat32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32m1_f32m8))) +vfloat32m8_t 
vlmul_ext_f32m8(vfloat32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32m2_f32m8))) +vfloat32m8_t vlmul_ext_f32m8(vfloat32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32m4_f32m8))) +vfloat32m8_t vlmul_ext_f32m8(vfloat32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f32mf2_f32m8))) +vfloat32m8_t vlmul_ext_f32m8(vfloat32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m1_f32mf2))) +vfloat32mf2_t vlmul_trunc_f32mf2(vfloat32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m2_f32mf2))) +vfloat32mf2_t vlmul_trunc_f32mf2(vfloat32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m4_f32mf2))) +vfloat32mf2_t vlmul_trunc_f32mf2(vfloat32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m8_f32mf2))) +vfloat32mf2_t vlmul_trunc_f32mf2(vfloat32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m2_f32m1))) +vfloat32m1_t vlmul_trunc_f32m1(vfloat32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m4_f32m1))) +vfloat32m1_t vlmul_trunc_f32m1(vfloat32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m8_f32m1))) +vfloat32m1_t vlmul_trunc_f32m1(vfloat32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m4_f32m2))) +vfloat32m2_t vlmul_trunc_f32m2(vfloat32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m8_f32m2))) +vfloat32m2_t vlmul_trunc_f32m2(vfloat32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f32m8_f32m4))) +vfloat32m4_t vlmul_trunc_f32m4(vfloat32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m1_f32m1))) +vfloat32m1_t vreinterpret_f32m1(vint32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m2_f32m2))) +vfloat32m2_t vreinterpret_f32m2(vint32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m4_f32m4))) +vfloat32m4_t vreinterpret_f32m4(vint32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32m8_f32m8))) +vfloat32m8_t vreinterpret_f32m8(vint32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i32mf2_f32mf2))) +vfloat32mf2_t vreinterpret_f32mf2(vint32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m1_f32m1))) +vfloat32m1_t vreinterpret_f32m1(vuint32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m2_f32m2))) +vfloat32m2_t vreinterpret_f32m2(vuint32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m4_f32m4))) +vfloat32m4_t vreinterpret_f32m4(vuint32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32m8_f32m8))) +vfloat32m8_t vreinterpret_f32m8(vuint32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u32mf2_f32mf2))) +vfloat32mf2_t vreinterpret_f32mf2(vuint32mf2_t op0); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m1_i32m1))) +vint32m1_t vreinterpret_i32m1(vfloat32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m2_i32m2))) +vint32m2_t vreinterpret_i32m2(vfloat32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m4_i32m4))) +vint32m4_t vreinterpret_i32m4(vfloat32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m8_i32m8))) +vint32m8_t vreinterpret_i32m8(vfloat32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32mf2_i32mf2))) +vint32mf2_t vreinterpret_i32mf2(vfloat32mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m1_u32m1))) +vuint32m1_t vreinterpret_u32m1(vfloat32m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m2_u32m2))) +vuint32m2_t vreinterpret_u32m2(vfloat32m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m4_u32m4))) +vuint32m4_t vreinterpret_u32m4(vfloat32m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32m8_u32m8))) +vuint32m8_t vreinterpret_u32m8(vfloat32m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f32mf2_u32mf2))) +vuint32mf2_t vreinterpret_u32mf2(vfloat32mf2_t op0); + +#endif + +#if defined(__riscv_d) +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_f64m1_m))) +vfloat64m1_t vlse64(vbool64_t op0, vfloat64m1_t op1, const double * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_f64m2_m))) +vfloat64m2_t vlse64(vbool32_t op0, vfloat64m2_t op1, const double * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_f64m4_m))) +vfloat64m4_t vlse64(vbool16_t op0, vfloat64m4_t op1, const double * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse64_v_f64m8_m))) +vfloat64m8_t vlse64(vbool8_t op0, vfloat64m8_t op1, const double * op2, ptrdiff_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m1))) +void vsse64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m1_m))) +void vsse64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m2))) +void vsse64(double * op0, ptrdiff_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m2_m))) +void vsse64(vbool32_t op0, double * op1, ptrdiff_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m4))) +void vsse64(double * op0, ptrdiff_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m4_m))) +void vsse64(vbool16_t op0, double * op1, ptrdiff_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m8))) +void vsse64(double * op0, ptrdiff_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse64_v_f64m8_m))) +void 
vsse64(vbool8_t op0, double * op1, ptrdiff_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m1))) +vfloat64m1_t vluxei8(const double * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m1_m))) +vfloat64m1_t vluxei8(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m2))) +vfloat64m2_t vluxei8(const double * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m2_m))) +vfloat64m2_t vluxei8(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m4))) +vfloat64m4_t vluxei8(const double * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m4_m))) +vfloat64m4_t vluxei8(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m8))) +vfloat64m8_t vluxei8(const double * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f64m8_m))) +vfloat64m8_t vluxei8(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m1))) +vfloat64m1_t vluxei16(const double * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m1_m))) +vfloat64m1_t vluxei16(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m2))) +vfloat64m2_t vluxei16(const double * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m2_m))) +vfloat64m2_t vluxei16(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m4))) +vfloat64m4_t vluxei16(const double * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m4_m))) +vfloat64m4_t vluxei16(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m8))) +vfloat64m8_t vluxei16(const double * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f64m8_m))) +vfloat64m8_t vluxei16(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m1))) +vfloat64m1_t vluxei32(const double * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m1_m))) +vfloat64m1_t vluxei32(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m2))) +vfloat64m2_t vluxei32(const double * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m2_m))) 
+vfloat64m2_t vluxei32(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m4))) +vfloat64m4_t vluxei32(const double * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m4_m))) +vfloat64m4_t vluxei32(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m8))) +vfloat64m8_t vluxei32(const double * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f64m8_m))) +vfloat64m8_t vluxei32(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m1))) +vfloat64m1_t vluxei64(const double * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m1_m))) +vfloat64m1_t vluxei64(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m2))) +vfloat64m2_t vluxei64(const double * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m2_m))) +vfloat64m2_t vluxei64(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m4))) +vfloat64m4_t vluxei64(const double * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m4_m))) +vfloat64m4_t vluxei64(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m8))) +vfloat64m8_t vluxei64(const double * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f64m8_m))) +vfloat64m8_t vluxei64(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m1))) +vfloat64m1_t vloxei8(const double * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m1_m))) +vfloat64m1_t vloxei8(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m2))) +vfloat64m2_t vloxei8(const double * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m2_m))) +vfloat64m2_t vloxei8(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m4))) +vfloat64m4_t vloxei8(const double * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m4_m))) +vfloat64m4_t vloxei8(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m8))) +vfloat64m8_t vloxei8(const double * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f64m8_m))) +vfloat64m8_t vloxei8(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m1))) +vfloat64m1_t vloxei16(const double * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m1_m))) +vfloat64m1_t vloxei16(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m2))) +vfloat64m2_t vloxei16(const double * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m2_m))) +vfloat64m2_t vloxei16(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m4))) +vfloat64m4_t vloxei16(const double * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m4_m))) +vfloat64m4_t vloxei16(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m8))) +vfloat64m8_t vloxei16(const double * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f64m8_m))) +vfloat64m8_t vloxei16(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m1))) +vfloat64m1_t vloxei32(const double * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m1_m))) +vfloat64m1_t vloxei32(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m2))) +vfloat64m2_t vloxei32(const double * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m2_m))) +vfloat64m2_t vloxei32(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m4))) +vfloat64m4_t vloxei32(const double * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m4_m))) +vfloat64m4_t vloxei32(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m8))) +vfloat64m8_t vloxei32(const double * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f64m8_m))) +vfloat64m8_t vloxei32(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m1))) +vfloat64m1_t vloxei64(const double * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m1_m))) +vfloat64m1_t vloxei64(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m2))) +vfloat64m2_t vloxei64(const double * op0, vuint64m2_t 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m2_m))) +vfloat64m2_t vloxei64(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m4))) +vfloat64m4_t vloxei64(const double * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m4_m))) +vfloat64m4_t vloxei64(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m8))) +vfloat64m8_t vloxei64(const double * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f64m8_m))) +vfloat64m8_t vloxei64(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f64m1))) +vfloat64m1_t vmv_v(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f64m2))) +vfloat64m2_t vmv_v(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f64m4))) +vfloat64m4_t vmv_v(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f64m8))) +vfloat64m8_t vmv_v(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m1))) +vfloat64m1_t vfadd(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m1_m))) +vfloat64m1_t vfadd(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m2))) +vfloat64m2_t vfadd(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m2_m))) +vfloat64m2_t vfadd(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m4))) +vfloat64m4_t vfadd(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m4_m))) +vfloat64m4_t vfadd(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m8))) +vfloat64m8_t vfadd(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f64m8_m))) +vfloat64m8_t vfadd(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m1))) +vfloat64m1_t vfadd(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m1_m))) +vfloat64m1_t vfadd(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m2))) +vfloat64m2_t vfadd(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m2_m))) +vfloat64m2_t vfadd(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + 
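In the memory overloads above, the masked vlse64/vluxei/vloxei forms take the mask first, then the maskedoff vector, then the address operands; only the masked loads are overloaded, since a bare pointer alone does not pin down the return type, so unmasked loads keep their explicit _v_f64m1-style names. One way to combine the strided forms with the overloaded vfadd, under the same toolchain assumptions (the matrix layout and names are illustrative):

#include <stddef.h>
#include <riscv_vector.h>

/* Add a scalar to one column of a row-major rows x cols double matrix,
 * walking the column with a byte stride of cols * sizeof(double). */
void add_to_column(double *m, size_t rows, size_t cols, size_t col, double s) {
  double *p = m + col;
  ptrdiff_t stride = (ptrdiff_t)(cols * sizeof(double));
  for (size_t vl; rows > 0; rows -= vl, p += vl * cols) {
    vl = vsetvl_e64m1(rows);
    vfloat64m1_t v = vlse64_v_f64m1(p, stride, vl);  /* strided load */
    v = vfadd(v, s, vl);                             /* overloaded vector-scalar add */
    vsse64(p, stride, v, vl);                        /* overloaded strided store */
  }
}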
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m4))) +vfloat64m4_t vfadd(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m4_m))) +vfloat64m4_t vfadd(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m8))) +vfloat64m8_t vfadd(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f64m8_m))) +vfloat64m8_t vfadd(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m1))) +vfloat64m1_t vfsub(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m1_m))) +vfloat64m1_t vfsub(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m2))) +vfloat64m2_t vfsub(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m2_m))) +vfloat64m2_t vfsub(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m4))) +vfloat64m4_t vfsub(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m4_m))) +vfloat64m4_t vfsub(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m8))) +vfloat64m8_t vfsub(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f64m8_m))) +vfloat64m8_t vfsub(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m1))) +vfloat64m1_t vfsub(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m1_m))) +vfloat64m1_t vfsub(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m2))) +vfloat64m2_t vfsub(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m2_m))) +vfloat64m2_t vfsub(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m4))) +vfloat64m4_t vfsub(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m4_m))) +vfloat64m4_t vfsub(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m8))) +vfloat64m8_t vfsub(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f64m8_m))) +vfloat64m8_t vfsub(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m1))) +vfloat64m1_t vfrsub(vfloat64m1_t op0, double op1, size_t op2); + 
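vfsub and vfrsub differ only in operand order for the vector-scalar form: vfsub(v, s, vl) computes v - s, while vfrsub(v, s, vl) computes s - v. For example, y[i] = 1.0 - x[i] can be written as the following sketch (same assumptions; names illustrative):

#include <stddef.h>
#include <riscv_vector.h>

/* y[i] = 1.0 - x[i] via the reversed-operand subtract. */
void one_minus(const double *x, double *y, size_t n) {
  for (size_t vl; n > 0; n -= vl, x += vl, y += vl) {
    vl = vsetvl_e64m1(n);
    vfloat64m1_t v = vle64_v_f64m1(x, vl);
    vse64_v_f64m1(y, vfrsub(v, 1.0, vl), vl);
  }
}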
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m1_m))) +vfloat64m1_t vfrsub(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m2))) +vfloat64m2_t vfrsub(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m2_m))) +vfloat64m2_t vfrsub(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m4))) +vfloat64m4_t vfrsub(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m4_m))) +vfloat64m4_t vfrsub(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m8))) +vfloat64m8_t vfrsub(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f64m8_m))) +vfloat64m8_t vfrsub(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m1))) +vfloat64m1_t vfmul(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m1_m))) +vfloat64m1_t vfmul(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m2))) +vfloat64m2_t vfmul(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m2_m))) +vfloat64m2_t vfmul(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m4))) +vfloat64m4_t vfmul(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m4_m))) +vfloat64m4_t vfmul(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m8))) +vfloat64m8_t vfmul(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f64m8_m))) +vfloat64m8_t vfmul(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m1))) +vfloat64m1_t vfmul(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m1_m))) +vfloat64m1_t vfmul(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m2))) +vfloat64m2_t vfmul(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m2_m))) +vfloat64m2_t vfmul(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m4))) +vfloat64m4_t vfmul(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m4_m))) +vfloat64m4_t vfmul(vbool16_t op0, vfloat64m4_t op1, 
vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m8))) +vfloat64m8_t vfmul(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f64m8_m))) +vfloat64m8_t vfmul(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m1))) +vfloat64m1_t vfdiv(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m1_m))) +vfloat64m1_t vfdiv(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m2))) +vfloat64m2_t vfdiv(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m2_m))) +vfloat64m2_t vfdiv(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m4))) +vfloat64m4_t vfdiv(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m4_m))) +vfloat64m4_t vfdiv(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m8))) +vfloat64m8_t vfdiv(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f64m8_m))) +vfloat64m8_t vfdiv(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m1))) +vfloat64m1_t vfdiv(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m1_m))) +vfloat64m1_t vfdiv(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m2))) +vfloat64m2_t vfdiv(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m2_m))) +vfloat64m2_t vfdiv(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m4))) +vfloat64m4_t vfdiv(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m4_m))) +vfloat64m4_t vfdiv(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m8))) +vfloat64m8_t vfdiv(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f64m8_m))) +vfloat64m8_t vfdiv(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m1))) +vfloat64m1_t vfrdiv(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m1_m))) +vfloat64m1_t vfrdiv(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m2))) +vfloat64m2_t 
vfrdiv(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m2_m))) +vfloat64m2_t vfrdiv(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m4))) +vfloat64m4_t vfrdiv(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m4_m))) +vfloat64m4_t vfrdiv(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m8))) +vfloat64m8_t vfrdiv(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f64m8_m))) +vfloat64m8_t vfrdiv(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m1))) +vfloat64m1_t vfmacc(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m1_m))) +vfloat64m1_t vfmacc(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m2))) +vfloat64m2_t vfmacc(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m2_m))) +vfloat64m2_t vfmacc(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m4))) +vfloat64m4_t vfmacc(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m4_m))) +vfloat64m4_t vfmacc(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m8))) +vfloat64m8_t vfmacc(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f64m8_m))) +vfloat64m8_t vfmacc(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m1))) +vfloat64m1_t vfmacc(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m1_m))) +vfloat64m1_t vfmacc(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m2))) +vfloat64m2_t vfmacc(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m2_m))) +vfloat64m2_t vfmacc(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m4))) +vfloat64m4_t vfmacc(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m4_m))) +vfloat64m4_t vfmacc(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m8))) 
+vfloat64m8_t vfmacc(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f64m8_m))) +vfloat64m8_t vfmacc(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m1))) +vfloat64m1_t vfnmacc(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m1_m))) +vfloat64m1_t vfnmacc(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m2))) +vfloat64m2_t vfnmacc(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m2_m))) +vfloat64m2_t vfnmacc(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m4))) +vfloat64m4_t vfnmacc(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m4_m))) +vfloat64m4_t vfnmacc(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m8))) +vfloat64m8_t vfnmacc(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f64m8_m))) +vfloat64m8_t vfnmacc(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m1))) +vfloat64m1_t vfnmacc(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m1_m))) +vfloat64m1_t vfnmacc(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m2))) +vfloat64m2_t vfnmacc(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m2_m))) +vfloat64m2_t vfnmacc(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m4))) +vfloat64m4_t vfnmacc(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m4_m))) +vfloat64m4_t vfnmacc(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m8))) +vfloat64m8_t vfnmacc(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f64m8_m))) +vfloat64m8_t vfnmacc(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m1))) +vfloat64m1_t vfmsac(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m1_m))) +vfloat64m1_t vfmsac(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m2))) +vfloat64m2_t vfmsac(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m2_m))) +vfloat64m2_t vfmsac(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m4))) +vfloat64m4_t vfmsac(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m4_m))) +vfloat64m4_t vfmsac(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m8))) +vfloat64m8_t vfmsac(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f64m8_m))) +vfloat64m8_t vfmsac(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m1))) +vfloat64m1_t vfmsac(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m1_m))) +vfloat64m1_t vfmsac(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m2))) +vfloat64m2_t vfmsac(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m2_m))) +vfloat64m2_t vfmsac(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m4))) +vfloat64m4_t vfmsac(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m4_m))) +vfloat64m4_t vfmsac(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m8))) +vfloat64m8_t vfmsac(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f64m8_m))) +vfloat64m8_t vfmsac(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m1))) +vfloat64m1_t vfnmsac(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m1_m))) +vfloat64m1_t vfnmsac(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m2))) +vfloat64m2_t vfnmsac(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m2_m))) +vfloat64m2_t vfnmsac(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m4))) +vfloat64m4_t vfnmsac(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m4_m))) +vfloat64m4_t vfnmsac(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m8))) +vfloat64m8_t vfnmsac(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f64m8_m))) +vfloat64m8_t vfnmsac(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m1))) +vfloat64m1_t vfnmsac(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m1_m))) +vfloat64m1_t vfnmsac(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m2))) +vfloat64m2_t vfnmsac(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m2_m))) +vfloat64m2_t vfnmsac(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m4))) +vfloat64m4_t vfnmsac(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m4_m))) +vfloat64m4_t vfnmsac(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m8))) +vfloat64m8_t vfnmsac(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f64m8_m))) +vfloat64m8_t vfnmsac(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m1))) +vfloat64m1_t vfmadd(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m1_m))) +vfloat64m1_t vfmadd(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m2))) +vfloat64m2_t vfmadd(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m2_m))) +vfloat64m2_t vfmadd(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m4))) +vfloat64m4_t vfmadd(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m4_m))) +vfloat64m4_t vfmadd(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m8))) +vfloat64m8_t vfmadd(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f64m8_m))) +vfloat64m8_t vfmadd(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m1))) 
+vfloat64m1_t vfmadd(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m1_m))) +vfloat64m1_t vfmadd(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m2))) +vfloat64m2_t vfmadd(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m2_m))) +vfloat64m2_t vfmadd(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m4))) +vfloat64m4_t vfmadd(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m4_m))) +vfloat64m4_t vfmadd(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m8))) +vfloat64m8_t vfmadd(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f64m8_m))) +vfloat64m8_t vfmadd(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m1))) +vfloat64m1_t vfnmadd(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m1_m))) +vfloat64m1_t vfnmadd(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m2))) +vfloat64m2_t vfnmadd(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m2_m))) +vfloat64m2_t vfnmadd(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m4))) +vfloat64m4_t vfnmadd(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m4_m))) +vfloat64m4_t vfnmadd(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m8))) +vfloat64m8_t vfnmadd(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f64m8_m))) +vfloat64m8_t vfnmadd(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m1))) +vfloat64m1_t vfnmadd(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m1_m))) +vfloat64m1_t vfnmadd(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m2))) +vfloat64m2_t vfnmadd(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m2_m))) +vfloat64m2_t vfnmadd(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + 
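As a usage sketch (not part of the patch), the overloaded fused multiply-add declarations above are normally driven by a vsetvl strip-mined loop. This assumes the same clang 14 riscv_vector.h on a -march=rv64gcv target; the function and buffer names are illustrative:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* d[i] = a[i] * b[i] + d[i], using the unmasked vv overload of vfmacc
     * declared above; vsetvl_e64m8 picks how many elements each pass covers. */
    void fma_f64(double *d, const double *a, const double *b, size_t n) {
      for (size_t vl; n > 0; n -= vl, a += vl, b += vl, d += vl) {
        vl = vsetvl_e64m8(n);
        vfloat64m8_t va = vle64_v_f64m8(a, vl);
        vfloat64m8_t vb = vle64_v_f64m8(b, vl);
        vfloat64m8_t vd = vle64_v_f64m8(d, vl);
        vd = vfmacc(vd, va, vb, vl);  /* overload resolves to vfmacc_vv_f64m8 */
        vse64_v_f64m8(d, vd, vl);
      }
    }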
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m4))) +vfloat64m4_t vfnmadd(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m4_m))) +vfloat64m4_t vfnmadd(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m8))) +vfloat64m8_t vfnmadd(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f64m8_m))) +vfloat64m8_t vfnmadd(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m1))) +vfloat64m1_t vfmsub(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m1_m))) +vfloat64m1_t vfmsub(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m2))) +vfloat64m2_t vfmsub(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m2_m))) +vfloat64m2_t vfmsub(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m4))) +vfloat64m4_t vfmsub(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m4_m))) +vfloat64m4_t vfmsub(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m8))) +vfloat64m8_t vfmsub(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f64m8_m))) +vfloat64m8_t vfmsub(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m1))) +vfloat64m1_t vfmsub(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m1_m))) +vfloat64m1_t vfmsub(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m2))) +vfloat64m2_t vfmsub(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m2_m))) +vfloat64m2_t vfmsub(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m4))) +vfloat64m4_t vfmsub(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m4_m))) +vfloat64m4_t vfmsub(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m8))) +vfloat64m8_t vfmsub(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f64m8_m))) +vfloat64m8_t vfmsub(vbool8_t 
op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m1))) +vfloat64m1_t vfnmsub(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m1_m))) +vfloat64m1_t vfnmsub(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m2))) +vfloat64m2_t vfnmsub(vfloat64m2_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m2_m))) +vfloat64m2_t vfnmsub(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m4))) +vfloat64m4_t vfnmsub(vfloat64m4_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m4_m))) +vfloat64m4_t vfnmsub(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m8))) +vfloat64m8_t vfnmsub(vfloat64m8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f64m8_m))) +vfloat64m8_t vfnmsub(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m1))) +vfloat64m1_t vfnmsub(vfloat64m1_t op0, double op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m1_m))) +vfloat64m1_t vfnmsub(vbool64_t op0, vfloat64m1_t op1, double op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m2))) +vfloat64m2_t vfnmsub(vfloat64m2_t op0, double op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m2_m))) +vfloat64m2_t vfnmsub(vbool32_t op0, vfloat64m2_t op1, double op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m4))) +vfloat64m4_t vfnmsub(vfloat64m4_t op0, double op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m4_m))) +vfloat64m4_t vfnmsub(vbool16_t op0, vfloat64m4_t op1, double op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m8))) +vfloat64m8_t vfnmsub(vfloat64m8_t op0, double op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f64m8_m))) +vfloat64m8_t vfnmsub(vbool8_t op0, vfloat64m8_t op1, double op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m1))) +vfloat64m1_t vfmin(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m1_m))) +vfloat64m1_t vfmin(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m2))) +vfloat64m2_t vfmin(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m2_m))) +vfloat64m2_t vfmin(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m4))) +vfloat64m4_t vfmin(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m4_m))) +vfloat64m4_t vfmin(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m8))) +vfloat64m8_t vfmin(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f64m8_m))) +vfloat64m8_t vfmin(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m1))) +vfloat64m1_t vfmin(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m1_m))) +vfloat64m1_t vfmin(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m2))) +vfloat64m2_t vfmin(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m2_m))) +vfloat64m2_t vfmin(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m4))) +vfloat64m4_t vfmin(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m4_m))) +vfloat64m4_t vfmin(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m8))) +vfloat64m8_t vfmin(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f64m8_m))) +vfloat64m8_t vfmin(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m1))) +vfloat64m1_t vfmax(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m1_m))) +vfloat64m1_t vfmax(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m2))) +vfloat64m2_t vfmax(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m2_m))) +vfloat64m2_t vfmax(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m4))) +vfloat64m4_t vfmax(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m4_m))) +vfloat64m4_t vfmax(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m8))) +vfloat64m8_t vfmax(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f64m8_m))) +vfloat64m8_t vfmax(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t 
op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m1))) +vfloat64m1_t vfmax(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m1_m))) +vfloat64m1_t vfmax(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m2))) +vfloat64m2_t vfmax(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m2_m))) +vfloat64m2_t vfmax(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m4))) +vfloat64m4_t vfmax(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m4_m))) +vfloat64m4_t vfmax(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m8))) +vfloat64m8_t vfmax(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f64m8_m))) +vfloat64m8_t vfmax(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m1))) +vfloat64m1_t vfsgnj(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m1_m))) +vfloat64m1_t vfsgnj(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m2))) +vfloat64m2_t vfsgnj(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m2_m))) +vfloat64m2_t vfsgnj(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m4))) +vfloat64m4_t vfsgnj(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m4_m))) +vfloat64m4_t vfsgnj(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m8))) +vfloat64m8_t vfsgnj(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f64m8_m))) +vfloat64m8_t vfsgnj(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m1))) +vfloat64m1_t vfsgnj(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m1_m))) +vfloat64m1_t vfsgnj(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m2))) +vfloat64m2_t vfsgnj(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m2_m))) +vfloat64m2_t vfsgnj(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m4))) 
+vfloat64m4_t vfsgnj(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m4_m))) +vfloat64m4_t vfsgnj(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m8))) +vfloat64m8_t vfsgnj(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f64m8_m))) +vfloat64m8_t vfsgnj(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m1))) +vfloat64m1_t vfsgnjn(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m1_m))) +vfloat64m1_t vfsgnjn(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m2))) +vfloat64m2_t vfsgnjn(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m2_m))) +vfloat64m2_t vfsgnjn(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m4))) +vfloat64m4_t vfsgnjn(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m4_m))) +vfloat64m4_t vfsgnjn(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m8))) +vfloat64m8_t vfsgnjn(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f64m8_m))) +vfloat64m8_t vfsgnjn(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m1))) +vfloat64m1_t vfsgnjn(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m1_m))) +vfloat64m1_t vfsgnjn(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m2))) +vfloat64m2_t vfsgnjn(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m2_m))) +vfloat64m2_t vfsgnjn(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m4))) +vfloat64m4_t vfsgnjn(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m4_m))) +vfloat64m4_t vfsgnjn(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m8))) +vfloat64m8_t vfsgnjn(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f64m8_m))) +vfloat64m8_t vfsgnjn(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m1))) +vfloat64m1_t vfsgnjx(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + 
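The vf overloads of vfmin/vfmax above take the scalar as the second operand, so a clamp is a two-call sequence. A minimal sketch under the same header, with illustrative names:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Clamp x[i] into [lo, hi] in place via the scalar (vf) overloads. */
    void clamp_f64(double *x, double lo, double hi, size_t n) {
      for (size_t vl; n > 0; n -= vl, x += vl) {
        vl = vsetvl_e64m4(n);
        vfloat64m4_t v = vle64_v_f64m4(x, vl);
        v = vfmax(v, lo, vl);   /* raise anything below lo */
        v = vfmin(v, hi, vl);   /* cap anything above hi  */
        vse64_v_f64m4(x, v, vl);
      }
    }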
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m1_m))) +vfloat64m1_t vfsgnjx(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m2))) +vfloat64m2_t vfsgnjx(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m2_m))) +vfloat64m2_t vfsgnjx(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m4))) +vfloat64m4_t vfsgnjx(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m4_m))) +vfloat64m4_t vfsgnjx(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m8))) +vfloat64m8_t vfsgnjx(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f64m8_m))) +vfloat64m8_t vfsgnjx(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m1))) +vfloat64m1_t vfsgnjx(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m1_m))) +vfloat64m1_t vfsgnjx(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m2))) +vfloat64m2_t vfsgnjx(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m2_m))) +vfloat64m2_t vfsgnjx(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m4))) +vfloat64m4_t vfsgnjx(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m4_m))) +vfloat64m4_t vfsgnjx(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m8))) +vfloat64m8_t vfsgnjx(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f64m8_m))) +vfloat64m8_t vfsgnjx(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m1))) +vfloat64m1_t vfabs(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m1_m))) +vfloat64m1_t vfabs(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m2))) +vfloat64m2_t vfabs(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m2_m))) +vfloat64m2_t vfabs(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m4))) +vfloat64m4_t vfabs(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m4_m))) +vfloat64m4_t vfabs(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); 
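Across these masked (_m) overloads the calling convention is mask first, then the maskedoff vector that supplies the inactive lanes. A sketch using vfabs together with the vmflt scalar-compare overload declared a little further below; the helper name is illustrative:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* |x|: clear the sign only on lanes where x < 0, passing x itself as the
     * maskedoff operand so non-negative lanes flow through untouched. */
    vfloat64m1_t abs_masked(vfloat64m1_t x, size_t vl) {
      vbool64_t neg = vmflt(x, 0.0, vl);  /* vf overload: compare vs. scalar */
      return vfabs(neg, x, x, vl);        /* mask, maskedoff, source, vl */
    }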
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m8))) +vfloat64m8_t vfabs(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f64m8_m))) +vfloat64m8_t vfabs(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m1_b64))) +vbool64_t vmfeq(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m1_b64_m))) +vbool64_t vmfeq(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m2_b32))) +vbool32_t vmfeq(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m2_b32_m))) +vbool32_t vmfeq(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m4_b16))) +vbool16_t vmfeq(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m4_b16_m))) +vbool16_t vmfeq(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m8_b8))) +vbool8_t vmfeq(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vv_f64m8_b8_m))) +vbool8_t vmfeq(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m1_b64))) +vbool64_t vmfeq(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m1_b64_m))) +vbool64_t vmfeq(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m2_b32))) +vbool32_t vmfeq(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m2_b32_m))) +vbool32_t vmfeq(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m4_b16))) +vbool16_t vmfeq(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m4_b16_m))) +vbool16_t vmfeq(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m8_b8))) +vbool8_t vmfeq(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f64m8_b8_m))) +vbool8_t vmfeq(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m1_b64))) +vbool64_t vmfne(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m1_b64_m))) +vbool64_t vmfne(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m2_b32))) +vbool32_t vmfne(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m2_b32_m))) +vbool32_t vmfne(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m4_b16))) +vbool16_t vmfne(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m4_b16_m))) +vbool16_t vmfne(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m8_b8))) +vbool8_t vmfne(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f64m8_b8_m))) +vbool8_t vmfne(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m1_b64))) +vbool64_t vmfne(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m1_b64_m))) +vbool64_t vmfne(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m2_b32))) +vbool32_t vmfne(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m2_b32_m))) +vbool32_t vmfne(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m4_b16))) +vbool16_t vmfne(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m4_b16_m))) +vbool16_t vmfne(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m8_b8))) +vbool8_t vmfne(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f64m8_b8_m))) +vbool8_t vmfne(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m1_b64))) +vbool64_t vmflt(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m1_b64_m))) +vbool64_t vmflt(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m2_b32))) +vbool32_t vmflt(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m2_b32_m))) +vbool32_t vmflt(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m4_b16))) +vbool16_t vmflt(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m4_b16_m))) +vbool16_t vmflt(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m8_b8))) +vbool8_t vmflt(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f64m8_b8_m))) +vbool8_t vmflt(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m1_b64))) +vbool64_t vmflt(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m1_b64_m))) +vbool64_t vmflt(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m2_b32))) +vbool32_t vmflt(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m2_b32_m))) +vbool32_t vmflt(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m4_b16))) +vbool16_t vmflt(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m4_b16_m))) +vbool16_t vmflt(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m8_b8))) +vbool8_t vmflt(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f64m8_b8_m))) +vbool8_t vmflt(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m1_b64))) +vbool64_t vmfle(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m1_b64_m))) +vbool64_t vmfle(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m2_b32))) +vbool32_t vmfle(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m2_b32_m))) +vbool32_t vmfle(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m4_b16))) +vbool16_t vmfle(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m4_b16_m))) +vbool16_t vmfle(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m8_b8))) +vbool8_t vmfle(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f64m8_b8_m))) +vbool8_t vmfle(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m1_b64))) +vbool64_t vmfle(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m1_b64_m))) +vbool64_t vmfle(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m2_b32))) +vbool32_t vmfle(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m2_b32_m))) +vbool32_t vmfle(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m4_b16))) +vbool16_t vmfle(vfloat64m4_t op0, double op1, size_t op2); + 
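These vmf* comparisons yield mask registers that feed merge and masked ops; since vfmerge (declared further below) writes the scalar on active lanes, a NaN scrub is one compare plus one merge. A sketch with illustrative names, again assuming the same header:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Replace NaNs with 0.0: a NaN is the only value unequal to itself. */
    void zero_nans(double *x, size_t n) {
      for (size_t vl; n > 0; n -= vl, x += vl) {
        vl = vsetvl_e64m2(n);
        vfloat64m2_t v = vle64_v_f64m2(x, vl);
        vbool32_t is_nan = vmfne(v, v, vl);  /* true on NaN lanes only */
        v = vfmerge(is_nan, v, 0.0, vl);     /* scalar 0.0 where mask set */
        vse64_v_f64m2(x, v, vl);
      }
    }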
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m4_b16_m))) +vbool16_t vmfle(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m8_b8))) +vbool8_t vmfle(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f64m8_b8_m))) +vbool8_t vmfle(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m1_b64))) +vbool64_t vmfgt(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m1_b64_m))) +vbool64_t vmfgt(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m2_b32))) +vbool32_t vmfgt(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m2_b32_m))) +vbool32_t vmfgt(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m4_b16))) +vbool16_t vmfgt(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m4_b16_m))) +vbool16_t vmfgt(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m8_b8))) +vbool8_t vmfgt(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f64m8_b8_m))) +vbool8_t vmfgt(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m1_b64))) +vbool64_t vmfgt(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m1_b64_m))) +vbool64_t vmfgt(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m2_b32))) +vbool32_t vmfgt(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m2_b32_m))) +vbool32_t vmfgt(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m4_b16))) +vbool16_t vmfgt(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m4_b16_m))) +vbool16_t vmfgt(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m8_b8))) +vbool8_t vmfgt(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f64m8_b8_m))) +vbool8_t vmfgt(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m1_b64))) +vbool64_t vmfge(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m1_b64_m))) +vbool64_t vmfge(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, vfloat64m1_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m2_b32))) +vbool32_t vmfge(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m2_b32_m))) +vbool32_t vmfge(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m4_b16))) +vbool16_t vmfge(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m4_b16_m))) +vbool16_t vmfge(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m8_b8))) +vbool8_t vmfge(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f64m8_b8_m))) +vbool8_t vmfge(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m1_b64))) +vbool64_t vmfge(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m1_b64_m))) +vbool64_t vmfge(vbool64_t op0, vbool64_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m2_b32))) +vbool32_t vmfge(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m2_b32_m))) +vbool32_t vmfge(vbool32_t op0, vbool32_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m4_b16))) +vbool16_t vmfge(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m4_b16_m))) +vbool16_t vmfge(vbool16_t op0, vbool16_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m8_b8))) +vbool8_t vmfge(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f64m8_b8_m))) +vbool8_t vmfge(vbool8_t op0, vbool8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f64m1))) +vfloat64m1_t vmerge(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f64m2))) +vfloat64m2_t vmerge(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f64m4))) +vfloat64m4_t vmerge(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f64m8))) +vfloat64m8_t vmerge(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f64m1))) +vfloat64m1_t vfmerge(vbool64_t op0, vfloat64m1_t op1, double op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f64m2))) +vfloat64m2_t vfmerge(vbool32_t op0, vfloat64m2_t op1, double op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f64m4))) +vfloat64m4_t vfmerge(vbool16_t op0, vfloat64m4_t op1, 
double op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f64m8))) +vfloat64m8_t vfmerge(vbool8_t op0, vfloat64m8_t op1, double op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m1_f64m1))) +vfloat64m1_t vfredmax(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m1_f64m1_m))) +vfloat64m1_t vfredmax(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m2_f64m1))) +vfloat64m1_t vfredmax(vfloat64m1_t op0, vfloat64m2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m2_f64m1_m))) +vfloat64m1_t vfredmax(vbool32_t op0, vfloat64m1_t op1, vfloat64m2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m4_f64m1))) +vfloat64m1_t vfredmax(vfloat64m1_t op0, vfloat64m4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m4_f64m1_m))) +vfloat64m1_t vfredmax(vbool16_t op0, vfloat64m1_t op1, vfloat64m4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m8_f64m1))) +vfloat64m1_t vfredmax(vfloat64m1_t op0, vfloat64m8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f64m8_f64m1_m))) +vfloat64m1_t vfredmax(vbool8_t op0, vfloat64m1_t op1, vfloat64m8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m1_f64m1))) +vfloat64m1_t vfredmin(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m1_f64m1_m))) +vfloat64m1_t vfredmin(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m2_f64m1))) +vfloat64m1_t vfredmin(vfloat64m1_t op0, vfloat64m2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m2_f64m1_m))) +vfloat64m1_t vfredmin(vbool32_t op0, vfloat64m1_t op1, vfloat64m2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m4_f64m1))) +vfloat64m1_t vfredmin(vfloat64m1_t op0, vfloat64m4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m4_f64m1_m))) +vfloat64m1_t vfredmin(vbool16_t op0, vfloat64m1_t op1, vfloat64m4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m8_f64m1))) +vfloat64m1_t vfredmin(vfloat64m1_t op0, vfloat64m8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f64m8_f64m1_m))) +vfloat64m1_t vfredmin(vbool8_t op0, vfloat64m1_t op1, vfloat64m8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m1_f64m1))) +vfloat64m1_t vfredsum(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m1_f64m1_m))) +vfloat64m1_t vfredsum(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m2_f64m1))) +vfloat64m1_t vfredsum(vfloat64m1_t op0, vfloat64m2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m2_f64m1_m))) +vfloat64m1_t vfredsum(vbool32_t op0, vfloat64m1_t op1, vfloat64m2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m4_f64m1))) +vfloat64m1_t vfredsum(vfloat64m1_t op0, vfloat64m4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m4_f64m1_m))) +vfloat64m1_t vfredsum(vbool16_t op0, vfloat64m1_t op1, vfloat64m4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m8_f64m1))) +vfloat64m1_t vfredsum(vfloat64m1_t op0, vfloat64m8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f64m8_f64m1_m))) +vfloat64m1_t vfredsum(vbool8_t op0, vfloat64m1_t op1, vfloat64m8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m1_f64m1))) +vfloat64m1_t vfredosum(vfloat64m1_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m1_f64m1_m))) +vfloat64m1_t vfredosum(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m2_f64m1))) +vfloat64m1_t vfredosum(vfloat64m1_t op0, vfloat64m2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m2_f64m1_m))) +vfloat64m1_t vfredosum(vbool32_t op0, vfloat64m1_t op1, vfloat64m2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m4_f64m1))) +vfloat64m1_t vfredosum(vfloat64m1_t op0, vfloat64m4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m4_f64m1_m))) +vfloat64m1_t vfredosum(vbool16_t op0, vfloat64m1_t op1, vfloat64m4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m8_f64m1))) +vfloat64m1_t vfredosum(vfloat64m1_t op0, vfloat64m8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f64m8_f64m1_m))) +vfloat64m1_t vfredosum(vbool8_t op0, vfloat64m1_t op1, vfloat64m8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f64m1_f64))) +double vfmv_f(vfloat64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f64m2_f64))) +double vfmv_f(vfloat64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f64m4_f64))) +double vfmv_f(vfloat64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f64m8_f64))) +double vfmv_f(vfloat64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f64m1))) +vfloat64m1_t vfmv_s(vfloat64m1_t op0, double 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f64m2))) +vfloat64m2_t vfmv_s(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f64m4))) +vfloat64m4_t vfmv_s(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f64m8))) +vfloat64m8_t vfmv_s(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m1))) +vfloat64m1_t vslideup(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m1_m))) +vfloat64m1_t vslideup(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m2))) +vfloat64m2_t vslideup(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m2_m))) +vfloat64m2_t vslideup(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m4))) +vfloat64m4_t vslideup(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m4_m))) +vfloat64m4_t vslideup(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m8))) +vfloat64m8_t vslideup(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f64m8_m))) +vfloat64m8_t vslideup(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m1))) +vfloat64m1_t vslidedown(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m1_m))) +vfloat64m1_t vslidedown(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m2))) +vfloat64m2_t vslidedown(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m2_m))) +vfloat64m2_t vslidedown(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m4))) +vfloat64m4_t vslidedown(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m4_m))) +vfloat64m4_t vslidedown(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m8))) +vfloat64m8_t vslidedown(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f64m8_m))) +vfloat64m8_t vslidedown(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m1))) +vfloat64m1_t vfslide1up(vfloat64m1_t 
op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m1_m))) +vfloat64m1_t vfslide1up(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m2))) +vfloat64m2_t vfslide1up(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m2_m))) +vfloat64m2_t vfslide1up(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m4))) +vfloat64m4_t vfslide1up(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m4_m))) +vfloat64m4_t vfslide1up(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m8))) +vfloat64m8_t vfslide1up(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f64m8_m))) +vfloat64m8_t vfslide1up(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m1))) +vfloat64m1_t vfslide1down(vfloat64m1_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m1_m))) +vfloat64m1_t vfslide1down(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m2))) +vfloat64m2_t vfslide1down(vfloat64m2_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m2_m))) +vfloat64m2_t vfslide1down(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m4))) +vfloat64m4_t vfslide1down(vfloat64m4_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m4_m))) +vfloat64m4_t vfslide1down(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m8))) +vfloat64m8_t vfslide1down(vfloat64m8_t op0, double op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f64m8_m))) +vfloat64m8_t vfslide1down(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m1))) +vfloat64m1_t vrgather(vfloat64m1_t op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m1_m))) +vfloat64m1_t vrgather(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m2))) +vfloat64m2_t vrgather(vfloat64m2_t op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m2_m))) +vfloat64m2_t vrgather(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m4))) 
+vfloat64m4_t vrgather(vfloat64m4_t op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m4_m))) +vfloat64m4_t vrgather(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m8))) +vfloat64m8_t vrgather(vfloat64m8_t op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f64m8_m))) +vfloat64m8_t vrgather(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m1))) +vfloat64m1_t vrgather(vfloat64m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m1_m))) +vfloat64m1_t vrgather(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m2))) +vfloat64m2_t vrgather(vfloat64m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m2_m))) +vfloat64m2_t vrgather(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m4))) +vfloat64m4_t vrgather(vfloat64m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m4_m))) +vfloat64m4_t vrgather(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m8))) +vfloat64m8_t vrgather(vfloat64m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f64m8_m))) +vfloat64m8_t vrgather(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m1))) +vfloat64m1_t vrgatherei16(vfloat64m1_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m1_m))) +vfloat64m1_t vrgatherei16(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m2))) +vfloat64m2_t vrgatherei16(vfloat64m2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m2_m))) +vfloat64m2_t vrgatherei16(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m4))) +vfloat64m4_t vrgatherei16(vfloat64m4_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m4_m))) +vfloat64m4_t vrgatherei16(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m8))) +vfloat64m8_t vrgatherei16(vfloat64m8_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f64m8_m))) +vfloat64m8_t vrgatherei16(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f64m1))) +vfloat64m1_t vcompress(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f64m2))) +vfloat64m2_t vcompress(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f64m4))) +vfloat64m4_t vcompress(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f64m8))) +vfloat64m8_t vcompress(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f64m2_f64m1))) +vfloat64m1_t vget_f64m1(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f64m4_f64m1))) +vfloat64m1_t vget_f64m1(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f64m8_f64m1))) +vfloat64m1_t vget_f64m1(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f64m4_f64m2))) +vfloat64m2_t vget_f64m2(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f64m8_f64m2))) +vfloat64m2_t vget_f64m2(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vget_v_f64m8_f64m4))) +vfloat64m4_t vget_f64m4(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f64m1_f64m2))) +vfloat64m2_t vset(vfloat64m2_t op0, size_t op1, vfloat64m1_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f64m1_f64m4))) +vfloat64m4_t vset(vfloat64m4_t op0, size_t op1, vfloat64m1_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f64m2_f64m4))) +vfloat64m4_t vset(vfloat64m4_t op0, size_t op1, vfloat64m2_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f64m1_f64m8))) +vfloat64m8_t vset(vfloat64m8_t op0, size_t op1, vfloat64m1_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f64m2_f64m8))) +vfloat64m8_t vset(vfloat64m8_t op0, size_t op1, vfloat64m2_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vset_v_f64m4_f64m8))) +vfloat64m8_t vset(vfloat64m8_t op0, size_t op1, vfloat64m4_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m1))) +void vsuxei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m1_m))) +void vsuxei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m2))) +void vsuxei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m2_m))) +void vsuxei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m4))) +void vsuxei8(double * op0, vuint8mf2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m4_m))) +void vsuxei8(vbool16_t op0, double * op1, vuint8mf2_t op2, vfloat64m4_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m8))) +void vsuxei8(double * op0, vuint8m1_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f64m8_m))) +void vsuxei8(vbool8_t op0, double * op1, vuint8m1_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m1))) +void vsuxei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m1_m))) +void vsuxei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m2))) +void vsuxei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m2_m))) +void vsuxei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m4))) +void vsuxei16(double * op0, vuint16m1_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m4_m))) +void vsuxei16(vbool16_t op0, double * op1, vuint16m1_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m8))) +void vsuxei16(double * op0, vuint16m2_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f64m8_m))) +void vsuxei16(vbool8_t op0, double * op1, vuint16m2_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m1))) +void vsuxei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m1_m))) +void vsuxei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m2))) +void vsuxei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m2_m))) +void vsuxei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m4))) +void vsuxei32(double * op0, vuint32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m4_m))) +void vsuxei32(vbool16_t op0, double * op1, vuint32m2_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m8))) +void vsuxei32(double * op0, vuint32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f64m8_m))) +void vsuxei32(vbool8_t op0, double * op1, vuint32m4_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m1))) +void vsuxei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m1_m))) +void vsuxei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m2))) +void vsuxei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m2_m))) +void vsuxei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m4))) +void vsuxei64(double * op0, vuint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m4_m))) +void vsuxei64(vbool16_t op0, double * op1, vuint64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m8))) +void vsuxei64(double * op0, vuint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f64m8_m))) +void vsuxei64(vbool8_t op0, double * op1, vuint64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m1))) +void vsoxei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m1_m))) +void vsoxei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m2))) +void vsoxei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m2_m))) +void vsoxei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m4))) +void vsoxei8(double * op0, vuint8mf2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m4_m))) +void vsoxei8(vbool16_t op0, double * op1, vuint8mf2_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m8))) +void vsoxei8(double * op0, vuint8m1_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f64m8_m))) +void vsoxei8(vbool8_t op0, double * op1, vuint8m1_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m1))) +void vsoxei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m1_m))) +void vsoxei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m2))) +void vsoxei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m2_m))) +void vsoxei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m4))) +void vsoxei16(double * op0, vuint16m1_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m4_m))) +void vsoxei16(vbool16_t op0, double * op1, vuint16m1_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m8))) +void 
vsoxei16(double * op0, vuint16m2_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f64m8_m))) +void vsoxei16(vbool8_t op0, double * op1, vuint16m2_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m1))) +void vsoxei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m1_m))) +void vsoxei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m2))) +void vsoxei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m2_m))) +void vsoxei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m4))) +void vsoxei32(double * op0, vuint32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m4_m))) +void vsoxei32(vbool16_t op0, double * op1, vuint32m2_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m8))) +void vsoxei32(double * op0, vuint32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f64m8_m))) +void vsoxei32(vbool8_t op0, double * op1, vuint32m4_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m1))) +void vsoxei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m1_m))) +void vsoxei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m2))) +void vsoxei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m2_m))) +void vsoxei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m4))) +void vsoxei64(double * op0, vuint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m4_m))) +void vsoxei64(vbool16_t op0, double * op1, vuint64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m8))) +void vsoxei64(double * op0, vuint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f64m8_m))) +void vsoxei64(vbool8_t op0, double * op1, vuint64m8_t op2, vfloat64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_f64m1_m))) +vfloat64m1_t vle64ff(vbool64_t op0, vfloat64m1_t op1, const double * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_f64m2_m))) +vfloat64m2_t vle64ff(vbool32_t op0, vfloat64m2_t op1, const double * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_f64m4_m))) +vfloat64m4_t vle64ff(vbool16_t 
op0, vfloat64m4_t op1, const double * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64ff_v_f64m8_m))) +vfloat64m8_t vle64ff(vbool8_t op0, vfloat64m8_t op1, const double * op2, size_t * op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m1))) +vfloat64m1_t vfneg(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m1_m))) +vfloat64m1_t vfneg(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m2))) +vfloat64m2_t vfneg(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m2_m))) +vfloat64m2_t vfneg(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m4))) +vfloat64m4_t vfneg(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m4_m))) +vfloat64m4_t vfneg(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m8))) +vfloat64m8_t vfneg(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f64m8_m))) +vfloat64m8_t vfneg(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_f64m1_m))) +vfloat64m1_t vle64(vbool64_t op0, vfloat64m1_t op1, const double * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_f64m2_m))) +vfloat64m2_t vle64(vbool32_t op0, vfloat64m2_t op1, const double * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_f64m4_m))) +vfloat64m4_t vle64(vbool16_t op0, vfloat64m4_t op1, const double * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle64_v_f64m8_m))) +vfloat64m8_t vle64(vbool8_t op0, vfloat64m8_t op1, const double * op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m1))) +void vse64(double * op0, vfloat64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m1_m))) +void vse64(vbool64_t op0, double * op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m2))) +void vse64(double * op0, vfloat64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m2_m))) +void vse64(vbool32_t op0, double * op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m4))) +void vse64(double * op0, vfloat64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m4_m))) +void vse64(vbool16_t op0, double * op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m8))) +void vse64(double * op0, vfloat64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse64_v_f64m8_m))) +void vse64(vbool8_t op0, double * op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m1))) +vuint64m1_t vfclass(vfloat64m1_t op0, size_t op1); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m1_m))) +vuint64m1_t vfclass(vbool64_t op0, vuint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m2))) +vuint64m2_t vfclass(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m2_m))) +vuint64m2_t vfclass(vbool32_t op0, vuint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m4))) +vuint64m4_t vfclass(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m4_m))) +vuint64m4_t vfclass(vbool16_t op0, vuint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m8))) +vuint64m8_t vfclass(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u64m8_m))) +vuint64m8_t vfclass(vbool8_t op0, vuint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m1))) +vfloat64m1_t vfcvt_f(vint64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m1_m))) +vfloat64m1_t vfcvt_f(vbool64_t op0, vfloat64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m2))) +vfloat64m2_t vfcvt_f(vint64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m2_m))) +vfloat64m2_t vfcvt_f(vbool32_t op0, vfloat64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m4))) +vfloat64m4_t vfcvt_f(vint64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m4_m))) +vfloat64m4_t vfcvt_f(vbool16_t op0, vfloat64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m8))) +vfloat64m8_t vfcvt_f(vint64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f64m8_m))) +vfloat64m8_t vfcvt_f(vbool8_t op0, vfloat64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m1))) +vfloat64m1_t vfcvt_f(vuint64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m1_m))) +vfloat64m1_t vfcvt_f(vbool64_t op0, vfloat64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m2))) +vfloat64m2_t vfcvt_f(vuint64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m2_m))) +vfloat64m2_t vfcvt_f(vbool32_t op0, vfloat64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m4))) +vfloat64m4_t vfcvt_f(vuint64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m4_m))) +vfloat64m4_t vfcvt_f(vbool16_t op0, vfloat64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m8))) +vfloat64m8_t vfcvt_f(vuint64m8_t op0, size_t op1); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f64m8_m))) +vfloat64m8_t vfcvt_f(vbool8_t op0, vfloat64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m1))) +vint64m1_t vfcvt_rtz_x(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m1_m))) +vint64m1_t vfcvt_rtz_x(vbool64_t op0, vint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m2))) +vint64m2_t vfcvt_rtz_x(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m2_m))) +vint64m2_t vfcvt_rtz_x(vbool32_t op0, vint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m4))) +vint64m4_t vfcvt_rtz_x(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m4_m))) +vint64m4_t vfcvt_rtz_x(vbool16_t op0, vint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m8))) +vint64m8_t vfcvt_rtz_x(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i64m8_m))) +vint64m8_t vfcvt_rtz_x(vbool8_t op0, vint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m1))) +vuint64m1_t vfcvt_rtz_xu(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m1_m))) +vuint64m1_t vfcvt_rtz_xu(vbool64_t op0, vuint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m2))) +vuint64m2_t vfcvt_rtz_xu(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m2_m))) +vuint64m2_t vfcvt_rtz_xu(vbool32_t op0, vuint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m4))) +vuint64m4_t vfcvt_rtz_xu(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m4_m))) +vuint64m4_t vfcvt_rtz_xu(vbool16_t op0, vuint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m8))) +vuint64m8_t vfcvt_rtz_xu(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u64m8_m))) +vuint64m8_t vfcvt_rtz_xu(vbool8_t op0, vuint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m1))) +vint64m1_t vfcvt_x(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m1_m))) +vint64m1_t vfcvt_x(vbool64_t op0, vint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m2))) +vint64m2_t vfcvt_x(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m2_m))) +vint64m2_t vfcvt_x(vbool32_t op0, vint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m4))) 
+vint64m4_t vfcvt_x(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m4_m))) +vint64m4_t vfcvt_x(vbool16_t op0, vint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m8))) +vint64m8_t vfcvt_x(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i64m8_m))) +vint64m8_t vfcvt_x(vbool8_t op0, vint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m1))) +vuint64m1_t vfcvt_xu(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m1_m))) +vuint64m1_t vfcvt_xu(vbool64_t op0, vuint64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m2))) +vuint64m2_t vfcvt_xu(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m2_m))) +vuint64m2_t vfcvt_xu(vbool32_t op0, vuint64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m4))) +vuint64m4_t vfcvt_xu(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m4_m))) +vuint64m4_t vfcvt_xu(vbool16_t op0, vuint64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m8))) +vuint64m8_t vfcvt_xu(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u64m8_m))) +vuint64m8_t vfcvt_xu(vbool8_t op0, vuint64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32mf2))) +vint32mf2_t vfncvt_rtz_x(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32mf2_m))) +vint32mf2_t vfncvt_rtz_x(vbool64_t op0, vint32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32m1))) +vint32m1_t vfncvt_rtz_x(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32m1_m))) +vint32m1_t vfncvt_rtz_x(vbool32_t op0, vint32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32m2))) +vint32m2_t vfncvt_rtz_x(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32m2_m))) +vint32m2_t vfncvt_rtz_x(vbool16_t op0, vint32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32m4))) +vint32m4_t vfncvt_rtz_x(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i32m4_m))) +vint32m4_t vfncvt_rtz_x(vbool8_t op0, vint32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32mf2))) +vuint32mf2_t vfncvt_rtz_xu(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32mf2_m))) +vuint32mf2_t vfncvt_rtz_xu(vbool64_t op0, vuint32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32m1))) +vuint32m1_t vfncvt_rtz_xu(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32m1_m))) +vuint32m1_t vfncvt_rtz_xu(vbool32_t op0, vuint32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32m2))) +vuint32m2_t vfncvt_rtz_xu(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32m2_m))) +vuint32m2_t vfncvt_rtz_xu(vbool16_t op0, vuint32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32m4))) +vuint32m4_t vfncvt_rtz_xu(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u32m4_m))) +vuint32m4_t vfncvt_rtz_xu(vbool8_t op0, vuint32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32mf2))) +vint32mf2_t vfncvt_x(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32mf2_m))) +vint32mf2_t vfncvt_x(vbool64_t op0, vint32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32m1))) +vint32m1_t vfncvt_x(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32m1_m))) +vint32m1_t vfncvt_x(vbool32_t op0, vint32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32m2))) +vint32m2_t vfncvt_x(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32m2_m))) +vint32m2_t vfncvt_x(vbool16_t op0, vint32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32m4))) +vint32m4_t vfncvt_x(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i32m4_m))) +vint32m4_t vfncvt_x(vbool8_t op0, vint32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32mf2))) +vuint32mf2_t vfncvt_xu(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32mf2_m))) +vuint32mf2_t vfncvt_xu(vbool64_t op0, vuint32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32m1))) +vuint32m1_t vfncvt_xu(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32m1_m))) +vuint32m1_t vfncvt_xu(vbool32_t op0, vuint32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32m2))) +vuint32m2_t vfncvt_xu(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32m2_m))) +vuint32m2_t vfncvt_xu(vbool16_t op0, vuint32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32m4))) +vuint32m4_t vfncvt_xu(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u32m4_m))) +vuint32m4_t vfncvt_xu(vbool8_t op0, 
vuint32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m1))) +vfloat64m1_t vfrec7(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m1_m))) +vfloat64m1_t vfrec7(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m2))) +vfloat64m2_t vfrec7(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m2_m))) +vfloat64m2_t vfrec7(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m4))) +vfloat64m4_t vfrec7(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m4_m))) +vfloat64m4_t vfrec7(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m8))) +vfloat64m8_t vfrec7(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f64m8_m))) +vfloat64m8_t vfrec7(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m1))) +vfloat64m1_t vfrsqrt7(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m1_m))) +vfloat64m1_t vfrsqrt7(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m2))) +vfloat64m2_t vfrsqrt7(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m2_m))) +vfloat64m2_t vfrsqrt7(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m4))) +vfloat64m4_t vfrsqrt7(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m4_m))) +vfloat64m4_t vfrsqrt7(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m8))) +vfloat64m8_t vfrsqrt7(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f64m8_m))) +vfloat64m8_t vfrsqrt7(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m1))) +vfloat64m1_t vfsqrt(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m1_m))) +vfloat64m1_t vfsqrt(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m2))) +vfloat64m2_t vfsqrt(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m2_m))) +vfloat64m2_t vfsqrt(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m4))) +vfloat64m4_t vfsqrt(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m4_m))) +vfloat64m4_t vfsqrt(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m8))) +vfloat64m8_t vfsqrt(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f64m8_m))) +vfloat64m8_t vfsqrt(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m1))) +vfloat64m1_t vfwcvt_f(vint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m1_m))) +vfloat64m1_t vfwcvt_f(vbool64_t op0, vfloat64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m2))) +vfloat64m2_t vfwcvt_f(vint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m2_m))) +vfloat64m2_t vfwcvt_f(vbool32_t op0, vfloat64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m4))) +vfloat64m4_t vfwcvt_f(vint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m4_m))) +vfloat64m4_t vfwcvt_f(vbool16_t op0, vfloat64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m8))) +vfloat64m8_t vfwcvt_f(vint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f64m8_m))) +vfloat64m8_t vfwcvt_f(vbool8_t op0, vfloat64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m1))) +vfloat64m1_t vfwcvt_f(vuint32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m1_m))) +vfloat64m1_t vfwcvt_f(vbool64_t op0, vfloat64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m2))) +vfloat64m2_t vfwcvt_f(vuint32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m2_m))) +vfloat64m2_t vfwcvt_f(vbool32_t op0, vfloat64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m4))) +vfloat64m4_t vfwcvt_f(vuint32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m4_m))) +vfloat64m4_t vfwcvt_f(vbool16_t op0, vfloat64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m8))) +vfloat64m8_t vfwcvt_f(vuint32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f64m8_m))) +vfloat64m8_t vfwcvt_f(vbool8_t op0, vfloat64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f64m1_f64m2))) +vfloat64m2_t vlmul_ext_f64m2(vfloat64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f64m1_f64m4))) +vfloat64m4_t vlmul_ext_f64m4(vfloat64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f64m2_f64m4))) +vfloat64m4_t vlmul_ext_f64m4(vfloat64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f64m1_f64m8))) +vfloat64m8_t vlmul_ext_f64m8(vfloat64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f64m2_f64m8))) 
+vfloat64m8_t vlmul_ext_f64m8(vfloat64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f64m4_f64m8))) +vfloat64m8_t vlmul_ext_f64m8(vfloat64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f64m2_f64m1))) +vfloat64m1_t vlmul_trunc_f64m1(vfloat64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f64m4_f64m1))) +vfloat64m1_t vlmul_trunc_f64m1(vfloat64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f64m8_f64m1))) +vfloat64m1_t vlmul_trunc_f64m1(vfloat64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f64m4_f64m2))) +vfloat64m2_t vlmul_trunc_f64m2(vfloat64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f64m8_f64m2))) +vfloat64m2_t vlmul_trunc_f64m2(vfloat64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f64m8_f64m4))) +vfloat64m4_t vlmul_trunc_f64m4(vfloat64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m1_f64m1))) +vfloat64m1_t vreinterpret_f64m1(vint64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m2_f64m2))) +vfloat64m2_t vreinterpret_f64m2(vint64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m4_f64m4))) +vfloat64m4_t vreinterpret_f64m4(vint64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i64m8_f64m8))) +vfloat64m8_t vreinterpret_f64m8(vint64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m1_f64m1))) +vfloat64m1_t vreinterpret_f64m1(vuint64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m2_f64m2))) +vfloat64m2_t vreinterpret_f64m2(vuint64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m4_f64m4))) +vfloat64m4_t vreinterpret_f64m4(vuint64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u64m8_f64m8))) +vfloat64m8_t vreinterpret_f64m8(vuint64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m1_i64m1))) +vint64m1_t vreinterpret_i64m1(vfloat64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m2_i64m2))) +vint64m2_t vreinterpret_i64m2(vfloat64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m4_i64m4))) +vint64m4_t vreinterpret_i64m4(vfloat64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m8_i64m8))) +vint64m8_t vreinterpret_i64m8(vfloat64m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m1_u64m1))) +vuint64m1_t vreinterpret_u64m1(vfloat64m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m2_u64m2))) +vuint64m2_t vreinterpret_u64m2(vfloat64m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m4_u64m4))) +vuint64m4_t vreinterpret_u64m4(vfloat64m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f64m8_u64m8))) +vuint64m8_t vreinterpret_u64m8(vfloat64m8_t op0); + +#endif + +#if defined(__riscv_f) && defined(__riscv_d) 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m1))) +vfloat64m1_t vfwadd_vv(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m1_m))) +vfloat64m1_t vfwadd_vv(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m2))) +vfloat64m2_t vfwadd_vv(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m2_m))) +vfloat64m2_t vfwadd_vv(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m4))) +vfloat64m4_t vfwadd_vv(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m4_m))) +vfloat64m4_t vfwadd_vv(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m8))) +vfloat64m8_t vfwadd_vv(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f64m8_m))) +vfloat64m8_t vfwadd_vv(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m1))) +vfloat64m1_t vfwadd_vf(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m1_m))) +vfloat64m1_t vfwadd_vf(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m2))) +vfloat64m2_t vfwadd_vf(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m2_m))) +vfloat64m2_t vfwadd_vf(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m4))) +vfloat64m4_t vfwadd_vf(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m4_m))) +vfloat64m4_t vfwadd_vf(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m8))) +vfloat64m8_t vfwadd_vf(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f64m8_m))) +vfloat64m8_t vfwadd_vf(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m1))) +vfloat64m1_t vfwsub_vv(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m1_m))) +vfloat64m1_t vfwsub_vv(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m2))) +vfloat64m2_t vfwsub_vv(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m2_m))) +vfloat64m2_t vfwsub_vv(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m4))) +vfloat64m4_t vfwsub_vv(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m4_m))) +vfloat64m4_t vfwsub_vv(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m8))) +vfloat64m8_t vfwsub_vv(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f64m8_m))) +vfloat64m8_t vfwsub_vv(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m1))) +vfloat64m1_t vfwsub_vf(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m1_m))) +vfloat64m1_t vfwsub_vf(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m2))) +vfloat64m2_t vfwsub_vf(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m2_m))) +vfloat64m2_t vfwsub_vf(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m4))) +vfloat64m4_t vfwsub_vf(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m4_m))) +vfloat64m4_t vfwsub_vf(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m8))) +vfloat64m8_t vfwsub_vf(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f64m8_m))) +vfloat64m8_t vfwsub_vf(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m1))) +vfloat64m1_t vfwadd_wv(vfloat64m1_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m1_m))) +vfloat64m1_t vfwadd_wv(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m2))) +vfloat64m2_t vfwadd_wv(vfloat64m2_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m2_m))) +vfloat64m2_t vfwadd_wv(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m4))) +vfloat64m4_t vfwadd_wv(vfloat64m4_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m4_m))) +vfloat64m4_t vfwadd_wv(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m8))) +vfloat64m8_t vfwadd_wv(vfloat64m8_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f64m8_m))) +vfloat64m8_t vfwadd_wv(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m1))) +vfloat64m1_t vfwadd_wf(vfloat64m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m1_m))) +vfloat64m1_t vfwadd_wf(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m2))) +vfloat64m2_t vfwadd_wf(vfloat64m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m2_m))) +vfloat64m2_t vfwadd_wf(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m4))) +vfloat64m4_t vfwadd_wf(vfloat64m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m4_m))) +vfloat64m4_t vfwadd_wf(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m8))) +vfloat64m8_t vfwadd_wf(vfloat64m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f64m8_m))) +vfloat64m8_t vfwadd_wf(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m1))) +vfloat64m1_t vfwsub_wv(vfloat64m1_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m1_m))) +vfloat64m1_t vfwsub_wv(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m2))) +vfloat64m2_t vfwsub_wv(vfloat64m2_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m2_m))) +vfloat64m2_t vfwsub_wv(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m4))) +vfloat64m4_t vfwsub_wv(vfloat64m4_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m4_m))) +vfloat64m4_t vfwsub_wv(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m8))) +vfloat64m8_t vfwsub_wv(vfloat64m8_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f64m8_m))) +vfloat64m8_t vfwsub_wv(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m1))) +vfloat64m1_t vfwsub_wf(vfloat64m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m1_m))) +vfloat64m1_t vfwsub_wf(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m2))) +vfloat64m2_t vfwsub_wf(vfloat64m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m2_m))) +vfloat64m2_t vfwsub_wf(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m4))) 
+vfloat64m4_t vfwsub_wf(vfloat64m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m4_m))) +vfloat64m4_t vfwsub_wf(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m8))) +vfloat64m8_t vfwsub_wf(vfloat64m8_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f64m8_m))) +vfloat64m8_t vfwsub_wf(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m1))) +vfloat64m1_t vfwmul(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m1_m))) +vfloat64m1_t vfwmul(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m2))) +vfloat64m2_t vfwmul(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m2_m))) +vfloat64m2_t vfwmul(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m4))) +vfloat64m4_t vfwmul(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m4_m))) +vfloat64m4_t vfwmul(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m8))) +vfloat64m8_t vfwmul(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f64m8_m))) +vfloat64m8_t vfwmul(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m1))) +vfloat64m1_t vfwmul(vfloat32mf2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m1_m))) +vfloat64m1_t vfwmul(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m2))) +vfloat64m2_t vfwmul(vfloat32m1_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m2_m))) +vfloat64m2_t vfwmul(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m4))) +vfloat64m4_t vfwmul(vfloat32m2_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m4_m))) +vfloat64m4_t vfwmul(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m8))) +vfloat64m8_t vfwmul(vfloat32m4_t op0, float op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f64m8_m))) +vfloat64m8_t vfwmul(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, float op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m1))) +vfloat64m1_t vfwmacc(vfloat64m1_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m1_m))) +vfloat64m1_t vfwmacc(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m2))) +vfloat64m2_t vfwmacc(vfloat64m2_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m2_m))) +vfloat64m2_t vfwmacc(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m4))) +vfloat64m4_t vfwmacc(vfloat64m4_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m4_m))) +vfloat64m4_t vfwmacc(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m8))) +vfloat64m8_t vfwmacc(vfloat64m8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f64m8_m))) +vfloat64m8_t vfwmacc(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m1))) +vfloat64m1_t vfwmacc(vfloat64m1_t op0, float op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m1_m))) +vfloat64m1_t vfwmacc(vbool64_t op0, vfloat64m1_t op1, float op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m2))) +vfloat64m2_t vfwmacc(vfloat64m2_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m2_m))) +vfloat64m2_t vfwmacc(vbool32_t op0, vfloat64m2_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m4))) +vfloat64m4_t vfwmacc(vfloat64m4_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m4_m))) +vfloat64m4_t vfwmacc(vbool16_t op0, vfloat64m4_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m8))) +vfloat64m8_t vfwmacc(vfloat64m8_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f64m8_m))) +vfloat64m8_t vfwmacc(vbool8_t op0, vfloat64m8_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m1))) +vfloat64m1_t vfwnmacc(vfloat64m1_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m1_m))) +vfloat64m1_t vfwnmacc(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m2))) +vfloat64m2_t vfwnmacc(vfloat64m2_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m2_m))) +vfloat64m2_t vfwnmacc(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m4))) +vfloat64m4_t vfwnmacc(vfloat64m4_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m4_m))) +vfloat64m4_t vfwnmacc(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m8))) +vfloat64m8_t vfwnmacc(vfloat64m8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f64m8_m))) +vfloat64m8_t vfwnmacc(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m1))) +vfloat64m1_t vfwnmacc(vfloat64m1_t op0, float op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m1_m))) +vfloat64m1_t vfwnmacc(vbool64_t op0, vfloat64m1_t op1, float op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m2))) +vfloat64m2_t vfwnmacc(vfloat64m2_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m2_m))) +vfloat64m2_t vfwnmacc(vbool32_t op0, vfloat64m2_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m4))) +vfloat64m4_t vfwnmacc(vfloat64m4_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m4_m))) +vfloat64m4_t vfwnmacc(vbool16_t op0, vfloat64m4_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m8))) +vfloat64m8_t vfwnmacc(vfloat64m8_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f64m8_m))) +vfloat64m8_t vfwnmacc(vbool8_t op0, vfloat64m8_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m1))) +vfloat64m1_t vfwmsac(vfloat64m1_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m1_m))) +vfloat64m1_t vfwmsac(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m2))) +vfloat64m2_t vfwmsac(vfloat64m2_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m2_m))) +vfloat64m2_t vfwmsac(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m4))) +vfloat64m4_t vfwmsac(vfloat64m4_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m4_m))) +vfloat64m4_t vfwmsac(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m8))) +vfloat64m8_t vfwmsac(vfloat64m8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f64m8_m))) +vfloat64m8_t vfwmsac(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m1))) +vfloat64m1_t vfwmsac(vfloat64m1_t op0, float op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m1_m))) +vfloat64m1_t vfwmsac(vbool64_t op0, vfloat64m1_t op1, float op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m2))) +vfloat64m2_t vfwmsac(vfloat64m2_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m2_m))) +vfloat64m2_t vfwmsac(vbool32_t op0, vfloat64m2_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m4))) +vfloat64m4_t vfwmsac(vfloat64m4_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m4_m))) +vfloat64m4_t vfwmsac(vbool16_t op0, vfloat64m4_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m8))) +vfloat64m8_t vfwmsac(vfloat64m8_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f64m8_m))) +vfloat64m8_t vfwmsac(vbool8_t op0, vfloat64m8_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m1))) +vfloat64m1_t vfwnmsac(vfloat64m1_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m1_m))) +vfloat64m1_t vfwnmsac(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m2))) +vfloat64m2_t vfwnmsac(vfloat64m2_t op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m2_m))) +vfloat64m2_t vfwnmsac(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m4))) +vfloat64m4_t vfwnmsac(vfloat64m4_t op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m4_m))) +vfloat64m4_t vfwnmsac(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m8))) +vfloat64m8_t vfwnmsac(vfloat64m8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f64m8_m))) +vfloat64m8_t vfwnmsac(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m1))) +vfloat64m1_t vfwnmsac(vfloat64m1_t op0, float op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m1_m))) +vfloat64m1_t vfwnmsac(vbool64_t op0, vfloat64m1_t op1, float op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m2))) +vfloat64m2_t vfwnmsac(vfloat64m2_t op0, float op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m2_m))) +vfloat64m2_t vfwnmsac(vbool32_t op0, vfloat64m2_t op1, float op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m4))) +vfloat64m4_t vfwnmsac(vfloat64m4_t op0, float op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m4_m))) +vfloat64m4_t vfwnmsac(vbool16_t op0, vfloat64m4_t op1, float op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m8))) +vfloat64m8_t vfwnmsac(vfloat64m8_t op0, float op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f64m8_m))) +vfloat64m8_t vfwnmsac(vbool8_t op0, vfloat64m8_t op1, float op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m1_f64m1))) +vfloat64m1_t vfwredsum(vfloat64m1_t op0, vfloat32m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m1_f64m1_m))) +vfloat64m1_t vfwredsum(vbool32_t op0, vfloat64m1_t op1, vfloat32m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m2_f64m1))) +vfloat64m1_t vfwredsum(vfloat64m1_t op0, vfloat32m2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m2_f64m1_m))) +vfloat64m1_t vfwredsum(vbool16_t op0, vfloat64m1_t op1, vfloat32m2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m4_f64m1))) +vfloat64m1_t vfwredsum(vfloat64m1_t op0, vfloat32m4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m4_f64m1_m))) +vfloat64m1_t vfwredsum(vbool8_t op0, vfloat64m1_t op1, vfloat32m4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m8_f64m1))) +vfloat64m1_t vfwredsum(vfloat64m1_t op0, vfloat32m8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32m8_f64m1_m))) +vfloat64m1_t vfwredsum(vbool4_t op0, vfloat64m1_t op1, vfloat32m8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32mf2_f64m1))) +vfloat64m1_t vfwredsum(vfloat64m1_t op0, vfloat32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f32mf2_f64m1_m))) +vfloat64m1_t vfwredsum(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m1_f64m1))) +vfloat64m1_t vfwredosum(vfloat64m1_t op0, vfloat32m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m1_f64m1_m))) +vfloat64m1_t vfwredosum(vbool32_t op0, vfloat64m1_t op1, vfloat32m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m2_f64m1))) +vfloat64m1_t 
vfwredosum(vfloat64m1_t op0, vfloat32m2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m2_f64m1_m))) +vfloat64m1_t vfwredosum(vbool16_t op0, vfloat64m1_t op1, vfloat32m2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m4_f64m1))) +vfloat64m1_t vfwredosum(vfloat64m1_t op0, vfloat32m4_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m4_f64m1_m))) +vfloat64m1_t vfwredosum(vbool8_t op0, vfloat64m1_t op1, vfloat32m4_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m8_f64m1))) +vfloat64m1_t vfwredosum(vfloat64m1_t op0, vfloat32m8_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32m8_f64m1_m))) +vfloat64m1_t vfwredosum(vbool4_t op0, vfloat64m1_t op1, vfloat32m8_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32mf2_f64m1))) +vfloat64m1_t vfwredosum(vfloat64m1_t op0, vfloat32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f32mf2_f64m1_m))) +vfloat64m1_t vfwredosum(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32mf2))) +vfloat32mf2_t vfncvt_f(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32mf2_m))) +vfloat32mf2_t vfncvt_f(vbool64_t op0, vfloat32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32m1))) +vfloat32m1_t vfncvt_f(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32m1_m))) +vfloat32m1_t vfncvt_f(vbool32_t op0, vfloat32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32m2))) +vfloat32m2_t vfncvt_f(vfloat64m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32m2_m))) +vfloat32m2_t vfncvt_f(vbool16_t op0, vfloat32m2_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32m4))) +vfloat32m4_t vfncvt_f(vfloat64m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f32m4_m))) +vfloat32m4_t vfncvt_f(vbool8_t op0, vfloat32m4_t op1, vfloat64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32mf2))) +vfloat32mf2_t vfncvt_rod_f(vfloat64m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32mf2_m))) +vfloat32mf2_t vfncvt_rod_f(vbool64_t op0, vfloat32mf2_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32m1))) +vfloat32m1_t vfncvt_rod_f(vfloat64m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32m1_m))) +vfloat32m1_t vfncvt_rod_f(vbool32_t op0, vfloat32m1_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32m2)))
+vfloat32m2_t vfncvt_rod_f(vfloat64m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32m2_m)))
+vfloat32m2_t vfncvt_rod_f(vbool16_t op0, vfloat32m2_t op1, vfloat64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32m4)))
+vfloat32m4_t vfncvt_rod_f(vfloat64m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f32m4_m)))
+vfloat32m4_t vfncvt_rod_f(vbool8_t op0, vfloat32m4_t op1, vfloat64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m1)))
+vfloat64m1_t vfwcvt_f(vfloat32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m1_m)))
+vfloat64m1_t vfwcvt_f(vbool64_t op0, vfloat64m1_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m2)))
+vfloat64m2_t vfwcvt_f(vfloat32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m2_m)))
+vfloat64m2_t vfwcvt_f(vbool32_t op0, vfloat64m2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m4)))
+vfloat64m4_t vfwcvt_f(vfloat32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m4_m)))
+vfloat64m4_t vfwcvt_f(vbool16_t op0, vfloat64m4_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m8)))
+vfloat64m8_t vfwcvt_f(vfloat32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f64m8_m)))
+vfloat64m8_t vfwcvt_f(vbool8_t op0, vfloat64m8_t op1, vfloat32m4_t op2, size_t op3);
+
+#endif
+
+#if defined(__riscv_zfh)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m1)))
+void vsse16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m1_m)))
+void vsse16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m2)))
+void vsse16(_Float16 * op0, ptrdiff_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m2_m)))
+void vsse16(vbool8_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m4)))
+void vsse16(_Float16 * op0, ptrdiff_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m4_m)))
+void vsse16(vbool4_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m8)))
+void vsse16(_Float16 * op0, ptrdiff_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16m8_m)))
+void vsse16(vbool2_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16mf2)))
+void vsse16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16mf2_m))) +void vsse16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16mf4))) +void vsse16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsse16_v_f16mf4_m))) +void vsse16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m1))) +vfloat16m1_t vluxei8(const _Float16 * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m1_m))) +vfloat16m1_t vluxei8(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m2))) +vfloat16m2_t vluxei8(const _Float16 * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m2_m))) +vfloat16m2_t vluxei8(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m4))) +vfloat16m4_t vluxei8(const _Float16 * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m4_m))) +vfloat16m4_t vluxei8(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m8))) +vfloat16m8_t vluxei8(const _Float16 * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16m8_m))) +vfloat16m8_t vluxei8(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16mf2))) +vfloat16mf2_t vluxei8(const _Float16 * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16mf2_m))) +vfloat16mf2_t vluxei8(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16mf4))) +vfloat16mf4_t vluxei8(const _Float16 * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei8_v_f16mf4_m))) +vfloat16mf4_t vluxei8(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m1))) +vfloat16m1_t vluxei16(const _Float16 * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m1_m))) +vfloat16m1_t vluxei16(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m2))) +vfloat16m2_t vluxei16(const _Float16 * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m2_m))) +vfloat16m2_t vluxei16(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m4))) +vfloat16m4_t vluxei16(const _Float16 * op0, vuint16m4_t op1, size_t op2); + 
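For reference, a minimal sketch of how the overloaded _Float16 load/store intrinsics declared above are meant to be called under a clang 14 RVV toolchain with this header in effect. vsetvl_e16m1 comes from the same generated header; scale_gathered, dst, src and idx are hypothetical names, and vfmul is declared further below in this file, so the sketch is wrapped in #if 0 to stay inert and is not part of the patch:

#if 0
/* Hypothetical helper: gather f16 values through 8-bit indices, double
 * them, and store every other destination element. Uses only overloads
 * declared in this header: vluxei8 (unordered indexed load), vfmul
 * (vector-scalar multiply), and vsse16 (strided store, byte stride). */
static void scale_gathered(_Float16 *dst, const _Float16 *src,
                           vuint8mf2_t idx, size_t n) {
  size_t vl = vsetvl_e16m1(n);              /* vector length for e16/m1 */
  vfloat16m1_t v = vluxei8(src, idx, vl);   /* v[i] = src[idx[i]] */
  v = vfmul(v, (_Float16)2.0, vl);          /* v[i] *= 2 */
  vsse16(dst, (ptrdiff_t)(2 * sizeof(_Float16)), v, vl); /* dst[2*i] = v[i] */
}
#endif

Note the trailing size_t on every overload: it is the explicit vector length (vl) argument, and the masked forms differ only in taking a vboolN_t mask plus a maskedoff operand in front.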
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m4_m))) +vfloat16m4_t vluxei16(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m8))) +vfloat16m8_t vluxei16(const _Float16 * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16m8_m))) +vfloat16m8_t vluxei16(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16mf2))) +vfloat16mf2_t vluxei16(const _Float16 * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16mf2_m))) +vfloat16mf2_t vluxei16(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16mf4))) +vfloat16mf4_t vluxei16(const _Float16 * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei16_v_f16mf4_m))) +vfloat16mf4_t vluxei16(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16m1))) +vfloat16m1_t vluxei32(const _Float16 * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16m1_m))) +vfloat16m1_t vluxei32(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16m2))) +vfloat16m2_t vluxei32(const _Float16 * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16m2_m))) +vfloat16m2_t vluxei32(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16m4))) +vfloat16m4_t vluxei32(const _Float16 * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16m4_m))) +vfloat16m4_t vluxei32(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16mf2))) +vfloat16mf2_t vluxei32(const _Float16 * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16mf2_m))) +vfloat16mf2_t vluxei32(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16mf4))) +vfloat16mf4_t vluxei32(const _Float16 * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei32_v_f16mf4_m))) +vfloat16mf4_t vluxei32(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16m1))) +vfloat16m1_t vluxei64(const _Float16 * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16m1_m))) +vfloat16m1_t vluxei64(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16m2))) +vfloat16m2_t vluxei64(const _Float16 * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16m2_m))) +vfloat16m2_t vluxei64(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16mf2))) +vfloat16mf2_t vluxei64(const _Float16 * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16mf2_m))) +vfloat16mf2_t vluxei64(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16mf4))) +vfloat16mf4_t vluxei64(const _Float16 * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxei64_v_f16mf4_m))) +vfloat16mf4_t vluxei64(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m1))) +vfloat16m1_t vloxei8(const _Float16 * op0, vuint8mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m1_m))) +vfloat16m1_t vloxei8(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m2))) +vfloat16m2_t vloxei8(const _Float16 * op0, vuint8m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m2_m))) +vfloat16m2_t vloxei8(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m4))) +vfloat16m4_t vloxei8(const _Float16 * op0, vuint8m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m4_m))) +vfloat16m4_t vloxei8(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m8))) +vfloat16m8_t vloxei8(const _Float16 * op0, vuint8m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16m8_m))) +vfloat16m8_t vloxei8(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16mf2))) +vfloat16mf2_t vloxei8(const _Float16 * op0, vuint8mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16mf2_m))) +vfloat16mf2_t vloxei8(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16mf4))) +vfloat16mf4_t vloxei8(const _Float16 * op0, vuint8mf8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei8_v_f16mf4_m))) +vfloat16mf4_t vloxei8(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m1))) +vfloat16m1_t vloxei16(const _Float16 * op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m1_m))) +vfloat16m1_t vloxei16(vbool16_t op0, vfloat16m1_t op1, const 
_Float16 * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m2))) +vfloat16m2_t vloxei16(const _Float16 * op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m2_m))) +vfloat16m2_t vloxei16(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m4))) +vfloat16m4_t vloxei16(const _Float16 * op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m4_m))) +vfloat16m4_t vloxei16(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m8))) +vfloat16m8_t vloxei16(const _Float16 * op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16m8_m))) +vfloat16m8_t vloxei16(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16mf2))) +vfloat16mf2_t vloxei16(const _Float16 * op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16mf2_m))) +vfloat16mf2_t vloxei16(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16mf4))) +vfloat16mf4_t vloxei16(const _Float16 * op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei16_v_f16mf4_m))) +vfloat16mf4_t vloxei16(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16m1))) +vfloat16m1_t vloxei32(const _Float16 * op0, vuint32m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16m1_m))) +vfloat16m1_t vloxei32(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16m2))) +vfloat16m2_t vloxei32(const _Float16 * op0, vuint32m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16m2_m))) +vfloat16m2_t vloxei32(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16m4))) +vfloat16m4_t vloxei32(const _Float16 * op0, vuint32m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16m4_m))) +vfloat16m4_t vloxei32(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16mf2))) +vfloat16mf2_t vloxei32(const _Float16 * op0, vuint32m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16mf2_m))) +vfloat16mf2_t vloxei32(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16mf4))) +vfloat16mf4_t vloxei32(const _Float16 * op0, vuint32mf2_t op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxei32_v_f16mf4_m))) +vfloat16mf4_t vloxei32(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16m1))) +vfloat16m1_t vloxei64(const _Float16 * op0, vuint64m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16m1_m))) +vfloat16m1_t vloxei64(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16m2))) +vfloat16m2_t vloxei64(const _Float16 * op0, vuint64m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16m2_m))) +vfloat16m2_t vloxei64(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16mf2))) +vfloat16mf2_t vloxei64(const _Float16 * op0, vuint64m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16mf2_m))) +vfloat16mf2_t vloxei64(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16mf4))) +vfloat16mf4_t vloxei64(const _Float16 * op0, vuint64m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxei64_v_f16mf4_m))) +vfloat16mf4_t vloxei64(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f16m1))) +vfloat16m1_t vmv_v(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f16m2))) +vfloat16m2_t vmv_v(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f16m4))) +vfloat16m4_t vmv_v(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f16m8))) +vfloat16m8_t vmv_v(vfloat16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f16mf2))) +vfloat16mf2_t vmv_v(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmv_v_v_f16mf4))) +vfloat16mf4_t vmv_v(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m1))) +vfloat16m1_t vfadd(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m1_m))) +vfloat16m1_t vfadd(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m2))) +vfloat16m2_t vfadd(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m2_m))) +vfloat16m2_t vfadd(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m4))) +vfloat16m4_t vfadd(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m4_m))) +vfloat16m4_t vfadd(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m8))) +vfloat16m8_t vfadd(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16m8_m))) +vfloat16m8_t vfadd(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16mf2))) +vfloat16mf2_t vfadd(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16mf2_m))) +vfloat16mf2_t vfadd(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16mf4))) +vfloat16mf4_t vfadd(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vv_f16mf4_m))) +vfloat16mf4_t vfadd(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m1))) +vfloat16m1_t vfadd(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m1_m))) +vfloat16m1_t vfadd(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m2))) +vfloat16m2_t vfadd(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m2_m))) +vfloat16m2_t vfadd(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m4))) +vfloat16m4_t vfadd(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m4_m))) +vfloat16m4_t vfadd(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m8))) +vfloat16m8_t vfadd(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16m8_m))) +vfloat16m8_t vfadd(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16mf2))) +vfloat16mf2_t vfadd(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16mf2_m))) +vfloat16mf2_t vfadd(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16mf4))) +vfloat16mf4_t vfadd(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfadd_vf_f16mf4_m))) +vfloat16mf4_t vfadd(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m1))) +vfloat16m1_t vfsub(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m1_m))) +vfloat16m1_t vfsub(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m2))) +vfloat16m2_t vfsub(vfloat16m2_t op0, 
vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m2_m))) +vfloat16m2_t vfsub(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m4))) +vfloat16m4_t vfsub(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m4_m))) +vfloat16m4_t vfsub(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m8))) +vfloat16m8_t vfsub(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16m8_m))) +vfloat16m8_t vfsub(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16mf2))) +vfloat16mf2_t vfsub(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16mf2_m))) +vfloat16mf2_t vfsub(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16mf4))) +vfloat16mf4_t vfsub(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vv_f16mf4_m))) +vfloat16mf4_t vfsub(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m1))) +vfloat16m1_t vfsub(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m1_m))) +vfloat16m1_t vfsub(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m2))) +vfloat16m2_t vfsub(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m2_m))) +vfloat16m2_t vfsub(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m4))) +vfloat16m4_t vfsub(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m4_m))) +vfloat16m4_t vfsub(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m8))) +vfloat16m8_t vfsub(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16m8_m))) +vfloat16m8_t vfsub(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16mf2))) +vfloat16mf2_t vfsub(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16mf2_m))) +vfloat16mf2_t vfsub(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16mf4))) +vfloat16mf4_t vfsub(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfsub_vf_f16mf4_m))) +vfloat16mf4_t vfsub(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m1))) +vfloat16m1_t vfrsub(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m1_m))) +vfloat16m1_t vfrsub(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m2))) +vfloat16m2_t vfrsub(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m2_m))) +vfloat16m2_t vfrsub(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m4))) +vfloat16m4_t vfrsub(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m4_m))) +vfloat16m4_t vfrsub(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m8))) +vfloat16m8_t vfrsub(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16m8_m))) +vfloat16m8_t vfrsub(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16mf2))) +vfloat16mf2_t vfrsub(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16mf2_m))) +vfloat16mf2_t vfrsub(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16mf4))) +vfloat16mf4_t vfrsub(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsub_vf_f16mf4_m))) +vfloat16mf4_t vfrsub(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m1))) +vfloat16m1_t vfmul(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m1_m))) +vfloat16m1_t vfmul(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m2))) +vfloat16m2_t vfmul(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m2_m))) +vfloat16m2_t vfmul(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m4))) +vfloat16m4_t vfmul(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m4_m))) +vfloat16m4_t vfmul(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m8))) +vfloat16m8_t vfmul(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16m8_m))) +vfloat16m8_t vfmul(vbool2_t 
op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16mf2))) +vfloat16mf2_t vfmul(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16mf2_m))) +vfloat16mf2_t vfmul(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16mf4))) +vfloat16mf4_t vfmul(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vv_f16mf4_m))) +vfloat16mf4_t vfmul(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m1))) +vfloat16m1_t vfmul(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m1_m))) +vfloat16m1_t vfmul(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m2))) +vfloat16m2_t vfmul(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m2_m))) +vfloat16m2_t vfmul(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m4))) +vfloat16m4_t vfmul(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m4_m))) +vfloat16m4_t vfmul(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m8))) +vfloat16m8_t vfmul(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16m8_m))) +vfloat16m8_t vfmul(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16mf2))) +vfloat16mf2_t vfmul(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16mf2_m))) +vfloat16mf2_t vfmul(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16mf4))) +vfloat16mf4_t vfmul(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmul_vf_f16mf4_m))) +vfloat16mf4_t vfmul(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m1))) +vfloat16m1_t vfdiv(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m1_m))) +vfloat16m1_t vfdiv(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m2))) +vfloat16m2_t vfdiv(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m2_m))) +vfloat16m2_t vfdiv(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m4))) +vfloat16m4_t vfdiv(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m4_m))) +vfloat16m4_t vfdiv(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m8))) +vfloat16m8_t vfdiv(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16m8_m))) +vfloat16m8_t vfdiv(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16mf2))) +vfloat16mf2_t vfdiv(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16mf2_m))) +vfloat16mf2_t vfdiv(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16mf4))) +vfloat16mf4_t vfdiv(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vv_f16mf4_m))) +vfloat16mf4_t vfdiv(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m1))) +vfloat16m1_t vfdiv(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m1_m))) +vfloat16m1_t vfdiv(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m2))) +vfloat16m2_t vfdiv(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m2_m))) +vfloat16m2_t vfdiv(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m4))) +vfloat16m4_t vfdiv(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m4_m))) +vfloat16m4_t vfdiv(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m8))) +vfloat16m8_t vfdiv(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16m8_m))) +vfloat16m8_t vfdiv(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16mf2))) +vfloat16mf2_t vfdiv(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16mf2_m))) +vfloat16mf2_t vfdiv(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16mf4))) +vfloat16mf4_t vfdiv(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfdiv_vf_f16mf4_m))) +vfloat16mf4_t vfdiv(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m1))) +vfloat16m1_t vfrdiv(vfloat16m1_t 
op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m1_m))) +vfloat16m1_t vfrdiv(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m2))) +vfloat16m2_t vfrdiv(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m2_m))) +vfloat16m2_t vfrdiv(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m4))) +vfloat16m4_t vfrdiv(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m4_m))) +vfloat16m4_t vfrdiv(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m8))) +vfloat16m8_t vfrdiv(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16m8_m))) +vfloat16m8_t vfrdiv(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16mf2))) +vfloat16mf2_t vfrdiv(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16mf2_m))) +vfloat16mf2_t vfrdiv(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16mf4))) +vfloat16mf4_t vfrdiv(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrdiv_vf_f16mf4_m))) +vfloat16mf4_t vfrdiv(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m1))) +vfloat16m1_t vfmacc(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m1_m))) +vfloat16m1_t vfmacc(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m2))) +vfloat16m2_t vfmacc(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m2_m))) +vfloat16m2_t vfmacc(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m4))) +vfloat16m4_t vfmacc(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m4_m))) +vfloat16m4_t vfmacc(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m8))) +vfloat16m8_t vfmacc(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m8_m))) +vfloat16m8_t vfmacc(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16mf2))) +vfloat16mf2_t vfmacc(vfloat16mf2_t op0, 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m1)))
+vfloat16m1_t vfmacc(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m1_m)))
+vfloat16m1_t vfmacc(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m2)))
+vfloat16m2_t vfmacc(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m2_m)))
+vfloat16m2_t vfmacc(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m4)))
+vfloat16m4_t vfmacc(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m4_m)))
+vfloat16m4_t vfmacc(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m8)))
+vfloat16m8_t vfmacc(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16m8_m)))
+vfloat16m8_t vfmacc(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16mf2)))
+vfloat16mf2_t vfmacc(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16mf2_m)))
+vfloat16mf2_t vfmacc(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16mf4)))
+vfloat16mf4_t vfmacc(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vv_f16mf4_m)))
+vfloat16mf4_t vfmacc(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m1)))
+vfloat16m1_t vfmacc(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m1_m)))
+vfloat16m1_t vfmacc(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m2)))
+vfloat16m2_t vfmacc(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m2_m)))
+vfloat16m2_t vfmacc(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m4)))
+vfloat16m4_t vfmacc(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m4_m)))
+vfloat16m4_t vfmacc(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m8)))
+vfloat16m8_t vfmacc(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16m8_m)))
+vfloat16m8_t vfmacc(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16mf2)))
+vfloat16mf2_t vfmacc(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16mf2_m)))
+vfloat16mf2_t vfmacc(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16mf4)))
+vfloat16mf4_t vfmacc(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmacc_vf_f16mf4_m)))
+vfloat16mf4_t vfmacc(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m1)))
+vfloat16m1_t vfnmacc(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m1_m)))
+vfloat16m1_t vfnmacc(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m2)))
+vfloat16m2_t vfnmacc(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m2_m)))
+vfloat16m2_t vfnmacc(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m4)))
+vfloat16m4_t vfnmacc(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m4_m)))
+vfloat16m4_t vfnmacc(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m8)))
+vfloat16m8_t vfnmacc(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16m8_m)))
+vfloat16m8_t vfnmacc(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16mf2)))
+vfloat16mf2_t vfnmacc(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16mf2_m)))
+vfloat16mf2_t vfnmacc(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16mf4)))
+vfloat16mf4_t vfnmacc(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vv_f16mf4_m)))
+vfloat16mf4_t vfnmacc(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m1)))
+vfloat16m1_t vfnmacc(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m1_m)))
+vfloat16m1_t vfnmacc(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m2)))
+vfloat16m2_t vfnmacc(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m2_m)))
+vfloat16m2_t vfnmacc(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m4)))
+vfloat16m4_t vfnmacc(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m4_m)))
+vfloat16m4_t vfnmacc(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m8)))
+vfloat16m8_t vfnmacc(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16m8_m)))
+vfloat16m8_t vfnmacc(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16mf2)))
+vfloat16mf2_t vfnmacc(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16mf2_m)))
+vfloat16mf2_t vfnmacc(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16mf4)))
+vfloat16mf4_t vfnmacc(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmacc_vf_f16mf4_m)))
+vfloat16mf4_t vfnmacc(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m1)))
+vfloat16m1_t vfmsac(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m1_m)))
+vfloat16m1_t vfmsac(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m2)))
+vfloat16m2_t vfmsac(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m2_m)))
+vfloat16m2_t vfmsac(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m4)))
+vfloat16m4_t vfmsac(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m4_m)))
+vfloat16m4_t vfmsac(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m8)))
+vfloat16m8_t vfmsac(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16m8_m)))
+vfloat16m8_t vfmsac(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16mf2)))
+vfloat16mf2_t vfmsac(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16mf2_m)))
+vfloat16mf2_t vfmsac(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16mf4)))
+vfloat16mf4_t vfmsac(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vv_f16mf4_m)))
+vfloat16mf4_t vfmsac(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m1)))
+vfloat16m1_t vfmsac(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m1_m)))
+vfloat16m1_t vfmsac(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m2)))
+vfloat16m2_t vfmsac(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m2_m)))
+vfloat16m2_t vfmsac(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m4)))
+vfloat16m4_t vfmsac(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m4_m)))
+vfloat16m4_t vfmsac(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m8)))
+vfloat16m8_t vfmsac(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16m8_m)))
+vfloat16m8_t vfmsac(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16mf2)))
+vfloat16mf2_t vfmsac(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16mf2_m)))
+vfloat16mf2_t vfmsac(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16mf4)))
+vfloat16mf4_t vfmsac(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsac_vf_f16mf4_m)))
+vfloat16mf4_t vfmsac(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m1)))
+vfloat16m1_t vfnmsac(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m1_m)))
+vfloat16m1_t vfnmsac(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m2)))
+vfloat16m2_t vfnmsac(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m2_m)))
+vfloat16m2_t vfnmsac(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m4)))
+vfloat16m4_t vfnmsac(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m4_m)))
+vfloat16m4_t vfnmsac(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m8)))
+vfloat16m8_t vfnmsac(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16m8_m)))
+vfloat16m8_t vfnmsac(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16mf2)))
+vfloat16mf2_t vfnmsac(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16mf2_m)))
+vfloat16mf2_t vfnmsac(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16mf4)))
+vfloat16mf4_t vfnmsac(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vv_f16mf4_m)))
+vfloat16mf4_t vfnmsac(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m1)))
+vfloat16m1_t vfnmsac(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m1_m)))
+vfloat16m1_t vfnmsac(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m2)))
+vfloat16m2_t vfnmsac(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m2_m)))
+vfloat16m2_t vfnmsac(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m4)))
+vfloat16m4_t vfnmsac(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m4_m)))
+vfloat16m4_t vfnmsac(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m8)))
+vfloat16m8_t vfnmsac(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16m8_m)))
+vfloat16m8_t vfnmsac(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16mf2)))
+vfloat16mf2_t vfnmsac(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16mf2_m)))
+vfloat16mf2_t vfnmsac(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16mf4)))
+vfloat16mf4_t vfnmsac(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsac_vf_f16mf4_m)))
+vfloat16mf4_t vfnmsac(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
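+// [Editorial usage sketch -- not part of the upstream generated header.]
+// The four accumulate forms above differ only in signs (per the RVV
+// spec): vfmacc -> op1*op2 + op0, vfnmacc -> -(op1*op2) - op0,
+// vfmsac -> op1*op2 - op0, vfnmsac -> -(op1*op2) + op0, where op0 is the
+// accumulator and the vf forms broadcast a _Float16 scalar multiplier.
+// The helper name below is hypothetical.
+static inline vfloat16m2_t example_fma_f16m2(vfloat16m2_t acc,
+                                             vfloat16m2_t a,
+                                             vfloat16m2_t b, size_t vl) {
+  acc = vfmacc(acc, a, b, vl);              // acc[i] += a[i] * b[i]
+  return vfmacc(acc, (_Float16)0.5, b, vl); // acc[i] += 0.5 * b[i]
+}
+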
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m1)))
+vfloat16m1_t vfmadd(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m1_m)))
+vfloat16m1_t vfmadd(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m2)))
+vfloat16m2_t vfmadd(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m2_m)))
+vfloat16m2_t vfmadd(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m4)))
+vfloat16m4_t vfmadd(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m4_m)))
+vfloat16m4_t vfmadd(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m8)))
+vfloat16m8_t vfmadd(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16m8_m)))
+vfloat16m8_t vfmadd(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16mf2)))
+vfloat16mf2_t vfmadd(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16mf2_m)))
+vfloat16mf2_t vfmadd(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16mf4)))
+vfloat16mf4_t vfmadd(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vv_f16mf4_m)))
+vfloat16mf4_t vfmadd(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m1)))
+vfloat16m1_t vfmadd(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m1_m)))
+vfloat16m1_t vfmadd(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m2)))
+vfloat16m2_t vfmadd(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m2_m)))
+vfloat16m2_t vfmadd(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m4)))
+vfloat16m4_t vfmadd(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m4_m)))
+vfloat16m4_t vfmadd(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m8)))
+vfloat16m8_t vfmadd(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16m8_m)))
+vfloat16m8_t vfmadd(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16mf2)))
+vfloat16mf2_t vfmadd(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16mf2_m)))
+vfloat16mf2_t vfmadd(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16mf4)))
+vfloat16mf4_t vfmadd(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmadd_vf_f16mf4_m)))
+vfloat16mf4_t vfmadd(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m1)))
+vfloat16m1_t vfnmadd(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m1_m)))
+vfloat16m1_t vfnmadd(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m2)))
+vfloat16m2_t vfnmadd(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m2_m)))
+vfloat16m2_t vfnmadd(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m4)))
+vfloat16m4_t vfnmadd(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m4_m)))
+vfloat16m4_t vfnmadd(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m8)))
+vfloat16m8_t vfnmadd(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16m8_m)))
+vfloat16m8_t vfnmadd(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16mf2)))
+vfloat16mf2_t vfnmadd(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16mf2_m)))
+vfloat16mf2_t vfnmadd(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16mf4)))
+vfloat16mf4_t vfnmadd(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vv_f16mf4_m)))
+vfloat16mf4_t vfnmadd(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m1)))
+vfloat16m1_t vfnmadd(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m1_m)))
+vfloat16m1_t vfnmadd(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m2)))
+vfloat16m2_t vfnmadd(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m2_m)))
+vfloat16m2_t vfnmadd(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m4)))
+vfloat16m4_t vfnmadd(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m4_m)))
+vfloat16m4_t vfnmadd(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m8)))
+vfloat16m8_t vfnmadd(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16m8_m)))
+vfloat16m8_t vfnmadd(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16mf2)))
+vfloat16mf2_t vfnmadd(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16mf2_m)))
+vfloat16mf2_t vfnmadd(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16mf4)))
+vfloat16mf4_t vfnmadd(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmadd_vf_f16mf4_m)))
+vfloat16mf4_t vfnmadd(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m1)))
+vfloat16m1_t vfmsub(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m1_m)))
+vfloat16m1_t vfmsub(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m2)))
+vfloat16m2_t vfmsub(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m2_m)))
+vfloat16m2_t vfmsub(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m4)))
+vfloat16m4_t vfmsub(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m4_m)))
+vfloat16m4_t vfmsub(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m8)))
+vfloat16m8_t vfmsub(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16m8_m)))
+vfloat16m8_t vfmsub(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16mf2)))
+vfloat16mf2_t vfmsub(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16mf2_m)))
+vfloat16mf2_t vfmsub(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16mf4)))
+vfloat16mf4_t vfmsub(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vv_f16mf4_m)))
+vfloat16mf4_t vfmsub(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m1)))
+vfloat16m1_t vfmsub(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m1_m)))
+vfloat16m1_t vfmsub(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m2)))
+vfloat16m2_t vfmsub(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m2_m)))
+vfloat16m2_t vfmsub(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m4)))
+vfloat16m4_t vfmsub(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m4_m)))
+vfloat16m4_t vfmsub(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m8)))
+vfloat16m8_t vfmsub(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16m8_m)))
+vfloat16m8_t vfmsub(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16mf2)))
+vfloat16mf2_t vfmsub(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16mf2_m)))
+vfloat16mf2_t vfmsub(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16mf4)))
+vfloat16mf4_t vfmsub(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmsub_vf_f16mf4_m)))
+vfloat16mf4_t vfmsub(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m1)))
+vfloat16m1_t vfnmsub(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m1_m)))
+vfloat16m1_t vfnmsub(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m2)))
+vfloat16m2_t vfnmsub(vfloat16m2_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m2_m)))
+vfloat16m2_t vfnmsub(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m4)))
+vfloat16m4_t vfnmsub(vfloat16m4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m4_m)))
+vfloat16m4_t vfnmsub(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m8)))
+vfloat16m8_t vfnmsub(vfloat16m8_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16m8_m)))
+vfloat16m8_t vfnmsub(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16mf2)))
+vfloat16mf2_t vfnmsub(vfloat16mf2_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16mf2_m)))
+vfloat16mf2_t vfnmsub(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16mf4)))
+vfloat16mf4_t vfnmsub(vfloat16mf4_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vv_f16mf4_m)))
+vfloat16mf4_t vfnmsub(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m1)))
+vfloat16m1_t vfnmsub(vfloat16m1_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m1_m)))
+vfloat16m1_t vfnmsub(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m2)))
+vfloat16m2_t vfnmsub(vfloat16m2_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m2_m)))
+vfloat16m2_t vfnmsub(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m4)))
+vfloat16m4_t vfnmsub(vfloat16m4_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m4_m)))
+vfloat16m4_t vfnmsub(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m8)))
+vfloat16m8_t vfnmsub(vfloat16m8_t op0, _Float16 op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16m8_m)))
+vfloat16m8_t vfnmsub(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16mf2)))
+vfloat16mf2_t vfnmsub(vfloat16mf2_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16mf2_m)))
+vfloat16mf2_t vfnmsub(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16mf4)))
+vfloat16mf4_t vfnmsub(vfloat16mf4_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfnmsub_vf_f16mf4_m)))
+vfloat16mf4_t vfnmsub(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4);
+
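+// [Editorial usage sketch -- not part of the upstream generated header.]
+// The *madd/*msub forms multiply the destination operand rather than the
+// two sources: vfmadd -> op0*op1 + op2, vfmsub -> op0*op1 - op2,
+// vfnmadd -> -(op0*op1) - op2, vfnmsub -> -(op0*op1) + op2 (per the RVV
+// spec); contrast vfmacc above, which computes op1*op2 + op0. The helper
+// name below is hypothetical.
+static inline vfloat16m1_t example_madd_f16m1(vfloat16m1_t x,
+                                              vfloat16m1_t scale,
+                                              vfloat16m1_t bias, size_t vl) {
+  return vfmadd(x, scale, bias, vl);        // x[i] * scale[i] + bias[i]
+}
+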
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m1)))
+vfloat16m1_t vfmin(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m1_m)))
+vfloat16m1_t vfmin(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m2)))
+vfloat16m2_t vfmin(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m2_m)))
+vfloat16m2_t vfmin(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m4)))
+vfloat16m4_t vfmin(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m4_m)))
+vfloat16m4_t vfmin(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m8)))
+vfloat16m8_t vfmin(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16m8_m)))
+vfloat16m8_t vfmin(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16mf2)))
+vfloat16mf2_t vfmin(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16mf2_m)))
+vfloat16mf2_t vfmin(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16mf4)))
+vfloat16mf4_t vfmin(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vv_f16mf4_m)))
+vfloat16mf4_t vfmin(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m1)))
+vfloat16m1_t vfmin(vfloat16m1_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m1_m)))
+vfloat16m1_t vfmin(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m2)))
+vfloat16m2_t vfmin(vfloat16m2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m2_m)))
+vfloat16m2_t vfmin(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m4)))
+vfloat16m4_t vfmin(vfloat16m4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m4_m)))
+vfloat16m4_t vfmin(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m8)))
+vfloat16m8_t vfmin(vfloat16m8_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16m8_m)))
+vfloat16m8_t vfmin(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16mf2)))
+vfloat16mf2_t vfmin(vfloat16mf2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16mf2_m)))
+vfloat16mf2_t vfmin(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16mf4)))
+vfloat16mf4_t vfmin(vfloat16mf4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmin_vf_f16mf4_m)))
+vfloat16mf4_t vfmin(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m1)))
+vfloat16m1_t vfmax(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m1_m)))
+vfloat16m1_t vfmax(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m2)))
+vfloat16m2_t vfmax(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m2_m)))
+vfloat16m2_t vfmax(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m4)))
+vfloat16m4_t vfmax(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m4_m)))
+vfloat16m4_t vfmax(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m8)))
+vfloat16m8_t vfmax(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16m8_m)))
+vfloat16m8_t vfmax(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16mf2)))
+vfloat16mf2_t vfmax(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16mf2_m)))
+vfloat16mf2_t vfmax(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16mf4)))
+vfloat16mf4_t vfmax(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vv_f16mf4_m)))
+vfloat16mf4_t vfmax(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m1)))
+vfloat16m1_t vfmax(vfloat16m1_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m1_m)))
+vfloat16m1_t vfmax(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m2)))
+vfloat16m2_t vfmax(vfloat16m2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m2_m)))
+vfloat16m2_t vfmax(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m4)))
+vfloat16m4_t vfmax(vfloat16m4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m4_m)))
+vfloat16m4_t vfmax(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m8)))
+vfloat16m8_t vfmax(vfloat16m8_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16m8_m)))
+vfloat16m8_t vfmax(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16mf2)))
+vfloat16mf2_t vfmax(vfloat16mf2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16mf2_m)))
+vfloat16mf2_t vfmax(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16mf4)))
+vfloat16mf4_t vfmax(vfloat16mf4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmax_vf_f16mf4_m)))
+vfloat16mf4_t vfmax(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4);
+
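+// [Editorial usage sketch -- not part of the upstream generated header.]
+// vfmin/vfmax select element-wise minima/maxima; the vf forms compare
+// against a broadcast _Float16 scalar, which makes a clamp a two-call
+// composition. The helper name below is hypothetical.
+static inline vfloat16m1_t example_clamp_f16m1(vfloat16m1_t x, _Float16 lo,
+                                               _Float16 hi, size_t vl) {
+  return vfmin(vfmax(x, lo, vl), hi, vl);   // clamp x[i] into [lo, hi]
+}
+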
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m1)))
+vfloat16m1_t vfsgnj(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m1_m)))
+vfloat16m1_t vfsgnj(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m2)))
+vfloat16m2_t vfsgnj(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m2_m)))
+vfloat16m2_t vfsgnj(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m4)))
+vfloat16m4_t vfsgnj(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m4_m)))
+vfloat16m4_t vfsgnj(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m8)))
+vfloat16m8_t vfsgnj(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16m8_m)))
+vfloat16m8_t vfsgnj(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16mf2)))
+vfloat16mf2_t vfsgnj(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16mf2_m)))
+vfloat16mf2_t vfsgnj(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16mf4)))
+vfloat16mf4_t vfsgnj(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vv_f16mf4_m)))
+vfloat16mf4_t vfsgnj(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m1)))
+vfloat16m1_t vfsgnj(vfloat16m1_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m1_m)))
+vfloat16m1_t vfsgnj(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m2)))
+vfloat16m2_t vfsgnj(vfloat16m2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m2_m)))
+vfloat16m2_t vfsgnj(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m4)))
+vfloat16m4_t vfsgnj(vfloat16m4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m4_m)))
+vfloat16m4_t vfsgnj(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m8)))
+vfloat16m8_t vfsgnj(vfloat16m8_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16m8_m)))
+vfloat16m8_t vfsgnj(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16mf2)))
+vfloat16mf2_t vfsgnj(vfloat16mf2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16mf2_m)))
+vfloat16mf2_t vfsgnj(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16mf4)))
+vfloat16mf4_t vfsgnj(vfloat16mf4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnj_vf_f16mf4_m)))
+vfloat16mf4_t vfsgnj(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m1)))
+vfloat16m1_t vfsgnjn(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m1_m)))
+vfloat16m1_t vfsgnjn(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m2)))
+vfloat16m2_t vfsgnjn(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m2_m)))
+vfloat16m2_t vfsgnjn(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m4)))
+vfloat16m4_t vfsgnjn(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m4_m)))
+vfloat16m4_t vfsgnjn(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m8)))
+vfloat16m8_t vfsgnjn(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16m8_m)))
+vfloat16m8_t vfsgnjn(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16mf2)))
+vfloat16mf2_t vfsgnjn(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16mf2_m)))
+vfloat16mf2_t vfsgnjn(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16mf4)))
+vfloat16mf4_t vfsgnjn(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vv_f16mf4_m)))
+vfloat16mf4_t vfsgnjn(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m1)))
+vfloat16m1_t vfsgnjn(vfloat16m1_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m1_m)))
+vfloat16m1_t vfsgnjn(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m2)))
+vfloat16m2_t vfsgnjn(vfloat16m2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m2_m)))
+vfloat16m2_t vfsgnjn(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m4)))
+vfloat16m4_t vfsgnjn(vfloat16m4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m4_m)))
+vfloat16m4_t vfsgnjn(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m8)))
+vfloat16m8_t vfsgnjn(vfloat16m8_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16m8_m)))
+vfloat16m8_t vfsgnjn(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16mf2)))
+vfloat16mf2_t vfsgnjn(vfloat16mf2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16mf2_m)))
+vfloat16mf2_t vfsgnjn(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16mf4)))
+vfloat16mf4_t vfsgnjn(vfloat16mf4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjn_vf_f16mf4_m)))
+vfloat16mf4_t vfsgnjn(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m1)))
+vfloat16m1_t vfsgnjx(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m1_m)))
+vfloat16m1_t vfsgnjx(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m2)))
+vfloat16m2_t vfsgnjx(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m2_m)))
+vfloat16m2_t vfsgnjx(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m4)))
+vfloat16m4_t vfsgnjx(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m4_m)))
+vfloat16m4_t vfsgnjx(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m8)))
+vfloat16m8_t vfsgnjx(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16m8_m)))
+vfloat16m8_t vfsgnjx(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16mf2)))
+vfloat16mf2_t vfsgnjx(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16mf2_m)))
+vfloat16mf2_t vfsgnjx(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16mf4)))
+vfloat16mf4_t vfsgnjx(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vv_f16mf4_m)))
+vfloat16mf4_t vfsgnjx(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m1)))
+vfloat16m1_t vfsgnjx(vfloat16m1_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m1_m)))
+vfloat16m1_t vfsgnjx(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m2)))
+vfloat16m2_t vfsgnjx(vfloat16m2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m2_m)))
+vfloat16m2_t vfsgnjx(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m4)))
+vfloat16m4_t vfsgnjx(vfloat16m4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m4_m)))
+vfloat16m4_t vfsgnjx(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m8)))
+vfloat16m8_t vfsgnjx(vfloat16m8_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16m8_m)))
+vfloat16m8_t vfsgnjx(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16mf2)))
+vfloat16mf2_t vfsgnjx(vfloat16mf2_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16mf2_m)))
+vfloat16mf2_t vfsgnjx(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16mf4)))
+vfloat16mf4_t vfsgnjx(vfloat16mf4_t op0, _Float16 op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsgnjx_vf_f16mf4_m)))
+vfloat16mf4_t vfsgnjx(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4);
+
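+// [Editorial usage sketch -- not part of the upstream generated header.]
+// Sign injection takes the magnitude from op0 and the sign from op1:
+// vfsgnj behaves like an element-wise copysign, vfsgnjn injects the
+// negated sign, and vfsgnjx XORs the two sign bits, so vfsgnjn(x, x, vl)
+// is a vector negate and vfsgnjx(x, x, vl) an absolute value. The helper
+// name below is hypothetical.
+static inline vfloat16m1_t example_copysign_f16m1(vfloat16m1_t mag,
+                                                  vfloat16m1_t sgn,
+                                                  size_t vl) {
+  return vfsgnj(mag, sgn, vl);              // |mag[i]| carrying sgn[i]'s sign
+}
+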
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m1)))
+vfloat16m1_t vfabs(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m1_m)))
+vfloat16m1_t vfabs(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m2)))
+vfloat16m2_t vfabs(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m2_m)))
+vfloat16m2_t vfabs(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m4)))
+vfloat16m4_t vfabs(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m4_m)))
+vfloat16m4_t vfabs(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m8)))
+vfloat16m8_t vfabs(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16m8_m)))
+vfloat16m8_t vfabs(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16mf2)))
+vfloat16mf2_t vfabs(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16mf2_m)))
+vfloat16mf2_t vfabs(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16mf4)))
+vfloat16mf4_t vfabs(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfabs_v_f16mf4_m)))
+vfloat16mf4_t vfabs(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
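+// [Editorial usage sketch -- not part of the upstream generated header.]
+// vfabs is the unary special case of sign injection (it lowers to a
+// vfsgnjx of the operand with itself). In the masked form the source can
+// double as the maskedoff operand so inactive elements pass through
+// unchanged. The helper name below is hypothetical.
+static inline vfloat16m1_t example_abs_f16m1(vbool16_t mask, vfloat16m1_t x,
+                                             size_t vl) {
+  return vfabs(mask, x, x, vl);             // |x[i]| where mask[i] is set, else x[i]
+}
+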
vmfeq(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f16mf2_b32))) +vbool32_t vmfeq(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f16mf2_b32_m))) +vbool32_t vmfeq(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f16mf4_b64))) +vbool64_t vmfeq(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfeq_vf_f16mf4_b64_m))) +vbool64_t vmfeq(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m1_b16))) +vbool16_t vmfne(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m1_b16_m))) +vbool16_t vmfne(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m2_b8))) +vbool8_t vmfne(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m2_b8_m))) +vbool8_t vmfne(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m4_b4))) +vbool4_t vmfne(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m4_b4_m))) +vbool4_t vmfne(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m8_b2))) +vbool2_t vmfne(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16m8_b2_m))) +vbool2_t vmfne(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16mf2_b32))) +vbool32_t vmfne(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16mf2_b32_m))) +vbool32_t vmfne(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16mf4_b64))) +vbool64_t vmfne(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vv_f16mf4_b64_m))) +vbool64_t vmfne(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m1_b16))) +vbool16_t vmfne(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m1_b16_m))) +vbool16_t vmfne(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m2_b8))) +vbool8_t vmfne(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m2_b8_m))) +vbool8_t vmfne(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m4_b4))) +vbool4_t vmfne(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m4_b4_m))) +vbool4_t vmfne(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m8_b2))) +vbool2_t vmfne(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16m8_b2_m))) +vbool2_t vmfne(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16mf2_b32))) +vbool32_t vmfne(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16mf2_b32_m))) +vbool32_t vmfne(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16mf4_b64))) +vbool64_t vmfne(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfne_vf_f16mf4_b64_m))) +vbool64_t vmfne(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m1_b16))) +vbool16_t vmflt(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m1_b16_m))) +vbool16_t vmflt(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m2_b8))) +vbool8_t vmflt(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m2_b8_m))) +vbool8_t vmflt(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m4_b4))) +vbool4_t vmflt(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m4_b4_m))) +vbool4_t vmflt(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m8_b2))) +vbool2_t vmflt(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16m8_b2_m))) +vbool2_t vmflt(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16mf2_b32))) +vbool32_t vmflt(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16mf2_b32_m))) +vbool32_t vmflt(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16mf4_b64))) +vbool64_t vmflt(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vv_f16mf4_b64_m))) +vbool64_t vmflt(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m1_b16))) +vbool16_t vmflt(vfloat16m1_t op0, _Float16 op1, size_t op2); + 
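+// ---- Editorial example (illustrative sketch, not part of the generated
+// header). It shows how these overloads compose: a floating-point compare
+// such as vmflt() returns a mask (vboolN_t) that the vfmerge() overloads
+// declared further below can consume. The helper name clamp_below and the
+// surrounding RVV/_Float16 (zvfh-style) setup are assumptions; the snippet
+// is kept in a comment because vfmerge() is declared later in this header.
+//
+//   static inline vfloat16m1_t clamp_below(vfloat16m1_t v, _Float16 lo,
+//                                          size_t vl) {
+//     vbool16_t m = vmflt(v, lo, vl); // mask of lanes where v[i] < lo
+//     return vfmerge(m, v, lo, vl);   // masked lanes take the scalar lo
+//   }
+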
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m1_b16_m))) +vbool16_t vmflt(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m2_b8))) +vbool8_t vmflt(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m2_b8_m))) +vbool8_t vmflt(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m4_b4))) +vbool4_t vmflt(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m4_b4_m))) +vbool4_t vmflt(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m8_b2))) +vbool2_t vmflt(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16m8_b2_m))) +vbool2_t vmflt(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16mf2_b32))) +vbool32_t vmflt(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16mf2_b32_m))) +vbool32_t vmflt(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16mf4_b64))) +vbool64_t vmflt(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmflt_vf_f16mf4_b64_m))) +vbool64_t vmflt(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m1_b16))) +vbool16_t vmfle(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m1_b16_m))) +vbool16_t vmfle(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m2_b8))) +vbool8_t vmfle(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m2_b8_m))) +vbool8_t vmfle(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m4_b4))) +vbool4_t vmfle(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m4_b4_m))) +vbool4_t vmfle(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m8_b2))) +vbool2_t vmfle(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16m8_b2_m))) +vbool2_t vmfle(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16mf2_b32))) +vbool32_t vmfle(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16mf2_b32_m))) +vbool32_t vmfle(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, 
vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16mf4_b64))) +vbool64_t vmfle(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vv_f16mf4_b64_m))) +vbool64_t vmfle(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m1_b16))) +vbool16_t vmfle(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m1_b16_m))) +vbool16_t vmfle(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m2_b8))) +vbool8_t vmfle(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m2_b8_m))) +vbool8_t vmfle(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m4_b4))) +vbool4_t vmfle(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m4_b4_m))) +vbool4_t vmfle(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m8_b2))) +vbool2_t vmfle(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16m8_b2_m))) +vbool2_t vmfle(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16mf2_b32))) +vbool32_t vmfle(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16mf2_b32_m))) +vbool32_t vmfle(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16mf4_b64))) +vbool64_t vmfle(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfle_vf_f16mf4_b64_m))) +vbool64_t vmfle(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m1_b16))) +vbool16_t vmfgt(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m1_b16_m))) +vbool16_t vmfgt(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m2_b8))) +vbool8_t vmfgt(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m2_b8_m))) +vbool8_t vmfgt(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m4_b4))) +vbool4_t vmfgt(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m4_b4_m))) +vbool4_t vmfgt(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m8_b2))) +vbool2_t vmfgt(vfloat16m8_t op0, 
vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16m8_b2_m))) +vbool2_t vmfgt(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16mf2_b32))) +vbool32_t vmfgt(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16mf2_b32_m))) +vbool32_t vmfgt(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16mf4_b64))) +vbool64_t vmfgt(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vv_f16mf4_b64_m))) +vbool64_t vmfgt(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m1_b16))) +vbool16_t vmfgt(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m1_b16_m))) +vbool16_t vmfgt(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m2_b8))) +vbool8_t vmfgt(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m2_b8_m))) +vbool8_t vmfgt(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m4_b4))) +vbool4_t vmfgt(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m4_b4_m))) +vbool4_t vmfgt(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m8_b2))) +vbool2_t vmfgt(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16m8_b2_m))) +vbool2_t vmfgt(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16mf2_b32))) +vbool32_t vmfgt(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16mf2_b32_m))) +vbool32_t vmfgt(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16mf4_b64))) +vbool64_t vmfgt(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfgt_vf_f16mf4_b64_m))) +vbool64_t vmfgt(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m1_b16))) +vbool16_t vmfge(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m1_b16_m))) +vbool16_t vmfge(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m2_b8))) +vbool8_t vmfge(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m2_b8_m))) +vbool8_t 
vmfge(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m4_b4))) +vbool4_t vmfge(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m4_b4_m))) +vbool4_t vmfge(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m8_b2))) +vbool2_t vmfge(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16m8_b2_m))) +vbool2_t vmfge(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16mf2_b32))) +vbool32_t vmfge(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16mf2_b32_m))) +vbool32_t vmfge(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16mf4_b64))) +vbool64_t vmfge(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vv_f16mf4_b64_m))) +vbool64_t vmfge(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m1_b16))) +vbool16_t vmfge(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m1_b16_m))) +vbool16_t vmfge(vbool16_t op0, vbool16_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m2_b8))) +vbool8_t vmfge(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m2_b8_m))) +vbool8_t vmfge(vbool8_t op0, vbool8_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m4_b4))) +vbool4_t vmfge(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m4_b4_m))) +vbool4_t vmfge(vbool4_t op0, vbool4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m8_b2))) +vbool2_t vmfge(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16m8_b2_m))) +vbool2_t vmfge(vbool2_t op0, vbool2_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16mf2_b32))) +vbool32_t vmfge(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16mf2_b32_m))) +vbool32_t vmfge(vbool32_t op0, vbool32_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16mf4_b64))) +vbool64_t vmfge(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmfge_vf_f16mf4_b64_m))) +vbool64_t vmfge(vbool64_t op0, vbool64_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f16m1))) +vfloat16m1_t vmerge(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f16m2))) +vfloat16m2_t vmerge(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f16m4))) +vfloat16m4_t vmerge(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f16m8))) +vfloat16m8_t vmerge(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f16mf2))) +vfloat16mf2_t vmerge(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vmerge_vvm_f16mf4))) +vfloat16mf4_t vmerge(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f16m1))) +vfloat16m1_t vfmerge(vbool16_t op0, vfloat16m1_t op1, _Float16 op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f16m2))) +vfloat16m2_t vfmerge(vbool8_t op0, vfloat16m2_t op1, _Float16 op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f16m4))) +vfloat16m4_t vfmerge(vbool4_t op0, vfloat16m4_t op1, _Float16 op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f16m8))) +vfloat16m8_t vfmerge(vbool2_t op0, vfloat16m8_t op1, _Float16 op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f16mf2))) +vfloat16mf2_t vfmerge(vbool32_t op0, vfloat16mf2_t op1, _Float16 op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmerge_vfm_f16mf4))) +vfloat16mf4_t vfmerge(vbool64_t op0, vfloat16mf4_t op1, _Float16 op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m1_f16m1))) +vfloat16m1_t vfredmax(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m1_f16m1_m))) +vfloat16m1_t vfredmax(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m2_f16m1))) +vfloat16m1_t vfredmax(vfloat16m1_t op0, vfloat16m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m2_f16m1_m))) +vfloat16m1_t vfredmax(vbool8_t op0, vfloat16m1_t op1, vfloat16m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m4_f16m1))) +vfloat16m1_t vfredmax(vfloat16m1_t op0, vfloat16m4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m4_f16m1_m))) +vfloat16m1_t vfredmax(vbool4_t op0, vfloat16m1_t op1, vfloat16m4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m8_f16m1))) +vfloat16m1_t vfredmax(vfloat16m1_t op0, vfloat16m8_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16m8_f16m1_m))) +vfloat16m1_t vfredmax(vbool2_t op0, 
vfloat16m1_t op1, vfloat16m8_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16mf2_f16m1))) +vfloat16m1_t vfredmax(vfloat16m1_t op0, vfloat16mf2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16mf2_f16m1_m))) +vfloat16m1_t vfredmax(vbool32_t op0, vfloat16m1_t op1, vfloat16mf2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16mf4_f16m1))) +vfloat16m1_t vfredmax(vfloat16m1_t op0, vfloat16mf4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmax_vs_f16mf4_f16m1_m))) +vfloat16m1_t vfredmax(vbool64_t op0, vfloat16m1_t op1, vfloat16mf4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m1_f16m1))) +vfloat16m1_t vfredmin(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m1_f16m1_m))) +vfloat16m1_t vfredmin(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m2_f16m1))) +vfloat16m1_t vfredmin(vfloat16m1_t op0, vfloat16m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m2_f16m1_m))) +vfloat16m1_t vfredmin(vbool8_t op0, vfloat16m1_t op1, vfloat16m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m4_f16m1))) +vfloat16m1_t vfredmin(vfloat16m1_t op0, vfloat16m4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m4_f16m1_m))) +vfloat16m1_t vfredmin(vbool4_t op0, vfloat16m1_t op1, vfloat16m4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m8_f16m1))) +vfloat16m1_t vfredmin(vfloat16m1_t op0, vfloat16m8_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16m8_f16m1_m))) +vfloat16m1_t vfredmin(vbool2_t op0, vfloat16m1_t op1, vfloat16m8_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16mf2_f16m1))) +vfloat16m1_t vfredmin(vfloat16m1_t op0, vfloat16mf2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16mf2_f16m1_m))) +vfloat16m1_t vfredmin(vbool32_t op0, vfloat16m1_t op1, vfloat16mf2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16mf4_f16m1))) +vfloat16m1_t vfredmin(vfloat16m1_t op0, vfloat16mf4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredmin_vs_f16mf4_f16m1_m))) +vfloat16m1_t vfredmin(vbool64_t op0, vfloat16m1_t op1, vfloat16mf4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m1_f16m1))) +vfloat16m1_t vfredsum(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m1_f16m1_m))) +vfloat16m1_t vfredsum(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, 
vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m2_f16m1))) +vfloat16m1_t vfredsum(vfloat16m1_t op0, vfloat16m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m2_f16m1_m))) +vfloat16m1_t vfredsum(vbool8_t op0, vfloat16m1_t op1, vfloat16m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m4_f16m1))) +vfloat16m1_t vfredsum(vfloat16m1_t op0, vfloat16m4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m4_f16m1_m))) +vfloat16m1_t vfredsum(vbool4_t op0, vfloat16m1_t op1, vfloat16m4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m8_f16m1))) +vfloat16m1_t vfredsum(vfloat16m1_t op0, vfloat16m8_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16m8_f16m1_m))) +vfloat16m1_t vfredsum(vbool2_t op0, vfloat16m1_t op1, vfloat16m8_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16mf2_f16m1))) +vfloat16m1_t vfredsum(vfloat16m1_t op0, vfloat16mf2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16mf2_f16m1_m))) +vfloat16m1_t vfredsum(vbool32_t op0, vfloat16m1_t op1, vfloat16mf2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16mf4_f16m1))) +vfloat16m1_t vfredsum(vfloat16m1_t op0, vfloat16mf4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredsum_vs_f16mf4_f16m1_m))) +vfloat16m1_t vfredsum(vbool64_t op0, vfloat16m1_t op1, vfloat16mf4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m1_f16m1))) +vfloat16m1_t vfredosum(vfloat16m1_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m1_f16m1_m))) +vfloat16m1_t vfredosum(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m2_f16m1))) +vfloat16m1_t vfredosum(vfloat16m1_t op0, vfloat16m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m2_f16m1_m))) +vfloat16m1_t vfredosum(vbool8_t op0, vfloat16m1_t op1, vfloat16m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m4_f16m1))) +vfloat16m1_t vfredosum(vfloat16m1_t op0, vfloat16m4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m4_f16m1_m))) +vfloat16m1_t vfredosum(vbool4_t op0, vfloat16m1_t op1, vfloat16m4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m8_f16m1))) +vfloat16m1_t vfredosum(vfloat16m1_t op0, vfloat16m8_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16m8_f16m1_m))) +vfloat16m1_t vfredosum(vbool2_t op0, vfloat16m1_t op1, vfloat16m8_t op2, vfloat16m1_t op3, size_t op4); + 
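+// ---- Editorial example (illustrative sketch, not part of the generated
+// header). In this intrinsics revision vfredsum() is the unordered
+// (reassociable) sum reduction (renamed vfredusum in later spec revisions)
+// and vfredosum() the ordered one; both take (dest, vector, scalar-seed, vl)
+// and leave the result in element 0 of the destination. The seed/readback
+// helpers vfmv_s()/vfmv_f() are declared further below, so the snippet stays
+// in a comment; it assumes acc and v are live vfloat16m1_t values.
+//
+//   acc = vfmv_s(acc, (_Float16)0, vl); // seed: acc[0] = 0
+//   acc = vfredosum(acc, v, acc, vl);   // acc[0] = 0 + ordered sum of v
+//   _Float16 s = vfmv_f(acc);           // read back the scalar result
+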
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16mf2_f16m1))) +vfloat16m1_t vfredosum(vfloat16m1_t op0, vfloat16mf2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16mf2_f16m1_m))) +vfloat16m1_t vfredosum(vbool32_t op0, vfloat16m1_t op1, vfloat16mf2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16mf4_f16m1))) +vfloat16m1_t vfredosum(vfloat16m1_t op0, vfloat16mf4_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfredosum_vs_f16mf4_f16m1_m))) +vfloat16m1_t vfredosum(vbool64_t op0, vfloat16m1_t op1, vfloat16mf4_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m1))) +void vsuxei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m1_m))) +void vsuxei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m2))) +void vsuxei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m2_m))) +void vsuxei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m4))) +void vsuxei8(_Float16 * op0, vuint8m2_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m4_m))) +void vsuxei8(vbool4_t op0, _Float16 * op1, vuint8m2_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m8))) +void vsuxei8(_Float16 * op0, vuint8m4_t op1, vfloat16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16m8_m))) +void vsuxei8(vbool2_t op0, _Float16 * op1, vuint8m4_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16mf2))) +void vsuxei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16mf2_m))) +void vsuxei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16mf4))) +void vsuxei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei8_v_f16mf4_m))) +void vsuxei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m1))) +void vsuxei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m1_m))) +void vsuxei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m2))) +void vsuxei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m2_m))) +void vsuxei16(vbool8_t op0, _Float16 * op1, vuint16m2_t 
op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m4))) +void vsuxei16(_Float16 * op0, vuint16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m4_m))) +void vsuxei16(vbool4_t op0, _Float16 * op1, vuint16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m8))) +void vsuxei16(_Float16 * op0, vuint16m8_t op1, vfloat16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16m8_m))) +void vsuxei16(vbool2_t op0, _Float16 * op1, vuint16m8_t op2, vfloat16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16mf2))) +void vsuxei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16mf2_m))) +void vsuxei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16mf4))) +void vsuxei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei16_v_f16mf4_m))) +void vsuxei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f16m1_f16))) +_Float16 vfmv_f(vfloat16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f16m2_f16))) +_Float16 vfmv_f(vfloat16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f16m4_f16))) +_Float16 vfmv_f(vfloat16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f16m8_f16))) +_Float16 vfmv_f(vfloat16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f16mf2_f16))) +_Float16 vfmv_f(vfloat16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_f_s_f16mf4_f16))) +_Float16 vfmv_f(vfloat16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f16m1))) +vfloat16m1_t vfmv_s(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f16m2))) +vfloat16m2_t vfmv_s(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f16m4))) +vfloat16m4_t vfmv_s(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f16m8))) +vfloat16m8_t vfmv_s(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f16mf2))) +vfloat16mf2_t vfmv_s(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfmv_s_f_f16mf4))) +vfloat16mf4_t vfmv_s(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m1))) +vfloat16m1_t vslideup(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m1_m))) +vfloat16m1_t vslideup(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m2))) +vfloat16m2_t vslideup(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m2_m))) +vfloat16m2_t vslideup(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m4))) +vfloat16m4_t vslideup(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m4_m))) +vfloat16m4_t vslideup(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m8))) +vfloat16m8_t vslideup(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16m8_m))) +vfloat16m8_t vslideup(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16mf2))) +vfloat16mf2_t vslideup(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16mf2_m))) +vfloat16mf2_t vslideup(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16mf4))) +vfloat16mf4_t vslideup(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslideup_vx_f16mf4_m))) +vfloat16mf4_t vslideup(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m1))) +vfloat16m1_t vslidedown(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m1_m))) +vfloat16m1_t vslidedown(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m2))) +vfloat16m2_t vslidedown(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m2_m))) +vfloat16m2_t vslidedown(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m4))) +vfloat16m4_t vslidedown(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m4_m))) +vfloat16m4_t vslidedown(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m8))) +vfloat16m8_t vslidedown(vfloat16m8_t op0, vfloat16m8_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16m8_m))) +vfloat16m8_t vslidedown(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16mf2))) +vfloat16mf2_t vslidedown(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16mf2_m))) +vfloat16mf2_t vslidedown(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16mf4))) +vfloat16mf4_t vslidedown(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vslidedown_vx_f16mf4_m))) +vfloat16mf4_t vslidedown(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m1))) +vfloat16m1_t vfslide1up(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m1_m))) +vfloat16m1_t vfslide1up(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m2))) +vfloat16m2_t vfslide1up(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m2_m))) +vfloat16m2_t vfslide1up(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m4))) +vfloat16m4_t vfslide1up(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m4_m))) +vfloat16m4_t vfslide1up(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m8))) +vfloat16m8_t vfslide1up(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16m8_m))) +vfloat16m8_t vfslide1up(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16mf2))) +vfloat16mf2_t vfslide1up(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16mf2_m))) +vfloat16mf2_t vfslide1up(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16mf4))) +vfloat16mf4_t vfslide1up(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1up_vf_f16mf4_m))) +vfloat16mf4_t vfslide1up(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16m1))) +void vsuxei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16m1_m))) +void vsuxei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16m2))) +void vsuxei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16m2_m))) +void vsuxei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16m4))) +void vsuxei32(_Float16 * op0, 
vuint32m8_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16m4_m))) +void vsuxei32(vbool4_t op0, _Float16 * op1, vuint32m8_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16mf2))) +void vsuxei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16mf2_m))) +void vsuxei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16mf4))) +void vsuxei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei32_v_f16mf4_m))) +void vsuxei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m1))) +vfloat16m1_t vfslide1down(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m1_m))) +vfloat16m1_t vfslide1down(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m2))) +vfloat16m2_t vfslide1down(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m2_m))) +vfloat16m2_t vfslide1down(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m4))) +vfloat16m4_t vfslide1down(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m4_m))) +vfloat16m4_t vfslide1down(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m8))) +vfloat16m8_t vfslide1down(vfloat16m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16m8_m))) +vfloat16m8_t vfslide1down(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16mf2))) +vfloat16mf2_t vfslide1down(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16mf2_m))) +vfloat16mf2_t vfslide1down(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16mf4))) +vfloat16mf4_t vfslide1down(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfslide1down_vf_f16mf4_m))) +vfloat16mf4_t vfslide1down(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m1))) +vfloat16m1_t vrgather(vfloat16m1_t op0, vuint16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m1_m))) +vfloat16m1_t vrgather(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m2))) +vfloat16m2_t vrgather(vfloat16m2_t op0, vuint16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m2_m))) +vfloat16m2_t vrgather(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m4))) +vfloat16m4_t vrgather(vfloat16m4_t op0, vuint16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m4_m))) +vfloat16m4_t vrgather(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m8))) +vfloat16m8_t vrgather(vfloat16m8_t op0, vuint16m8_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16m8_m))) +vfloat16m8_t vrgather(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16mf2))) +vfloat16mf2_t vrgather(vfloat16mf2_t op0, vuint16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16mf2_m))) +vfloat16mf2_t vrgather(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16mf4))) +vfloat16mf4_t vrgather(vfloat16mf4_t op0, vuint16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vv_f16mf4_m))) +vfloat16mf4_t vrgather(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m1))) +vfloat16m1_t vrgather(vfloat16m1_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m1_m))) +vfloat16m1_t vrgather(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m2))) +vfloat16m2_t vrgather(vfloat16m2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m2_m))) +vfloat16m2_t vrgather(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m4))) +vfloat16m4_t vrgather(vfloat16m4_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m4_m))) +vfloat16m4_t vrgather(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m8))) +vfloat16m8_t vrgather(vfloat16m8_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16m8_m))) +vfloat16m8_t vrgather(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16mf2))) +vfloat16mf2_t vrgather(vfloat16mf2_t op0, size_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16mf2_m))) +vfloat16mf2_t vrgather(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16mf4)))
+vfloat16mf4_t vrgather(vfloat16mf4_t op0, size_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgather_vx_f16mf4_m)))
+vfloat16mf4_t vrgather(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m1)))
+vfloat16m1_t vrgatherei16(vfloat16m1_t op0, vuint16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m1_m)))
+vfloat16m1_t vrgatherei16(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m2)))
+vfloat16m2_t vrgatherei16(vfloat16m2_t op0, vuint16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m2_m)))
+vfloat16m2_t vrgatherei16(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m4)))
+vfloat16m4_t vrgatherei16(vfloat16m4_t op0, vuint16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m4_m)))
+vfloat16m4_t vrgatherei16(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, vuint16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m8)))
+vfloat16m8_t vrgatherei16(vfloat16m8_t op0, vuint16m8_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16m8_m)))
+vfloat16m8_t vrgatherei16(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, vuint16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16mf2)))
+vfloat16mf2_t vrgatherei16(vfloat16mf2_t op0, vuint16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16mf2_m)))
+vfloat16mf2_t vrgatherei16(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16mf4)))
+vfloat16mf4_t vrgatherei16(vfloat16mf4_t op0, vuint16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vrgatherei16_vv_f16mf4_m)))
+vfloat16mf4_t vrgatherei16(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f16m1)))
+vfloat16m1_t vcompress(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f16m2)))
+vfloat16m2_t vcompress(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f16m4)))
+vfloat16m4_t vcompress(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f16m8)))
+vfloat16m8_t vcompress(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f16mf2)))
+vfloat16mf2_t vcompress(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vcompress_vm_f16mf4)))
+vfloat16mf4_t vcompress(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16m1)))
+void vsuxei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16m1_m)))
+void vsuxei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16m2)))
+void vsuxei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16m2_m)))
+void vsuxei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16mf2)))
+void vsuxei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16mf2_m)))
+void vsuxei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16mf4)))
+void vsuxei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxei64_v_f16mf4_m)))
+void vsuxei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m1)))
+void vsoxei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m1_m)))
+void vsoxei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m2)))
+void vsoxei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m2_m)))
+void vsoxei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m4)))
+void vsoxei8(_Float16 * op0, vuint8m2_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m4_m)))
+void vsoxei8(vbool4_t op0, _Float16 * op1, vuint8m2_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m8)))
+void vsoxei8(_Float16 * op0, vuint8m4_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16m8_m)))
+void vsoxei8(vbool2_t op0, _Float16 * op1, vuint8m4_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16mf2)))
+void vsoxei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16mf2_m)))
+void vsoxei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16mf4)))
+void vsoxei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei8_v_f16mf4_m)))
+void vsoxei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m1)))
+void vsoxei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m1_m)))
+void vsoxei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m2)))
+void vsoxei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m2_m)))
+void vsoxei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m4)))
+void vsoxei16(_Float16 * op0, vuint16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m4_m)))
+void vsoxei16(vbool4_t op0, _Float16 * op1, vuint16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m8)))
+void vsoxei16(_Float16 * op0, vuint16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16m8_m)))
+void vsoxei16(vbool2_t op0, _Float16 * op1, vuint16m8_t op2, vfloat16m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16mf2)))
+void vsoxei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16mf2_m)))
+void vsoxei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16mf4)))
+void vsoxei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei16_v_f16mf4_m)))
+void vsoxei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16m1)))
+void vsoxei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16m1_m)))
+void vsoxei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16m2)))
+void vsoxei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16m2_m)))
+void vsoxei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16m4)))
+void vsoxei32(_Float16 * op0, vuint32m8_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16m4_m)))
+void vsoxei32(vbool4_t op0, _Float16 * op1, vuint32m8_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16mf2)))
+void vsoxei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16mf2_m)))
+void vsoxei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16mf4)))
+void vsoxei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei32_v_f16mf4_m)))
+void vsoxei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16m1)))
+void vsoxei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16m1_m)))
+void vsoxei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16m2)))
+void vsoxei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16m2_m)))
+void vsoxei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16mf2)))
+void vsoxei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16mf2_m)))
+void vsoxei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16mf4)))
+void vsoxei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxei64_v_f16mf4_m)))
+void vsoxei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_f16m1_m)))
+vfloat16m1_t vle16ff(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_f16m2_m)))
+vfloat16m2_t vle16ff(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_f16m4_m)))
+vfloat16m4_t vle16ff(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_f16m8_m)))
+vfloat16m8_t vle16ff(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_f16mf2_m)))
+vfloat16mf2_t vle16ff(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16ff_v_f16mf4_m)))
+vfloat16mf4_t vle16ff(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, size_t * op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m1)))
+vfloat16m1_t vfneg(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m1_m)))
+vfloat16m1_t vfneg(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m2)))
+vfloat16m2_t vfneg(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m2_m)))
+vfloat16m2_t vfneg(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m4)))
+vfloat16m4_t vfneg(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m4_m)))
+vfloat16m4_t vfneg(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m8)))
+vfloat16m8_t vfneg(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16m8_m)))
+vfloat16m8_t vfneg(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16mf2)))
+vfloat16mf2_t vfneg(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16mf2_m)))
+vfloat16mf2_t vfneg(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16mf4)))
+vfloat16mf4_t vfneg(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfneg_v_f16mf4_m)))
+vfloat16mf4_t vfneg(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_f16m1_m)))
+vfloat16m1_t vle16(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_f16m2_m)))
+vfloat16m2_t vle16(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_f16m4_m)))
+vfloat16m4_t vle16(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_f16m8_m)))
+vfloat16m8_t vle16(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_f16mf2_m)))
+vfloat16mf2_t vle16(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vle16_v_f16mf4_m)))
+vfloat16mf4_t vle16(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m1)))
+void vse16(_Float16 * op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m1_m)))
+void vse16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m2)))
+void vse16(_Float16 * op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m2_m)))
+void vse16(vbool8_t op0, _Float16 * op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m4)))
+void vse16(_Float16 * op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m4_m)))
+void vse16(vbool4_t op0, _Float16 * op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m8)))
+void vse16(_Float16 * op0, vfloat16m8_t op1, size_t op2);
+
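(Usage sketch, not part of the patch: in the declarations above, every masked overload takes (mask, maskedoff, operands..., vl) while the unmasked form drops the first two arguments. A minimal strip-mined loop over the unmasked vfneg/vse16 overloads could look as follows; vsetvl_e16m1() and the non-overloaded vle16_v_f16m1() are assumed to be declared earlier in this header, since an unmasked unit-stride load differs only in return type and therefore has no overload to resolve on.)

#include <riscv_vector.h>

// Editorial sketch: negate a buffer of _Float16 with the Zvfh overloads above.
void f16_negate(_Float16 *dst, const _Float16 *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = vsetvl_e16m1(n);                    // lanes handled this iteration
    vfloat16m1_t v = vle16_v_f16m1(src, vl); // assumed non-overloaded load
    v = vfneg(v, vl);                        // unmasked vfneg overload above
    vse16(dst, v, vl);                       // overloaded store declared above
  }
}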
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16m8_m)))
+void vse16(vbool2_t op0, _Float16 * op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16mf2)))
+void vse16(_Float16 * op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16mf2_m)))
+void vse16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16mf4)))
+void vse16(_Float16 * op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vse16_v_f16mf4_m)))
+void vse16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_f16m1_m)))
+vfloat16m1_t vlse16(vbool16_t op0, vfloat16m1_t op1, const _Float16 * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_f16m2_m)))
+vfloat16m2_t vlse16(vbool8_t op0, vfloat16m2_t op1, const _Float16 * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_f16m4_m)))
+vfloat16m4_t vlse16(vbool4_t op0, vfloat16m4_t op1, const _Float16 * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_f16m8_m)))
+vfloat16m8_t vlse16(vbool2_t op0, vfloat16m8_t op1, const _Float16 * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_f16mf2_m)))
+vfloat16mf2_t vlse16(vbool32_t op0, vfloat16mf2_t op1, const _Float16 * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlse16_v_f16mf4_m)))
+vfloat16mf4_t vlse16(vbool64_t op0, vfloat16mf4_t op1, const _Float16 * op2, ptrdiff_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m1)))
+vuint16m1_t vfclass(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m1_m)))
+vuint16m1_t vfclass(vbool16_t op0, vuint16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m2)))
+vuint16m2_t vfclass(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m2_m)))
+vuint16m2_t vfclass(vbool8_t op0, vuint16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m4)))
+vuint16m4_t vfclass(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m4_m)))
+vuint16m4_t vfclass(vbool4_t op0, vuint16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m8)))
+vuint16m8_t vfclass(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16m8_m)))
+vuint16m8_t vfclass(vbool2_t op0, vuint16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16mf2)))
+vuint16mf2_t vfclass(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16mf2_m)))
+vuint16mf2_t vfclass(vbool32_t op0, vuint16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16mf4)))
+vuint16mf4_t vfclass(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfclass_v_u16mf4_m)))
+vuint16mf4_t vfclass(vbool64_t op0, vuint16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m1)))
+vfloat16m1_t vfcvt_f(vint16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m1_m)))
+vfloat16m1_t vfcvt_f(vbool16_t op0, vfloat16m1_t op1, vint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m2)))
+vfloat16m2_t vfcvt_f(vint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m2_m)))
+vfloat16m2_t vfcvt_f(vbool8_t op0, vfloat16m2_t op1, vint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m4)))
+vfloat16m4_t vfcvt_f(vint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m4_m)))
+vfloat16m4_t vfcvt_f(vbool4_t op0, vfloat16m4_t op1, vint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m8)))
+vfloat16m8_t vfcvt_f(vint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16m8_m)))
+vfloat16m8_t vfcvt_f(vbool2_t op0, vfloat16m8_t op1, vint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16mf2)))
+vfloat16mf2_t vfcvt_f(vint16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16mf2_m)))
+vfloat16mf2_t vfcvt_f(vbool32_t op0, vfloat16mf2_t op1, vint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16mf4)))
+vfloat16mf4_t vfcvt_f(vint16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_x_v_f16mf4_m)))
+vfloat16mf4_t vfcvt_f(vbool64_t op0, vfloat16mf4_t op1, vint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m1)))
+vfloat16m1_t vfcvt_f(vuint16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m1_m)))
+vfloat16m1_t vfcvt_f(vbool16_t op0, vfloat16m1_t op1, vuint16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m2)))
+vfloat16m2_t vfcvt_f(vuint16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m2_m)))
+vfloat16m2_t vfcvt_f(vbool8_t op0, vfloat16m2_t op1, vuint16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m4)))
+vfloat16m4_t vfcvt_f(vuint16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m4_m)))
+vfloat16m4_t vfcvt_f(vbool4_t op0, vfloat16m4_t op1, vuint16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m8)))
+vfloat16m8_t vfcvt_f(vuint16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16m8_m)))
+vfloat16m8_t vfcvt_f(vbool2_t op0, vfloat16m8_t op1, vuint16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16mf2)))
+vfloat16mf2_t vfcvt_f(vuint16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16mf2_m)))
+vfloat16mf2_t vfcvt_f(vbool32_t op0, vfloat16mf2_t op1, vuint16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16mf4)))
+vfloat16mf4_t vfcvt_f(vuint16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_f_xu_v_f16mf4_m)))
+vfloat16mf4_t vfcvt_f(vbool64_t op0, vfloat16mf4_t op1, vuint16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m1)))
+vint16m1_t vfcvt_rtz_x(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m1_m)))
+vint16m1_t vfcvt_rtz_x(vbool16_t op0, vint16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m2)))
+vint16m2_t vfcvt_rtz_x(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m2_m)))
+vint16m2_t vfcvt_rtz_x(vbool8_t op0, vint16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m4)))
+vint16m4_t vfcvt_rtz_x(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m4_m)))
+vint16m4_t vfcvt_rtz_x(vbool4_t op0, vint16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m8)))
+vint16m8_t vfcvt_rtz_x(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16m8_m)))
+vint16m8_t vfcvt_rtz_x(vbool2_t op0, vint16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16mf2)))
+vint16mf2_t vfcvt_rtz_x(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16mf2_m)))
+vint16mf2_t vfcvt_rtz_x(vbool32_t op0, vint16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16mf4)))
+vint16mf4_t vfcvt_rtz_x(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_x_f_v_i16mf4_m)))
+vint16mf4_t vfcvt_rtz_x(vbool64_t op0, vint16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m1)))
+vuint16m1_t vfcvt_rtz_xu(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m1_m)))
+vuint16m1_t vfcvt_rtz_xu(vbool16_t op0, vuint16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m2)))
+vuint16m2_t vfcvt_rtz_xu(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m2_m)))
+vuint16m2_t vfcvt_rtz_xu(vbool8_t op0, vuint16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m4)))
+vuint16m4_t vfcvt_rtz_xu(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m4_m)))
+vuint16m4_t vfcvt_rtz_xu(vbool4_t op0, vuint16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m8)))
+vuint16m8_t vfcvt_rtz_xu(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16m8_m)))
+vuint16m8_t vfcvt_rtz_xu(vbool2_t op0, vuint16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf2)))
+vuint16mf2_t vfcvt_rtz_xu(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf2_m)))
+vuint16mf2_t vfcvt_rtz_xu(vbool32_t op0, vuint16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf4)))
+vuint16mf4_t vfcvt_rtz_xu(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_rtz_xu_f_v_u16mf4_m)))
+vuint16mf4_t vfcvt_rtz_xu(vbool64_t op0, vuint16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m1)))
+vint16m1_t vfcvt_x(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m1_m)))
+vint16m1_t vfcvt_x(vbool16_t op0, vint16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m2)))
+vint16m2_t vfcvt_x(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m2_m)))
+vint16m2_t vfcvt_x(vbool8_t op0, vint16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m4)))
+vint16m4_t vfcvt_x(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m4_m)))
+vint16m4_t vfcvt_x(vbool4_t op0, vint16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m8)))
+vint16m8_t vfcvt_x(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16m8_m)))
+vint16m8_t vfcvt_x(vbool2_t op0, vint16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16mf2)))
+vint16mf2_t vfcvt_x(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16mf2_m)))
+vint16mf2_t vfcvt_x(vbool32_t op0, vint16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16mf4)))
+vint16mf4_t vfcvt_x(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_x_f_v_i16mf4_m)))
+vint16mf4_t vfcvt_x(vbool64_t op0, vint16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m1)))
+vuint16m1_t vfcvt_xu(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m1_m)))
+vuint16m1_t vfcvt_xu(vbool16_t op0, vuint16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m2)))
+vuint16m2_t vfcvt_xu(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m2_m)))
+vuint16m2_t vfcvt_xu(vbool8_t op0, vuint16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m4)))
+vuint16m4_t vfcvt_xu(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m4_m)))
+vuint16m4_t vfcvt_xu(vbool4_t op0, vuint16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m8)))
+vuint16m8_t vfcvt_xu(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16m8_m)))
+vuint16m8_t vfcvt_xu(vbool2_t op0, vuint16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16mf2)))
+vuint16mf2_t vfcvt_xu(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16mf2_m)))
+vuint16mf2_t vfcvt_xu(vbool32_t op0, vuint16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16mf4)))
+vuint16mf4_t vfcvt_xu(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfcvt_xu_f_v_u16mf4_m)))
+vuint16mf4_t vfcvt_xu(vbool64_t op0, vuint16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16mf4)))
+vfloat16mf4_t vfncvt_f(vint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16mf4_m)))
+vfloat16mf4_t vfncvt_f(vbool64_t op0, vfloat16mf4_t op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16mf2)))
+vfloat16mf2_t vfncvt_f(vint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16mf2_m)))
+vfloat16mf2_t vfncvt_f(vbool32_t op0, vfloat16mf2_t op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16m1)))
+vfloat16m1_t vfncvt_f(vint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16m1_m)))
+vfloat16m1_t vfncvt_f(vbool16_t op0, vfloat16m1_t op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16m2)))
+vfloat16m2_t vfncvt_f(vint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16m2_m)))
+vfloat16m2_t vfncvt_f(vbool8_t op0, vfloat16m2_t op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16m4)))
+vfloat16m4_t vfncvt_f(vint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_x_w_f16m4_m)))
+vfloat16m4_t vfncvt_f(vbool4_t op0, vfloat16m4_t op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16mf4)))
+vfloat16mf4_t vfncvt_f(vuint32mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16mf4_m)))
+vfloat16mf4_t vfncvt_f(vbool64_t op0, vfloat16mf4_t op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16mf2)))
+vfloat16mf2_t vfncvt_f(vuint32m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16mf2_m)))
+vfloat16mf2_t vfncvt_f(vbool32_t op0, vfloat16mf2_t op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16m1)))
+vfloat16m1_t vfncvt_f(vuint32m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16m1_m)))
+vfloat16m1_t vfncvt_f(vbool16_t op0, vfloat16m1_t op1, vuint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16m2)))
+vfloat16m2_t vfncvt_f(vuint32m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16m2_m)))
+vfloat16m2_t vfncvt_f(vbool8_t op0, vfloat16m2_t op1, vuint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16m4)))
+vfloat16m4_t vfncvt_f(vuint32m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_xu_w_f16m4_m)))
+vfloat16m4_t vfncvt_f(vbool4_t op0, vfloat16m4_t op1, vuint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8mf8)))
+vint8mf8_t vfncvt_rtz_x(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8mf8_m)))
+vint8mf8_t vfncvt_rtz_x(vbool64_t op0, vint8mf8_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8mf4)))
+vint8mf4_t vfncvt_rtz_x(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8mf4_m)))
+vint8mf4_t vfncvt_rtz_x(vbool32_t op0, vint8mf4_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8mf2)))
+vint8mf2_t vfncvt_rtz_x(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8mf2_m)))
+vint8mf2_t vfncvt_rtz_x(vbool16_t op0, vint8mf2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8m1)))
+vint8m1_t vfncvt_rtz_x(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8m1_m)))
+vint8m1_t vfncvt_rtz_x(vbool8_t op0, vint8m1_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8m2)))
+vint8m2_t vfncvt_rtz_x(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8m2_m)))
+vint8m2_t vfncvt_rtz_x(vbool4_t op0, vint8m2_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8m4)))
+vint8m4_t vfncvt_rtz_x(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_x_f_w_i8m4_m)))
+vint8m4_t vfncvt_rtz_x(vbool2_t op0, vint8m4_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf8)))
+vuint8mf8_t vfncvt_rtz_xu(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf8_m)))
+vuint8mf8_t vfncvt_rtz_xu(vbool64_t op0, vuint8mf8_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf4)))
+vuint8mf4_t vfncvt_rtz_xu(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf4_m)))
+vuint8mf4_t vfncvt_rtz_xu(vbool32_t op0, vuint8mf4_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf2)))
+vuint8mf2_t vfncvt_rtz_xu(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8mf2_m)))
+vuint8mf2_t vfncvt_rtz_xu(vbool16_t op0, vuint8mf2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8m1)))
+vuint8m1_t vfncvt_rtz_xu(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8m1_m)))
+vuint8m1_t vfncvt_rtz_xu(vbool8_t op0, vuint8m1_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8m2)))
+vuint8m2_t vfncvt_rtz_xu(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8m2_m)))
+vuint8m2_t vfncvt_rtz_xu(vbool4_t op0, vuint8m2_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8m4)))
+vuint8m4_t vfncvt_rtz_xu(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rtz_xu_f_w_u8m4_m)))
+vuint8m4_t vfncvt_rtz_xu(vbool2_t op0, vuint8m4_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8mf8)))
+vint8mf8_t vfncvt_x(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8mf8_m)))
+vint8mf8_t vfncvt_x(vbool64_t op0, vint8mf8_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8mf4)))
+vint8mf4_t vfncvt_x(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8mf4_m)))
+vint8mf4_t vfncvt_x(vbool32_t op0, vint8mf4_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8mf2)))
+vint8mf2_t vfncvt_x(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8mf2_m)))
+vint8mf2_t vfncvt_x(vbool16_t op0, vint8mf2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8m1)))
+vint8m1_t vfncvt_x(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8m1_m)))
+vint8m1_t vfncvt_x(vbool8_t op0, vint8m1_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8m2)))
+vint8m2_t vfncvt_x(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8m2_m)))
+vint8m2_t vfncvt_x(vbool4_t op0, vint8m2_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8m4)))
+vint8m4_t vfncvt_x(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_x_f_w_i8m4_m)))
+vint8m4_t vfncvt_x(vbool2_t op0, vint8m4_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8mf8)))
+vuint8mf8_t vfncvt_xu(vfloat16mf4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8mf8_m)))
+vuint8mf8_t vfncvt_xu(vbool64_t op0, vuint8mf8_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8mf4)))
+vuint8mf4_t vfncvt_xu(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8mf4_m)))
+vuint8mf4_t vfncvt_xu(vbool32_t op0, vuint8mf4_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8mf2)))
+vuint8mf2_t vfncvt_xu(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8mf2_m)))
+vuint8mf2_t vfncvt_xu(vbool16_t op0, vuint8mf2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8m1)))
+vuint8m1_t vfncvt_xu(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8m1_m)))
+vuint8m1_t vfncvt_xu(vbool8_t op0, vuint8m1_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8m2)))
+vuint8m2_t vfncvt_xu(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8m2_m)))
+vuint8m2_t vfncvt_xu(vbool4_t op0, vuint8m2_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8m4)))
+vuint8m4_t vfncvt_xu(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_xu_f_w_u8m4_m)))
+vuint8m4_t vfncvt_xu(vbool2_t op0, vuint8m4_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m1)))
+vfloat16m1_t vfrec7(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m1_m)))
+vfloat16m1_t vfrec7(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m2)))
+vfloat16m2_t vfrec7(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m2_m)))
+vfloat16m2_t vfrec7(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m4)))
+vfloat16m4_t vfrec7(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m4_m)))
+vfloat16m4_t vfrec7(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m8)))
+vfloat16m8_t vfrec7(vfloat16m8_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16m8_m)))
+vfloat16m8_t vfrec7(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16mf2)))
+vfloat16mf2_t vfrec7(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16mf2_m)))
+vfloat16mf2_t vfrec7(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
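(Usage sketch, not part of the patch: the masked estimate and conversion overloads in this block share the (mask, maskedoff, src, vl) shape, so passing the source vector as its own maskedoff leaves inactive lanes unchanged. The vmfne() floating-point compare overload is assumed from earlier in this header.)

// Editorial sketch: 7-bit reciprocal estimate that leaves zero lanes untouched.
vfloat16mf2_t f16_recip_est(vfloat16mf2_t x, size_t vl) {
  vbool32_t nonzero = vmfne(x, (_Float16)0.0f, vl); // assumed compare overload
  // Masked vfrec7 overload declared above: inactive lanes take maskedoff (= x).
  return vfrec7(nonzero, x, x, vl);
}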
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16mf4))) +vfloat16mf4_t vfrec7(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrec7_v_f16mf4_m))) +vfloat16mf4_t vfrec7(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m1))) +vfloat16m1_t vfrsqrt7(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m1_m))) +vfloat16m1_t vfrsqrt7(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m2))) +vfloat16m2_t vfrsqrt7(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m2_m))) +vfloat16m2_t vfrsqrt7(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m4))) +vfloat16m4_t vfrsqrt7(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m4_m))) +vfloat16m4_t vfrsqrt7(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m8))) +vfloat16m8_t vfrsqrt7(vfloat16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16m8_m))) +vfloat16m8_t vfrsqrt7(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16mf2))) +vfloat16mf2_t vfrsqrt7(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16mf2_m))) +vfloat16mf2_t vfrsqrt7(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16mf4))) +vfloat16mf4_t vfrsqrt7(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfrsqrt7_v_f16mf4_m))) +vfloat16mf4_t vfrsqrt7(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m1))) +vfloat16m1_t vfsqrt(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m1_m))) +vfloat16m1_t vfsqrt(vbool16_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m2))) +vfloat16m2_t vfsqrt(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m2_m))) +vfloat16m2_t vfsqrt(vbool8_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m4))) +vfloat16m4_t vfsqrt(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m4_m))) +vfloat16m4_t vfsqrt(vbool4_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m8))) +vfloat16m8_t vfsqrt(vfloat16m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16m8_m))) +vfloat16m8_t vfsqrt(vbool2_t op0, vfloat16m8_t op1, vfloat16m8_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16mf2))) +vfloat16mf2_t vfsqrt(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16mf2_m))) +vfloat16mf2_t vfsqrt(vbool32_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16mf4))) +vfloat16mf4_t vfsqrt(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfsqrt_v_f16mf4_m))) +vfloat16mf4_t vfsqrt(vbool64_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16mf4))) +vfloat16mf4_t vfwcvt_f(vint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16mf4_m))) +vfloat16mf4_t vfwcvt_f(vbool64_t op0, vfloat16mf4_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16mf2))) +vfloat16mf2_t vfwcvt_f(vint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16mf2_m))) +vfloat16mf2_t vfwcvt_f(vbool32_t op0, vfloat16mf2_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m1))) +vfloat16m1_t vfwcvt_f(vint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m1_m))) +vfloat16m1_t vfwcvt_f(vbool16_t op0, vfloat16m1_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m2))) +vfloat16m2_t vfwcvt_f(vint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m2_m))) +vfloat16m2_t vfwcvt_f(vbool8_t op0, vfloat16m2_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m4))) +vfloat16m4_t vfwcvt_f(vint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m4_m))) +vfloat16m4_t vfwcvt_f(vbool4_t op0, vfloat16m4_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m8))) +vfloat16m8_t vfwcvt_f(vint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_x_v_f16m8_m))) +vfloat16m8_t vfwcvt_f(vbool2_t op0, vfloat16m8_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16mf4))) +vfloat16mf4_t vfwcvt_f(vuint8mf8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16mf4_m))) +vfloat16mf4_t vfwcvt_f(vbool64_t op0, vfloat16mf4_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16mf2))) +vfloat16mf2_t vfwcvt_f(vuint8mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16mf2_m))) +vfloat16mf2_t vfwcvt_f(vbool32_t op0, vfloat16mf2_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m1))) +vfloat16m1_t vfwcvt_f(vuint8mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m1_m))) +vfloat16m1_t vfwcvt_f(vbool16_t op0, vfloat16m1_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m2))) +vfloat16m2_t vfwcvt_f(vuint8m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m2_m))) +vfloat16m2_t vfwcvt_f(vbool8_t op0, vfloat16m2_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m4))) +vfloat16m4_t vfwcvt_f(vuint8m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m4_m))) +vfloat16m4_t vfwcvt_f(vbool4_t op0, vfloat16m4_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m8))) +vfloat16m8_t vfwcvt_f(vuint8m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_xu_v_f16m8_m))) +vfloat16m8_t vfwcvt_f(vbool2_t op0, vfloat16m8_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32mf2))) +vint32mf2_t vfwcvt_rtz_x(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32mf2_m))) +vint32mf2_t vfwcvt_rtz_x(vbool64_t op0, vint32mf2_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m1))) +vint32m1_t vfwcvt_rtz_x(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m1_m))) +vint32m1_t vfwcvt_rtz_x(vbool32_t op0, vint32m1_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m2))) +vint32m2_t vfwcvt_rtz_x(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m2_m))) +vint32m2_t vfwcvt_rtz_x(vbool16_t op0, vint32m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m4))) +vint32m4_t vfwcvt_rtz_x(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m4_m))) +vint32m4_t vfwcvt_rtz_x(vbool8_t op0, vint32m4_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m8))) +vint32m8_t vfwcvt_rtz_x(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_x_f_v_i32m8_m))) +vint32m8_t vfwcvt_rtz_x(vbool4_t op0, vint32m8_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32mf2))) +vuint32mf2_t vfwcvt_rtz_xu(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32mf2_m))) +vuint32mf2_t vfwcvt_rtz_xu(vbool64_t op0, vuint32mf2_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m1))) +vuint32m1_t vfwcvt_rtz_xu(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m1_m))) +vuint32m1_t vfwcvt_rtz_xu(vbool32_t op0, vuint32m1_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m2))) +vuint32m2_t vfwcvt_rtz_xu(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m2_m))) +vuint32m2_t vfwcvt_rtz_xu(vbool16_t op0, vuint32m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m4))) +vuint32m4_t vfwcvt_rtz_xu(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m4_m))) +vuint32m4_t vfwcvt_rtz_xu(vbool8_t op0, vuint32m4_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m8))) +vuint32m8_t vfwcvt_rtz_xu(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_rtz_xu_f_v_u32m8_m))) +vuint32m8_t vfwcvt_rtz_xu(vbool4_t op0, vuint32m8_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32mf2))) +vint32mf2_t vfwcvt_x(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32mf2_m))) +vint32mf2_t vfwcvt_x(vbool64_t op0, vint32mf2_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m1))) +vint32m1_t vfwcvt_x(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m1_m))) +vint32m1_t vfwcvt_x(vbool32_t op0, vint32m1_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m2))) +vint32m2_t vfwcvt_x(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m2_m))) +vint32m2_t vfwcvt_x(vbool16_t op0, vint32m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m4))) +vint32m4_t vfwcvt_x(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m4_m))) +vint32m4_t vfwcvt_x(vbool8_t op0, vint32m4_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m8))) +vint32m8_t vfwcvt_x(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_x_f_v_i32m8_m))) +vint32m8_t vfwcvt_x(vbool4_t op0, vint32m8_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32mf2))) +vuint32mf2_t vfwcvt_xu(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32mf2_m))) +vuint32mf2_t vfwcvt_xu(vbool64_t op0, vuint32mf2_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m1))) +vuint32m1_t vfwcvt_xu(vfloat16mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m1_m))) +vuint32m1_t vfwcvt_xu(vbool32_t op0, vuint32m1_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m2))) +vuint32m2_t vfwcvt_xu(vfloat16m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m2_m))) +vuint32m2_t vfwcvt_xu(vbool16_t op0, vuint32m2_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m4))) +vuint32m4_t 
vfwcvt_xu(vfloat16m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m4_m))) +vuint32m4_t vfwcvt_xu(vbool8_t op0, vuint32m4_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m8))) +vuint32m8_t vfwcvt_xu(vfloat16m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_xu_f_v_u32m8_m))) +vuint32m8_t vfwcvt_xu(vbool4_t op0, vuint32m8_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf2_f16m1))) +vfloat16m1_t vlmul_ext_f16m1(vfloat16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf4_f16m1))) +vfloat16m1_t vlmul_ext_f16m1(vfloat16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf4_f16mf2))) +vfloat16mf2_t vlmul_ext_f16mf2(vfloat16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16m1_f16m2))) +vfloat16m2_t vlmul_ext_f16m2(vfloat16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf2_f16m2))) +vfloat16m2_t vlmul_ext_f16m2(vfloat16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf4_f16m2))) +vfloat16m2_t vlmul_ext_f16m2(vfloat16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16m1_f16m4))) +vfloat16m4_t vlmul_ext_f16m4(vfloat16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16m2_f16m4))) +vfloat16m4_t vlmul_ext_f16m4(vfloat16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf2_f16m4))) +vfloat16m4_t vlmul_ext_f16m4(vfloat16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf4_f16m4))) +vfloat16m4_t vlmul_ext_f16m4(vfloat16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16m1_f16m8))) +vfloat16m8_t vlmul_ext_f16m8(vfloat16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16m2_f16m8))) +vfloat16m8_t vlmul_ext_f16m8(vfloat16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16m4_f16m8))) +vfloat16m8_t vlmul_ext_f16m8(vfloat16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf2_f16m8))) +vfloat16m8_t vlmul_ext_f16m8(vfloat16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_ext_v_f16mf4_f16m8))) +vfloat16m8_t vlmul_ext_f16m8(vfloat16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m1_f16mf2))) +vfloat16mf2_t vlmul_trunc_f16mf2(vfloat16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m2_f16mf2))) +vfloat16mf2_t vlmul_trunc_f16mf2(vfloat16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m4_f16mf2))) +vfloat16mf2_t vlmul_trunc_f16mf2(vfloat16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m8_f16mf2))) +vfloat16mf2_t vlmul_trunc_f16mf2(vfloat16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m1_f16mf4))) +vfloat16mf4_t vlmul_trunc_f16mf4(vfloat16m1_t op0); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m2_f16mf4))) +vfloat16mf4_t vlmul_trunc_f16mf4(vfloat16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m4_f16mf4))) +vfloat16mf4_t vlmul_trunc_f16mf4(vfloat16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m8_f16mf4))) +vfloat16mf4_t vlmul_trunc_f16mf4(vfloat16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16mf2_f16mf4))) +vfloat16mf4_t vlmul_trunc_f16mf4(vfloat16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m2_f16m1))) +vfloat16m1_t vlmul_trunc_f16m1(vfloat16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m4_f16m1))) +vfloat16m1_t vlmul_trunc_f16m1(vfloat16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m8_f16m1))) +vfloat16m1_t vlmul_trunc_f16m1(vfloat16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m4_f16m2))) +vfloat16m2_t vlmul_trunc_f16m2(vfloat16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m8_f16m2))) +vfloat16m2_t vlmul_trunc_f16m2(vfloat16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlmul_trunc_v_f16m8_f16m4))) +vfloat16m4_t vlmul_trunc_f16m4(vfloat16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m1_f16m1))) +vfloat16m1_t vreinterpret_f16m1(vint16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m2_f16m2))) +vfloat16m2_t vreinterpret_f16m2(vint16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m4_f16m4))) +vfloat16m4_t vreinterpret_f16m4(vint16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16m8_f16m8))) +vfloat16m8_t vreinterpret_f16m8(vint16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf2_f16mf2))) +vfloat16mf2_t vreinterpret_f16mf2(vint16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_i16mf4_f16mf4))) +vfloat16mf4_t vreinterpret_f16mf4(vint16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m1_f16m1))) +vfloat16m1_t vreinterpret_f16m1(vuint16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m2_f16m2))) +vfloat16m2_t vreinterpret_f16m2(vuint16m2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m4_f16m4))) +vfloat16m4_t vreinterpret_f16m4(vuint16m4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16m8_f16m8))) +vfloat16m8_t vreinterpret_f16m8(vuint16m8_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf2_f16mf2))) +vfloat16mf2_t vreinterpret_f16mf2(vuint16mf2_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_u16mf4_f16mf4))) +vfloat16mf4_t vreinterpret_f16mf4(vuint16mf4_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m1_i16m1))) +vint16m1_t vreinterpret_i16m1(vfloat16m1_t op0); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m2_i16m2))) 
+vint16m2_t vreinterpret_i16m2(vfloat16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m4_i16m4)))
+vint16m4_t vreinterpret_i16m4(vfloat16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m8_i16m8)))
+vint16m8_t vreinterpret_i16m8(vfloat16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16mf2_i16mf2)))
+vint16mf2_t vreinterpret_i16mf2(vfloat16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16mf4_i16mf4)))
+vint16mf4_t vreinterpret_i16mf4(vfloat16mf4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m1_u16m1)))
+vuint16m1_t vreinterpret_u16m1(vfloat16m1_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m2_u16m2)))
+vuint16m2_t vreinterpret_u16m2(vfloat16m2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m4_u16m4)))
+vuint16m4_t vreinterpret_u16m4(vfloat16m4_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16m8_u16m8)))
+vuint16m8_t vreinterpret_u16m8(vfloat16m8_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16mf2_u16mf2)))
+vuint16mf2_t vreinterpret_u16mf2(vfloat16mf2_t op0);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vreinterpret_v_f16mf4_u16mf4)))
+vuint16mf4_t vreinterpret_u16mf4(vfloat16mf4_t op0);
+
+#endif
+
+#if defined(__riscv_f) && defined(__riscv_zfh)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32mf2)))
+vfloat32mf2_t vfwadd_vv(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32mf2_m)))
+vfloat32mf2_t vfwadd_vv(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m1)))
+vfloat32m1_t vfwadd_vv(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m1_m)))
+vfloat32m1_t vfwadd_vv(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m2)))
+vfloat32m2_t vfwadd_vv(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m2_m)))
+vfloat32m2_t vfwadd_vv(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m4)))
+vfloat32m4_t vfwadd_vv(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m4_m)))
+vfloat32m4_t vfwadd_vv(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m8)))
+vfloat32m8_t vfwadd_vv(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vv_f32m8_m)))
+vfloat32m8_t vfwadd_vv(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32mf2)))
+vfloat32mf2_t vfwadd_vf(vfloat16mf4_t op0,
_Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32mf2_m))) +vfloat32mf2_t vfwadd_vf(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m1))) +vfloat32m1_t vfwadd_vf(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m1_m))) +vfloat32m1_t vfwadd_vf(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m2))) +vfloat32m2_t vfwadd_vf(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m2_m))) +vfloat32m2_t vfwadd_vf(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m4))) +vfloat32m4_t vfwadd_vf(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m4_m))) +vfloat32m4_t vfwadd_vf(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m8))) +vfloat32m8_t vfwadd_vf(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_vf_f32m8_m))) +vfloat32m8_t vfwadd_vf(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32mf2))) +vfloat32mf2_t vfwsub_vv(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32mf2_m))) +vfloat32mf2_t vfwsub_vv(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m1))) +vfloat32m1_t vfwsub_vv(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m1_m))) +vfloat32m1_t vfwsub_vv(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m2))) +vfloat32m2_t vfwsub_vv(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m2_m))) +vfloat32m2_t vfwsub_vv(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m4))) +vfloat32m4_t vfwsub_vv(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m4_m))) +vfloat32m4_t vfwsub_vv(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m8))) +vfloat32m8_t vfwsub_vv(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vv_f32m8_m))) +vfloat32m8_t vfwsub_vv(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32mf2))) +vfloat32mf2_t vfwsub_vf(vfloat16mf4_t op0, 
_Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32mf2_m))) +vfloat32mf2_t vfwsub_vf(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m1))) +vfloat32m1_t vfwsub_vf(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m1_m))) +vfloat32m1_t vfwsub_vf(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m2))) +vfloat32m2_t vfwsub_vf(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m2_m))) +vfloat32m2_t vfwsub_vf(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m4))) +vfloat32m4_t vfwsub_vf(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m4_m))) +vfloat32m4_t vfwsub_vf(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m8))) +vfloat32m8_t vfwsub_vf(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_vf_f32m8_m))) +vfloat32m8_t vfwsub_vf(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32mf2))) +vfloat32mf2_t vfwadd_wv(vfloat32mf2_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32mf2_m))) +vfloat32mf2_t vfwadd_wv(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m1))) +vfloat32m1_t vfwadd_wv(vfloat32m1_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m1_m))) +vfloat32m1_t vfwadd_wv(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m2))) +vfloat32m2_t vfwadd_wv(vfloat32m2_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m2_m))) +vfloat32m2_t vfwadd_wv(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m4))) +vfloat32m4_t vfwadd_wv(vfloat32m4_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m4_m))) +vfloat32m4_t vfwadd_wv(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m8))) +vfloat32m8_t vfwadd_wv(vfloat32m8_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wv_f32m8_m))) +vfloat32m8_t vfwadd_wv(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32mf2))) +vfloat32mf2_t vfwadd_wf(vfloat32mf2_t op0, _Float16 
op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32mf2_m))) +vfloat32mf2_t vfwadd_wf(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m1))) +vfloat32m1_t vfwadd_wf(vfloat32m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m1_m))) +vfloat32m1_t vfwadd_wf(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m2))) +vfloat32m2_t vfwadd_wf(vfloat32m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m2_m))) +vfloat32m2_t vfwadd_wf(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m4))) +vfloat32m4_t vfwadd_wf(vfloat32m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m4_m))) +vfloat32m4_t vfwadd_wf(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m8))) +vfloat32m8_t vfwadd_wf(vfloat32m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwadd_wf_f32m8_m))) +vfloat32m8_t vfwadd_wf(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32mf2))) +vfloat32mf2_t vfwsub_wv(vfloat32mf2_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32mf2_m))) +vfloat32mf2_t vfwsub_wv(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m1))) +vfloat32m1_t vfwsub_wv(vfloat32m1_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m1_m))) +vfloat32m1_t vfwsub_wv(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m2))) +vfloat32m2_t vfwsub_wv(vfloat32m2_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m2_m))) +vfloat32m2_t vfwsub_wv(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m4))) +vfloat32m4_t vfwsub_wv(vfloat32m4_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m4_m))) +vfloat32m4_t vfwsub_wv(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m8))) +vfloat32m8_t vfwsub_wv(vfloat32m8_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wv_f32m8_m))) +vfloat32m8_t vfwsub_wv(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32mf2))) +vfloat32mf2_t vfwsub_wf(vfloat32mf2_t op0, _Float16 op1, 
size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32mf2_m))) +vfloat32mf2_t vfwsub_wf(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m1))) +vfloat32m1_t vfwsub_wf(vfloat32m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m1_m))) +vfloat32m1_t vfwsub_wf(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m2))) +vfloat32m2_t vfwsub_wf(vfloat32m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m2_m))) +vfloat32m2_t vfwsub_wf(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m4))) +vfloat32m4_t vfwsub_wf(vfloat32m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m4_m))) +vfloat32m4_t vfwsub_wf(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m8))) +vfloat32m8_t vfwsub_wf(vfloat32m8_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwsub_wf_f32m8_m))) +vfloat32m8_t vfwsub_wf(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32mf2))) +vfloat32mf2_t vfwmul(vfloat16mf4_t op0, vfloat16mf4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32mf2_m))) +vfloat32mf2_t vfwmul(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m1))) +vfloat32m1_t vfwmul(vfloat16mf2_t op0, vfloat16mf2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m1_m))) +vfloat32m1_t vfwmul(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m2))) +vfloat32m2_t vfwmul(vfloat16m1_t op0, vfloat16m1_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m2_m))) +vfloat32m2_t vfwmul(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m4))) +vfloat32m4_t vfwmul(vfloat16m2_t op0, vfloat16m2_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m4_m))) +vfloat32m4_t vfwmul(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m8))) +vfloat32m8_t vfwmul(vfloat16m4_t op0, vfloat16m4_t op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vv_f32m8_m))) +vfloat32m8_t vfwmul(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32mf2))) +vfloat32mf2_t vfwmul(vfloat16mf4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded 
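+/* Illustrative note (editorial sketch, not part of the upstream
+ * generated header): the vfwmul overloads multiply two f16 sources into
+ * an f32 result of twice the LMUL; the masked form takes a mask and a
+ * maskedoff operand first, e.g. (variable names hypothetical):
+ *
+ *   vfloat32m2_t p  = vfwmul(a, b, vl);           // a, b: vfloat16m1_t
+ *   vfloat32m2_t pm = vfwmul(mask, p, a, b, vl);  // mask: vbool16_t
+ */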
__attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32mf2_m))) +vfloat32mf2_t vfwmul(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m1))) +vfloat32m1_t vfwmul(vfloat16mf2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m1_m))) +vfloat32m1_t vfwmul(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m2))) +vfloat32m2_t vfwmul(vfloat16m1_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m2_m))) +vfloat32m2_t vfwmul(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m4))) +vfloat32m4_t vfwmul(vfloat16m2_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m4_m))) +vfloat32m4_t vfwmul(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m8))) +vfloat32m8_t vfwmul(vfloat16m4_t op0, _Float16 op1, size_t op2); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmul_vf_f32m8_m))) +vfloat32m8_t vfwmul(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, _Float16 op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32mf2))) +vfloat32mf2_t vfwmacc(vfloat32mf2_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32mf2_m))) +vfloat32mf2_t vfwmacc(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m1))) +vfloat32m1_t vfwmacc(vfloat32m1_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m1_m))) +vfloat32m1_t vfwmacc(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m2))) +vfloat32m2_t vfwmacc(vfloat32m2_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m2_m))) +vfloat32m2_t vfwmacc(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m4))) +vfloat32m4_t vfwmacc(vfloat32m4_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m4_m))) +vfloat32m4_t vfwmacc(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m8))) +vfloat32m8_t vfwmacc(vfloat32m8_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vv_f32m8_m))) +vfloat32m8_t vfwmacc(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32mf2))) +vfloat32mf2_t vfwmacc(vfloat32mf2_t op0, 
_Float16 op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32mf2_m))) +vfloat32mf2_t vfwmacc(vbool64_t op0, vfloat32mf2_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m1))) +vfloat32m1_t vfwmacc(vfloat32m1_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m1_m))) +vfloat32m1_t vfwmacc(vbool32_t op0, vfloat32m1_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m2))) +vfloat32m2_t vfwmacc(vfloat32m2_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m2_m))) +vfloat32m2_t vfwmacc(vbool16_t op0, vfloat32m2_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m4))) +vfloat32m4_t vfwmacc(vfloat32m4_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m4_m))) +vfloat32m4_t vfwmacc(vbool8_t op0, vfloat32m4_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m8))) +vfloat32m8_t vfwmacc(vfloat32m8_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmacc_vf_f32m8_m))) +vfloat32m8_t vfwmacc(vbool4_t op0, vfloat32m8_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32mf2))) +vfloat32mf2_t vfwnmacc(vfloat32mf2_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32mf2_m))) +vfloat32mf2_t vfwnmacc(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m1))) +vfloat32m1_t vfwnmacc(vfloat32m1_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m1_m))) +vfloat32m1_t vfwnmacc(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m2))) +vfloat32m2_t vfwnmacc(vfloat32m2_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m2_m))) +vfloat32m2_t vfwnmacc(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m4))) +vfloat32m4_t vfwnmacc(vfloat32m4_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m4_m))) +vfloat32m4_t vfwnmacc(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m8))) +vfloat32m8_t vfwnmacc(vfloat32m8_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vv_f32m8_m))) +vfloat32m8_t vfwnmacc(vbool4_t op0, vfloat32m8_t op1, 
vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32mf2))) +vfloat32mf2_t vfwnmacc(vfloat32mf2_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32mf2_m))) +vfloat32mf2_t vfwnmacc(vbool64_t op0, vfloat32mf2_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m1))) +vfloat32m1_t vfwnmacc(vfloat32m1_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m1_m))) +vfloat32m1_t vfwnmacc(vbool32_t op0, vfloat32m1_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m2))) +vfloat32m2_t vfwnmacc(vfloat32m2_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m2_m))) +vfloat32m2_t vfwnmacc(vbool16_t op0, vfloat32m2_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m4))) +vfloat32m4_t vfwnmacc(vfloat32m4_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m4_m))) +vfloat32m4_t vfwnmacc(vbool8_t op0, vfloat32m4_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m8))) +vfloat32m8_t vfwnmacc(vfloat32m8_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmacc_vf_f32m8_m))) +vfloat32m8_t vfwnmacc(vbool4_t op0, vfloat32m8_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32mf2))) +vfloat32mf2_t vfwmsac(vfloat32mf2_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32mf2_m))) +vfloat32mf2_t vfwmsac(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m1))) +vfloat32m1_t vfwmsac(vfloat32m1_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m1_m))) +vfloat32m1_t vfwmsac(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m2))) +vfloat32m2_t vfwmsac(vfloat32m2_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m2_m))) +vfloat32m2_t vfwmsac(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m4))) +vfloat32m4_t vfwmsac(vfloat32m4_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m4_m))) +vfloat32m4_t vfwmsac(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m8))) +vfloat32m8_t vfwmsac(vfloat32m8_t op0, vfloat16m4_t op1, 
vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vv_f32m8_m))) +vfloat32m8_t vfwmsac(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32mf2))) +vfloat32mf2_t vfwmsac(vfloat32mf2_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32mf2_m))) +vfloat32mf2_t vfwmsac(vbool64_t op0, vfloat32mf2_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m1))) +vfloat32m1_t vfwmsac(vfloat32m1_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m1_m))) +vfloat32m1_t vfwmsac(vbool32_t op0, vfloat32m1_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m2))) +vfloat32m2_t vfwmsac(vfloat32m2_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m2_m))) +vfloat32m2_t vfwmsac(vbool16_t op0, vfloat32m2_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m4))) +vfloat32m4_t vfwmsac(vfloat32m4_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m4_m))) +vfloat32m4_t vfwmsac(vbool8_t op0, vfloat32m4_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m8))) +vfloat32m8_t vfwmsac(vfloat32m8_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwmsac_vf_f32m8_m))) +vfloat32m8_t vfwmsac(vbool4_t op0, vfloat32m8_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32mf2))) +vfloat32mf2_t vfwnmsac(vfloat32mf2_t op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32mf2_m))) +vfloat32mf2_t vfwnmsac(vbool64_t op0, vfloat32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m1))) +vfloat32m1_t vfwnmsac(vfloat32m1_t op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m1_m))) +vfloat32m1_t vfwnmsac(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m2))) +vfloat32m2_t vfwnmsac(vfloat32m2_t op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m2_m))) +vfloat32m2_t vfwnmsac(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m4))) +vfloat32m4_t vfwnmsac(vfloat32m4_t op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m4_m))) +vfloat32m4_t vfwnmsac(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, 
vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m8))) +vfloat32m8_t vfwnmsac(vfloat32m8_t op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vv_f32m8_m))) +vfloat32m8_t vfwnmsac(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32mf2))) +vfloat32mf2_t vfwnmsac(vfloat32mf2_t op0, _Float16 op1, vfloat16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32mf2_m))) +vfloat32mf2_t vfwnmsac(vbool64_t op0, vfloat32mf2_t op1, _Float16 op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m1))) +vfloat32m1_t vfwnmsac(vfloat32m1_t op0, _Float16 op1, vfloat16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m1_m))) +vfloat32m1_t vfwnmsac(vbool32_t op0, vfloat32m1_t op1, _Float16 op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m2))) +vfloat32m2_t vfwnmsac(vfloat32m2_t op0, _Float16 op1, vfloat16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m2_m))) +vfloat32m2_t vfwnmsac(vbool16_t op0, vfloat32m2_t op1, _Float16 op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m4))) +vfloat32m4_t vfwnmsac(vfloat32m4_t op0, _Float16 op1, vfloat16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m4_m))) +vfloat32m4_t vfwnmsac(vbool8_t op0, vfloat32m4_t op1, _Float16 op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m8))) +vfloat32m8_t vfwnmsac(vfloat32m8_t op0, _Float16 op1, vfloat16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwnmsac_vf_f32m8_m))) +vfloat32m8_t vfwnmsac(vbool4_t op0, vfloat32m8_t op1, _Float16 op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m1_f32m1))) +vfloat32m1_t vfwredsum(vfloat32m1_t op0, vfloat16m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m1_f32m1_m))) +vfloat32m1_t vfwredsum(vbool16_t op0, vfloat32m1_t op1, vfloat16m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m2_f32m1))) +vfloat32m1_t vfwredsum(vfloat32m1_t op0, vfloat16m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m2_f32m1_m))) +vfloat32m1_t vfwredsum(vbool8_t op0, vfloat32m1_t op1, vfloat16m2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m4_f32m1))) +vfloat32m1_t vfwredsum(vfloat32m1_t op0, vfloat16m4_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m4_f32m1_m))) +vfloat32m1_t vfwredsum(vbool4_t op0, vfloat32m1_t op1, vfloat16m4_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m8_f32m1))) +vfloat32m1_t 
vfwredsum(vfloat32m1_t op0, vfloat16m8_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16m8_f32m1_m))) +vfloat32m1_t vfwredsum(vbool2_t op0, vfloat32m1_t op1, vfloat16m8_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16mf2_f32m1))) +vfloat32m1_t vfwredsum(vfloat32m1_t op0, vfloat16mf2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16mf2_f32m1_m))) +vfloat32m1_t vfwredsum(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16mf4_f32m1))) +vfloat32m1_t vfwredsum(vfloat32m1_t op0, vfloat16mf4_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredsum_vs_f16mf4_f32m1_m))) +vfloat32m1_t vfwredsum(vbool64_t op0, vfloat32m1_t op1, vfloat16mf4_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m1_f32m1))) +vfloat32m1_t vfwredosum(vfloat32m1_t op0, vfloat16m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m1_f32m1_m))) +vfloat32m1_t vfwredosum(vbool16_t op0, vfloat32m1_t op1, vfloat16m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m2_f32m1))) +vfloat32m1_t vfwredosum(vfloat32m1_t op0, vfloat16m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m2_f32m1_m))) +vfloat32m1_t vfwredosum(vbool8_t op0, vfloat32m1_t op1, vfloat16m2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m4_f32m1))) +vfloat32m1_t vfwredosum(vfloat32m1_t op0, vfloat16m4_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m4_f32m1_m))) +vfloat32m1_t vfwredosum(vbool4_t op0, vfloat32m1_t op1, vfloat16m4_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m8_f32m1))) +vfloat32m1_t vfwredosum(vfloat32m1_t op0, vfloat16m8_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16m8_f32m1_m))) +vfloat32m1_t vfwredosum(vbool2_t op0, vfloat32m1_t op1, vfloat16m8_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16mf2_f32m1))) +vfloat32m1_t vfwredosum(vfloat32m1_t op0, vfloat16mf2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16mf2_f32m1_m))) +vfloat32m1_t vfwredosum(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16mf4_f32m1))) +vfloat32m1_t vfwredosum(vfloat32m1_t op0, vfloat16mf4_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwredosum_vs_f16mf4_f32m1_m))) +vfloat32m1_t vfwredosum(vbool64_t op0, vfloat32m1_t op1, vfloat16mf4_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16mf4))) 
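+/* Illustrative note (editorial sketch, not part of the upstream
+ * generated header): vfncvt_f narrows f32 data to f16 at half the LMUL,
+ * rounding per the current rounding mode, while vfncvt_rod_f rounds
+ * toward odd, e.g. (variable names hypothetical):
+ *
+ *   vfloat16mf2_t lo = vfncvt_f(wide, vl);        // wide: vfloat32m1_t
+ */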
+vfloat16mf4_t vfncvt_f(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16mf4_m))) +vfloat16mf4_t vfncvt_f(vbool64_t op0, vfloat16mf4_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16mf2))) +vfloat16mf2_t vfncvt_f(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16mf2_m))) +vfloat16mf2_t vfncvt_f(vbool32_t op0, vfloat16mf2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16m1))) +vfloat16m1_t vfncvt_f(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16m1_m))) +vfloat16m1_t vfncvt_f(vbool16_t op0, vfloat16m1_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16m2))) +vfloat16m2_t vfncvt_f(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16m2_m))) +vfloat16m2_t vfncvt_f(vbool8_t op0, vfloat16m2_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16m4))) +vfloat16m4_t vfncvt_f(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_f_f_w_f16m4_m))) +vfloat16m4_t vfncvt_f(vbool4_t op0, vfloat16m4_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16mf4))) +vfloat16mf4_t vfncvt_rod_f(vfloat32mf2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16mf4_m))) +vfloat16mf4_t vfncvt_rod_f(vbool64_t op0, vfloat16mf4_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16mf2))) +vfloat16mf2_t vfncvt_rod_f(vfloat32m1_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16mf2_m))) +vfloat16mf2_t vfncvt_rod_f(vbool32_t op0, vfloat16mf2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16m1))) +vfloat16m1_t vfncvt_rod_f(vfloat32m2_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16m1_m))) +vfloat16m1_t vfncvt_rod_f(vbool16_t op0, vfloat16m1_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16m2))) +vfloat16m2_t vfncvt_rod_f(vfloat32m4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16m2_m))) +vfloat16m2_t vfncvt_rod_f(vbool8_t op0, vfloat16m2_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16m4))) +vfloat16m4_t vfncvt_rod_f(vfloat32m8_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfncvt_rod_f_f_w_f16m4_m))) +vfloat16m4_t vfncvt_rod_f(vbool4_t op0, vfloat16m4_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32mf2))) +vfloat32mf2_t vfwcvt_f(vfloat16mf4_t op0, size_t op1); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32mf2_m))) +vfloat32mf2_t vfwcvt_f(vbool64_t op0, vfloat32mf2_t 
op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m1)))
+vfloat32m1_t vfwcvt_f(vfloat16mf2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m1_m)))
+vfloat32m1_t vfwcvt_f(vbool32_t op0, vfloat32m1_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m2)))
+vfloat32m2_t vfwcvt_f(vfloat16m1_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m2_m)))
+vfloat32m2_t vfwcvt_f(vbool16_t op0, vfloat32m2_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m4)))
+vfloat32m4_t vfwcvt_f(vfloat16m2_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m4_m)))
+vfloat32m4_t vfwcvt_f(vbool8_t op0, vfloat32m4_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m8)))
+vfloat32m8_t vfwcvt_f(vfloat16m4_t op0, size_t op1);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vfwcvt_f_f_v_f32m8_m)))
+vfloat32m8_t vfwcvt_f(vbool4_t op0, vfloat32m8_t op1, vfloat16m4_t op2, size_t op3);
+
+#endif
+
+#if defined(__riscv_zvamo)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m1)))
+vint32m1_t vamoswapei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m1_m)))
+vint32m1_t vamoswapei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m2)))
+vint32m2_t vamoswapei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m2_m)))
+vint32m2_t vamoswapei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m4)))
+vint32m4_t vamoswapei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m4_m)))
+vint32m4_t vamoswapei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m8)))
+vint32m8_t vamoswapei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32m8_m)))
+vint32m8_t vamoswapei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32mf2)))
+vint32mf2_t vamoswapei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i32mf2_m)))
+vint32mf2_t vamoswapei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m1)))
+vuint32m1_t vamoswapei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m1_m)))
+vuint32m1_t
vamoswapei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m2))) +vuint32m2_t vamoswapei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m2_m))) +vuint32m2_t vamoswapei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m4))) +vuint32m4_t vamoswapei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m4_m))) +vuint32m4_t vamoswapei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m8))) +vuint32m8_t vamoswapei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32m8_m))) +vuint32m8_t vamoswapei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32mf2))) +vuint32mf2_t vamoswapei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u32mf2_m))) +vuint32mf2_t vamoswapei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m1))) +vint32m1_t vamoswapei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m1_m))) +vint32m1_t vamoswapei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m2))) +vint32m2_t vamoswapei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m2_m))) +vint32m2_t vamoswapei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m4))) +vint32m4_t vamoswapei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m4_m))) +vint32m4_t vamoswapei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m8))) +vint32m8_t vamoswapei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32m8_m))) +vint32m8_t vamoswapei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32mf2))) +vint32mf2_t vamoswapei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i32mf2_m))) +vint32mf2_t vamoswapei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded 
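+/* Illustrative note (editorial sketch, not part of the upstream
+ * generated header, and meaningful only under __riscv_zvamo): the
+ * vamoswapei32 overloads perform indexed atomic swaps, returning the
+ * previous memory values, e.g. (variable names hypothetical):
+ *
+ *   vint32m1_t old = vamoswapei32(base, idx, newv, vl);
+ *   // base: int32_t *, idx: vuint32m1_t offsets, newv: vint32m1_t
+ */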
__attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m1))) +vuint32m1_t vamoswapei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m1_m))) +vuint32m1_t vamoswapei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m2))) +vuint32m2_t vamoswapei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m2_m))) +vuint32m2_t vamoswapei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m4))) +vuint32m4_t vamoswapei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m4_m))) +vuint32m4_t vamoswapei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m8))) +vuint32m8_t vamoswapei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32m8_m))) +vuint32m8_t vamoswapei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32mf2))) +vuint32mf2_t vamoswapei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u32mf2_m))) +vuint32mf2_t vamoswapei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32m1))) +vint32m1_t vamoswapei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32m1_m))) +vint32m1_t vamoswapei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32m2))) +vint32m2_t vamoswapei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32m2_m))) +vint32m2_t vamoswapei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32m4))) +vint32m4_t vamoswapei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32m4_m))) +vint32m4_t vamoswapei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32mf2))) +vint32mf2_t vamoswapei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i32mf2_m))) +vint32mf2_t vamoswapei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32m1))) +vuint32m1_t vamoswapei64(uint32_t * op0, vuint64m2_t op1, 
vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32m1_m))) +vuint32m1_t vamoswapei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32m2))) +vuint32m2_t vamoswapei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32m2_m))) +vuint32m2_t vamoswapei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32m4))) +vuint32m4_t vamoswapei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32m4_m))) +vuint32m4_t vamoswapei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32mf2))) +vuint32mf2_t vamoswapei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u32mf2_m))) +vuint32mf2_t vamoswapei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m1))) +vint64m1_t vamoswapei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m1_m))) +vint64m1_t vamoswapei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m2))) +vint64m2_t vamoswapei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m2_m))) +vint64m2_t vamoswapei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m4))) +vint64m4_t vamoswapei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m4_m))) +vint64m4_t vamoswapei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m8))) +vint64m8_t vamoswapei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i64m8_m))) +vint64m8_t vamoswapei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m1))) +vuint64m1_t vamoswapei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m1_m))) +vuint64m1_t vamoswapei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m2))) +vuint64m2_t vamoswapei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m2_m))) +vuint64m2_t vamoswapei8(vbool32_t op0, 
uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m4))) +vuint64m4_t vamoswapei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m4_m))) +vuint64m4_t vamoswapei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m8))) +vuint64m8_t vamoswapei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u64m8_m))) +vuint64m8_t vamoswapei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m1))) +vint64m1_t vamoswapei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m1_m))) +vint64m1_t vamoswapei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m2))) +vint64m2_t vamoswapei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m2_m))) +vint64m2_t vamoswapei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m4))) +vint64m4_t vamoswapei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m4_m))) +vint64m4_t vamoswapei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m8))) +vint64m8_t vamoswapei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_i64m8_m))) +vint64m8_t vamoswapei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m1))) +vuint64m1_t vamoswapei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m1_m))) +vuint64m1_t vamoswapei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m2))) +vuint64m2_t vamoswapei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m2_m))) +vuint64m2_t vamoswapei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m4))) +vuint64m4_t vamoswapei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m4_m))) +vuint64m4_t vamoswapei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m8))) +vuint64m8_t vamoswapei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_u64m8_m))) +vuint64m8_t vamoswapei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m1))) +vint64m1_t vamoswapei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m1_m))) +vint64m1_t vamoswapei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m2))) +vint64m2_t vamoswapei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m2_m))) +vint64m2_t vamoswapei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m4))) +vint64m4_t vamoswapei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m4_m))) +vint64m4_t vamoswapei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m8))) +vint64m8_t vamoswapei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_i64m8_m))) +vint64m8_t vamoswapei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m1))) +vuint64m1_t vamoswapei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m1_m))) +vuint64m1_t vamoswapei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m2))) +vuint64m2_t vamoswapei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m2_m))) +vuint64m2_t vamoswapei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m4))) +vuint64m4_t vamoswapei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m4_m))) +vuint64m4_t vamoswapei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m8))) +vuint64m8_t vamoswapei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_u64m8_m))) +vuint64m8_t vamoswapei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m1))) +vint64m1_t vamoswapei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, 
size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m1_m))) +vint64m1_t vamoswapei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m2))) +vint64m2_t vamoswapei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m2_m))) +vint64m2_t vamoswapei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m4))) +vint64m4_t vamoswapei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m4_m))) +vint64m4_t vamoswapei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m8))) +vint64m8_t vamoswapei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_i64m8_m))) +vint64m8_t vamoswapei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m1))) +vuint64m1_t vamoswapei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m1_m))) +vuint64m1_t vamoswapei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m2))) +vuint64m2_t vamoswapei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m2_m))) +vuint64m2_t vamoswapei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m4))) +vuint64m4_t vamoswapei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m4_m))) +vuint64m4_t vamoswapei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m8))) +vuint64m8_t vamoswapei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_u64m8_m))) +vuint64m8_t vamoswapei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m1))) +vint32m1_t vamoaddei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m1_m))) +vint32m1_t vamoaddei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m2))) +vint32m2_t vamoaddei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m2_m))) +vint32m2_t vamoaddei8(vbool16_t op0, int32_t * op1, 
vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m4))) +vint32m4_t vamoaddei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m4_m))) +vint32m4_t vamoaddei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m8))) +vint32m8_t vamoaddei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32m8_m))) +vint32m8_t vamoaddei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32mf2))) +vint32mf2_t vamoaddei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i32mf2_m))) +vint32mf2_t vamoaddei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m1))) +vuint32m1_t vamoaddei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m1_m))) +vuint32m1_t vamoaddei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m2))) +vuint32m2_t vamoaddei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m2_m))) +vuint32m2_t vamoaddei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m4))) +vuint32m4_t vamoaddei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m4_m))) +vuint32m4_t vamoaddei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m8))) +vuint32m8_t vamoaddei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32m8_m))) +vuint32m8_t vamoaddei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32mf2))) +vuint32mf2_t vamoaddei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u32mf2_m))) +vuint32mf2_t vamoaddei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m1))) +vint32m1_t vamoaddei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m1_m))) +vint32m1_t vamoaddei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m2))) +vint32m2_t vamoaddei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, 
size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m2_m))) +vint32m2_t vamoaddei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m4))) +vint32m4_t vamoaddei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m4_m))) +vint32m4_t vamoaddei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m8))) +vint32m8_t vamoaddei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32m8_m))) +vint32m8_t vamoaddei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32mf2))) +vint32mf2_t vamoaddei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i32mf2_m))) +vint32mf2_t vamoaddei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m1))) +vuint32m1_t vamoaddei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m1_m))) +vuint32m1_t vamoaddei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m2))) +vuint32m2_t vamoaddei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m2_m))) +vuint32m2_t vamoaddei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m4))) +vuint32m4_t vamoaddei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m4_m))) +vuint32m4_t vamoaddei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m8))) +vuint32m8_t vamoaddei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32m8_m))) +vuint32m8_t vamoaddei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32mf2))) +vuint32mf2_t vamoaddei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u32mf2_m))) +vuint32mf2_t vamoaddei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m1))) +vint32m1_t vamoaddei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m1_m))) +vint32m1_t vamoaddei32(vbool32_t op0, int32_t * op1, 
vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m2))) +vint32m2_t vamoaddei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m2_m))) +vint32m2_t vamoaddei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m4))) +vint32m4_t vamoaddei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m4_m))) +vint32m4_t vamoaddei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m8))) +vint32m8_t vamoaddei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32m8_m))) +vint32m8_t vamoaddei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32mf2))) +vint32mf2_t vamoaddei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i32mf2_m))) +vint32mf2_t vamoaddei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m1))) +vuint32m1_t vamoaddei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m1_m))) +vuint32m1_t vamoaddei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m2))) +vuint32m2_t vamoaddei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m2_m))) +vuint32m2_t vamoaddei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m4))) +vuint32m4_t vamoaddei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m4_m))) +vuint32m4_t vamoaddei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m8))) +vuint32m8_t vamoaddei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32m8_m))) +vuint32m8_t vamoaddei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32mf2))) +vuint32mf2_t vamoaddei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u32mf2_m))) +vuint32mf2_t vamoaddei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32m1))) +vint32m1_t vamoaddei64(int32_t * 
op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32m1_m))) +vint32m1_t vamoaddei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32m2))) +vint32m2_t vamoaddei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32m2_m))) +vint32m2_t vamoaddei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32m4))) +vint32m4_t vamoaddei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32m4_m))) +vint32m4_t vamoaddei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32mf2))) +vint32mf2_t vamoaddei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i32mf2_m))) +vint32mf2_t vamoaddei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32m1))) +vuint32m1_t vamoaddei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32m1_m))) +vuint32m1_t vamoaddei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32m2))) +vuint32m2_t vamoaddei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32m2_m))) +vuint32m2_t vamoaddei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32m4))) +vuint32m4_t vamoaddei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32m4_m))) +vuint32m4_t vamoaddei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32mf2))) +vuint32mf2_t vamoaddei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u32mf2_m))) +vuint32mf2_t vamoaddei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m1))) +vint64m1_t vamoaddei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m1_m))) +vint64m1_t vamoaddei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m2))) +vint64m2_t vamoaddei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m2_m))) +vint64m2_t vamoaddei8(vbool32_t op0, 
int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m4))) +vint64m4_t vamoaddei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m4_m))) +vint64m4_t vamoaddei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m8))) +vint64m8_t vamoaddei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_i64m8_m))) +vint64m8_t vamoaddei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m1))) +vuint64m1_t vamoaddei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m1_m))) +vuint64m1_t vamoaddei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m2))) +vuint64m2_t vamoaddei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m2_m))) +vuint64m2_t vamoaddei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m4))) +vuint64m4_t vamoaddei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m4_m))) +vuint64m4_t vamoaddei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m8))) +vuint64m8_t vamoaddei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei8_v_u64m8_m))) +vuint64m8_t vamoaddei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m1))) +vint64m1_t vamoaddei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m1_m))) +vint64m1_t vamoaddei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m2))) +vint64m2_t vamoaddei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m2_m))) +vint64m2_t vamoaddei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m4))) +vint64m4_t vamoaddei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m4_m))) +vint64m4_t vamoaddei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m8))) +vint64m8_t vamoaddei16(int64_t * op0, vuint16m2_t op1, 
vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_i64m8_m))) +vint64m8_t vamoaddei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m1))) +vuint64m1_t vamoaddei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m1_m))) +vuint64m1_t vamoaddei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m2))) +vuint64m2_t vamoaddei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m2_m))) +vuint64m2_t vamoaddei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m4))) +vuint64m4_t vamoaddei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m4_m))) +vuint64m4_t vamoaddei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m8))) +vuint64m8_t vamoaddei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei16_v_u64m8_m))) +vuint64m8_t vamoaddei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m1))) +vint64m1_t vamoaddei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m1_m))) +vint64m1_t vamoaddei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m2))) +vint64m2_t vamoaddei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m2_m))) +vint64m2_t vamoaddei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m4))) +vint64m4_t vamoaddei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m4_m))) +vint64m4_t vamoaddei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m8))) +vint64m8_t vamoaddei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_i64m8_m))) +vint64m8_t vamoaddei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m1))) +vuint64m1_t vamoaddei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m1_m))) +vuint64m1_t vamoaddei32(vbool64_t op0, uint64_t * op1, 
vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m2))) +vuint64m2_t vamoaddei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m2_m))) +vuint64m2_t vamoaddei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m4))) +vuint64m4_t vamoaddei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m4_m))) +vuint64m4_t vamoaddei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m8))) +vuint64m8_t vamoaddei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei32_v_u64m8_m))) +vuint64m8_t vamoaddei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m1))) +vint64m1_t vamoaddei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m1_m))) +vint64m1_t vamoaddei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m2))) +vint64m2_t vamoaddei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m2_m))) +vint64m2_t vamoaddei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m4))) +vint64m4_t vamoaddei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m4_m))) +vint64m4_t vamoaddei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m8))) +vint64m8_t vamoaddei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_i64m8_m))) +vint64m8_t vamoaddei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m1))) +vuint64m1_t vamoaddei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m1_m))) +vuint64m1_t vamoaddei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m2))) +vuint64m2_t vamoaddei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m2_m))) +vuint64m2_t vamoaddei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m4))) +vuint64m4_t vamoaddei64(uint64_t * op0, 
vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m4_m))) +vuint64m4_t vamoaddei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m8))) +vuint64m8_t vamoaddei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoaddei64_v_u64m8_m))) +vuint64m8_t vamoaddei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m1))) +vint32m1_t vamoxorei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m1_m))) +vint32m1_t vamoxorei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m2))) +vint32m2_t vamoxorei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m2_m))) +vint32m2_t vamoxorei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m4))) +vint32m4_t vamoxorei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m4_m))) +vint32m4_t vamoxorei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m8))) +vint32m8_t vamoxorei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32m8_m))) +vint32m8_t vamoxorei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32mf2))) +vint32mf2_t vamoxorei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i32mf2_m))) +vint32mf2_t vamoxorei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m1))) +vuint32m1_t vamoxorei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m1_m))) +vuint32m1_t vamoxorei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m2))) +vuint32m2_t vamoxorei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m2_m))) +vuint32m2_t vamoxorei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m4))) +vuint32m4_t vamoxorei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m4_m))) +vuint32m4_t vamoxorei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m8))) +vuint32m8_t vamoxorei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32m8_m))) +vuint32m8_t vamoxorei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32mf2))) +vuint32mf2_t vamoxorei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u32mf2_m))) +vuint32mf2_t vamoxorei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m1))) +vint32m1_t vamoxorei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m1_m))) +vint32m1_t vamoxorei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m2))) +vint32m2_t vamoxorei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m2_m))) +vint32m2_t vamoxorei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m4))) +vint32m4_t vamoxorei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m4_m))) +vint32m4_t vamoxorei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m8))) +vint32m8_t vamoxorei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32m8_m))) +vint32m8_t vamoxorei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32mf2))) +vint32mf2_t vamoxorei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i32mf2_m))) +vint32mf2_t vamoxorei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m1))) +vuint32m1_t vamoxorei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m1_m))) +vuint32m1_t vamoxorei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m2))) +vuint32m2_t vamoxorei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m2_m))) +vuint32m2_t vamoxorei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m4))) +vuint32m4_t vamoxorei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, 
size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m4_m))) +vuint32m4_t vamoxorei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m8))) +vuint32m8_t vamoxorei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32m8_m))) +vuint32m8_t vamoxorei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32mf2))) +vuint32mf2_t vamoxorei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u32mf2_m))) +vuint32mf2_t vamoxorei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m1))) +vint32m1_t vamoxorei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m1_m))) +vint32m1_t vamoxorei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m2))) +vint32m2_t vamoxorei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m2_m))) +vint32m2_t vamoxorei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m4))) +vint32m4_t vamoxorei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m4_m))) +vint32m4_t vamoxorei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m8))) +vint32m8_t vamoxorei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32m8_m))) +vint32m8_t vamoxorei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32mf2))) +vint32mf2_t vamoxorei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i32mf2_m))) +vint32mf2_t vamoxorei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m1))) +vuint32m1_t vamoxorei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m1_m))) +vuint32m1_t vamoxorei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m2))) +vuint32m2_t vamoxorei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m2_m))) +vuint32m2_t vamoxorei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, 
vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m4))) +vuint32m4_t vamoxorei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m4_m))) +vuint32m4_t vamoxorei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m8))) +vuint32m8_t vamoxorei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32m8_m))) +vuint32m8_t vamoxorei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32mf2))) +vuint32mf2_t vamoxorei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u32mf2_m))) +vuint32mf2_t vamoxorei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32m1))) +vint32m1_t vamoxorei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32m1_m))) +vint32m1_t vamoxorei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32m2))) +vint32m2_t vamoxorei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32m2_m))) +vint32m2_t vamoxorei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32m4))) +vint32m4_t vamoxorei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32m4_m))) +vint32m4_t vamoxorei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32mf2))) +vint32mf2_t vamoxorei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i32mf2_m))) +vint32mf2_t vamoxorei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32m1))) +vuint32m1_t vamoxorei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32m1_m))) +vuint32m1_t vamoxorei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32m2))) +vuint32m2_t vamoxorei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32m2_m))) +vuint32m2_t vamoxorei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32m4))) +vuint32m4_t vamoxorei64(uint32_t * op0, vuint64m8_t 
op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32m4_m))) +vuint32m4_t vamoxorei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32mf2))) +vuint32mf2_t vamoxorei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u32mf2_m))) +vuint32mf2_t vamoxorei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m1))) +vint64m1_t vamoxorei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m1_m))) +vint64m1_t vamoxorei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m2))) +vint64m2_t vamoxorei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m2_m))) +vint64m2_t vamoxorei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m4))) +vint64m4_t vamoxorei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m4_m))) +vint64m4_t vamoxorei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m8))) +vint64m8_t vamoxorei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_i64m8_m))) +vint64m8_t vamoxorei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m1))) +vuint64m1_t vamoxorei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m1_m))) +vuint64m1_t vamoxorei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m2))) +vuint64m2_t vamoxorei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m2_m))) +vuint64m2_t vamoxorei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m4))) +vuint64m4_t vamoxorei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m4_m))) +vuint64m4_t vamoxorei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m8))) +vuint64m8_t vamoxorei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei8_v_u64m8_m))) +vuint64m8_t vamoxorei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m1))) +vint64m1_t vamoxorei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m1_m))) +vint64m1_t vamoxorei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m2))) +vint64m2_t vamoxorei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m2_m))) +vint64m2_t vamoxorei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m4))) +vint64m4_t vamoxorei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m4_m))) +vint64m4_t vamoxorei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m8))) +vint64m8_t vamoxorei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_i64m8_m))) +vint64m8_t vamoxorei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m1))) +vuint64m1_t vamoxorei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m1_m))) +vuint64m1_t vamoxorei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m2))) +vuint64m2_t vamoxorei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m2_m))) +vuint64m2_t vamoxorei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m4))) +vuint64m4_t vamoxorei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m4_m))) +vuint64m4_t vamoxorei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m8))) +vuint64m8_t vamoxorei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei16_v_u64m8_m))) +vuint64m8_t vamoxorei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m1))) +vint64m1_t vamoxorei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m1_m))) +vint64m1_t vamoxorei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m2))) +vint64m2_t vamoxorei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t 
op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m2_m))) +vint64m2_t vamoxorei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m4))) +vint64m4_t vamoxorei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m4_m))) +vint64m4_t vamoxorei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m8))) +vint64m8_t vamoxorei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_i64m8_m))) +vint64m8_t vamoxorei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m1))) +vuint64m1_t vamoxorei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m1_m))) +vuint64m1_t vamoxorei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m2))) +vuint64m2_t vamoxorei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m2_m))) +vuint64m2_t vamoxorei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m4))) +vuint64m4_t vamoxorei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m4_m))) +vuint64m4_t vamoxorei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m8))) +vuint64m8_t vamoxorei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei32_v_u64m8_m))) +vuint64m8_t vamoxorei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m1))) +vint64m1_t vamoxorei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m1_m))) +vint64m1_t vamoxorei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m2))) +vint64m2_t vamoxorei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m2_m))) +vint64m2_t vamoxorei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m4))) +vint64m4_t vamoxorei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m4_m))) +vint64m4_t vamoxorei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m8))) +vint64m8_t vamoxorei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_i64m8_m))) +vint64m8_t vamoxorei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m1))) +vuint64m1_t vamoxorei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m1_m))) +vuint64m1_t vamoxorei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m2))) +vuint64m2_t vamoxorei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m2_m))) +vuint64m2_t vamoxorei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m4))) +vuint64m4_t vamoxorei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m4_m))) +vuint64m4_t vamoxorei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m8))) +vuint64m8_t vamoxorei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoxorei64_v_u64m8_m))) +vuint64m8_t vamoxorei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m1))) +vint32m1_t vamoandei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m1_m))) +vint32m1_t vamoandei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m2))) +vint32m2_t vamoandei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m2_m))) +vint32m2_t vamoandei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m4))) +vint32m4_t vamoandei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m4_m))) +vint32m4_t vamoandei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m8))) +vint32m8_t vamoandei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32m8_m))) +vint32m8_t vamoandei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32mf2))) +vint32mf2_t vamoandei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i32mf2_m))) +vint32mf2_t vamoandei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m1))) +vuint32m1_t vamoandei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m1_m))) +vuint32m1_t vamoandei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m2))) +vuint32m2_t vamoandei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m2_m))) +vuint32m2_t vamoandei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m4))) +vuint32m4_t vamoandei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m4_m))) +vuint32m4_t vamoandei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m8))) +vuint32m8_t vamoandei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32m8_m))) +vuint32m8_t vamoandei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32mf2))) +vuint32mf2_t vamoandei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u32mf2_m))) +vuint32mf2_t vamoandei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m1))) +vint32m1_t vamoandei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m1_m))) +vint32m1_t vamoandei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m2))) +vint32m2_t vamoandei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m2_m))) +vint32m2_t vamoandei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m4))) +vint32m4_t vamoandei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m4_m))) +vint32m4_t vamoandei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m8))) +vint32m8_t vamoandei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32m8_m))) +vint32m8_t vamoandei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32mf2))) +vint32mf2_t vamoandei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i32mf2_m))) +vint32mf2_t vamoandei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m1))) +vuint32m1_t vamoandei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m1_m))) +vuint32m1_t vamoandei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m2))) +vuint32m2_t vamoandei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m2_m))) +vuint32m2_t vamoandei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m4))) +vuint32m4_t vamoandei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m4_m))) +vuint32m4_t vamoandei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m8))) +vuint32m8_t vamoandei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32m8_m))) +vuint32m8_t vamoandei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32mf2))) +vuint32mf2_t vamoandei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u32mf2_m))) +vuint32mf2_t vamoandei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m1))) +vint32m1_t vamoandei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m1_m))) +vint32m1_t vamoandei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m2))) +vint32m2_t vamoandei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m2_m))) +vint32m2_t vamoandei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m4))) +vint32m4_t vamoandei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m4_m))) +vint32m4_t vamoandei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m8))) +vint32m8_t vamoandei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + 
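+// [Editor's illustrative note, not part of the upstream header] Every vamo*
+// intrinsic in this block follows one overload pattern: the unmasked form
+// takes (base pointer, offset index vector, source vector, vl) and returns
+// the values previously held in memory; the masked form prepends a vboolN_t
+// mask. A minimal usage sketch, assuming a Zvamo-enabled target; `table`,
+// `offsets`, `values`, and `n` are hypothetical:
+//
+//   size_t vl = vsetvl_e32m1(n);                    // elements this strip
+//   vuint32m1_t offs = vle32_v_u32m1(offsets, vl);  // per-element offsets
+//   vint32m1_t  src  = vle32_v_i32m1(values, vl);   // values to AND in
+//   vint32m1_t  old  = vamoandei32(table, offs, src, vl);  // returns old values
+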
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32m8_m))) +vint32m8_t vamoandei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32mf2))) +vint32mf2_t vamoandei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i32mf2_m))) +vint32mf2_t vamoandei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m1))) +vuint32m1_t vamoandei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m1_m))) +vuint32m1_t vamoandei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m2))) +vuint32m2_t vamoandei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m2_m))) +vuint32m2_t vamoandei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m4))) +vuint32m4_t vamoandei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m4_m))) +vuint32m4_t vamoandei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m8))) +vuint32m8_t vamoandei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32m8_m))) +vuint32m8_t vamoandei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32mf2))) +vuint32mf2_t vamoandei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u32mf2_m))) +vuint32mf2_t vamoandei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32m1))) +vint32m1_t vamoandei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32m1_m))) +vint32m1_t vamoandei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32m2))) +vint32m2_t vamoandei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32m2_m))) +vint32m2_t vamoandei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32m4))) +vint32m4_t vamoandei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32m4_m))) +vint32m4_t vamoandei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t 
op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32mf2))) +vint32mf2_t vamoandei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i32mf2_m))) +vint32mf2_t vamoandei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32m1))) +vuint32m1_t vamoandei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32m1_m))) +vuint32m1_t vamoandei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32m2))) +vuint32m2_t vamoandei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32m2_m))) +vuint32m2_t vamoandei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32m4))) +vuint32m4_t vamoandei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32m4_m))) +vuint32m4_t vamoandei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32mf2))) +vuint32mf2_t vamoandei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u32mf2_m))) +vuint32mf2_t vamoandei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m1))) +vint64m1_t vamoandei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m1_m))) +vint64m1_t vamoandei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m2))) +vint64m2_t vamoandei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m2_m))) +vint64m2_t vamoandei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m4))) +vint64m4_t vamoandei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m4_m))) +vint64m4_t vamoandei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m8))) +vint64m8_t vamoandei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_i64m8_m))) +vint64m8_t vamoandei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m1))) +vuint64m1_t vamoandei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + 
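+// [Editor's illustrative note] The eiN suffix names the element width of the
+// index vector, independently of the data width, so both vectors carry the
+// same element count: vamoandei8 over int64_t/uint64_t data at m1 pairs a
+// 64-bit value vector with a vuint8mf8_t index vector. A hedged sketch
+// (`flags`, `byte_offsets`, `clear_masks`, `n` are hypothetical; under the
+// draft Zvamo spec the index values are byte offsets, as for indexed loads):
+//
+//   size_t vl = vsetvl_e64m1(n);
+//   vuint8mf8_t offs = vle8_v_u8mf8(byte_offsets, vl);
+//   vuint64m1_t old  = vamoandei8(flags, offs, vle64_v_u64m1(clear_masks, vl), vl);
+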
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m1_m))) +vuint64m1_t vamoandei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m2))) +vuint64m2_t vamoandei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m2_m))) +vuint64m2_t vamoandei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m4))) +vuint64m4_t vamoandei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m4_m))) +vuint64m4_t vamoandei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m8))) +vuint64m8_t vamoandei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei8_v_u64m8_m))) +vuint64m8_t vamoandei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m1))) +vint64m1_t vamoandei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m1_m))) +vint64m1_t vamoandei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m2))) +vint64m2_t vamoandei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m2_m))) +vint64m2_t vamoandei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m4))) +vint64m4_t vamoandei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m4_m))) +vint64m4_t vamoandei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m8))) +vint64m8_t vamoandei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_i64m8_m))) +vint64m8_t vamoandei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m1))) +vuint64m1_t vamoandei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m1_m))) +vuint64m1_t vamoandei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m2))) +vuint64m2_t vamoandei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m2_m))) +vuint64m2_t vamoandei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); 
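+// [Editor's illustrative note] In the masked (_m) overloads the vboolN_t
+// mask is the first operand, and elements whose mask bit is clear perform no
+// memory access. A hedged sketch against the vamoandei16 u64m2 form declared
+// just above (`keys`, `table`, `offs`, `vals`, `n` are hypothetical; `offs`
+// is a vuint16mf2_t, `vals` a vuint64m2_t):
+//
+//   size_t vl = vsetvl_e64m2(n);
+//   vbool32_t m = vmsne_vx_u64m2_b32(vle64_v_u64m2(keys, vl), 0, vl);  // keys != 0
+//   vuint64m2_t old = vamoandei16(m, table, offs, vals, vl);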
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m4))) +vuint64m4_t vamoandei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m4_m))) +vuint64m4_t vamoandei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m8))) +vuint64m8_t vamoandei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei16_v_u64m8_m))) +vuint64m8_t vamoandei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m1))) +vint64m1_t vamoandei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m1_m))) +vint64m1_t vamoandei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m2))) +vint64m2_t vamoandei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m2_m))) +vint64m2_t vamoandei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m4))) +vint64m4_t vamoandei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m4_m))) +vint64m4_t vamoandei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m8))) +vint64m8_t vamoandei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_i64m8_m))) +vint64m8_t vamoandei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m1))) +vuint64m1_t vamoandei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m1_m))) +vuint64m1_t vamoandei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m2))) +vuint64m2_t vamoandei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m2_m))) +vuint64m2_t vamoandei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m4))) +vuint64m4_t vamoandei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m4_m))) +vuint64m4_t vamoandei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m8))) +vuint64m8_t vamoandei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + 
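+// [Editor's illustrative note] Overloads are only emitted while the index
+// vector's LMUL stays legal: index LMUL = data LMUL * (index EEW / data SEW).
+// That is why the ei64 forms over 32-bit data above stop at m4 -- an i32m8
+// variant would need a vuint64m16_t index (LMUL 16), which does not exist.
+// The widest legal pairing, as declared earlier in this block:
+//
+//   vint32m4_t old = vamoandei64(p32, idx, src, vl);  // idx is vuint64m8_t
+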
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei32_v_u64m8_m))) +vuint64m8_t vamoandei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m1))) +vint64m1_t vamoandei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m1_m))) +vint64m1_t vamoandei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m2))) +vint64m2_t vamoandei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m2_m))) +vint64m2_t vamoandei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m4))) +vint64m4_t vamoandei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m4_m))) +vint64m4_t vamoandei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m8))) +vint64m8_t vamoandei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_i64m8_m))) +vint64m8_t vamoandei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m1))) +vuint64m1_t vamoandei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m1_m))) +vuint64m1_t vamoandei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m2))) +vuint64m2_t vamoandei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m2_m))) +vuint64m2_t vamoandei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m4))) +vuint64m4_t vamoandei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m4_m))) +vuint64m4_t vamoandei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m8))) +vuint64m8_t vamoandei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoandei64_v_u64m8_m))) +vuint64m8_t vamoandei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m1))) +vint32m1_t vamoorei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m1_m))) +vint32m1_t vamoorei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + 
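+// [Editor's illustrative note] vamoorei* performs an indexed atomic OR and
+// returns the prior memory contents, which makes a scattered "set these flag
+// bits" update race-free. A hedged sketch (`flag_words`, `offs`, `bits`, `n`
+// are hypothetical; `offs` is a vuint8mf4_t of byte offsets):
+//
+//   size_t vl = vsetvl_e32m1(n);
+//   vint32m1_t prev = vamoorei8(flag_words, offs, bits, vl);
+//   // prev holds each word's value before its bits were OR-ed in
+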
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m2))) +vint32m2_t vamoorei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m2_m))) +vint32m2_t vamoorei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m4))) +vint32m4_t vamoorei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m4_m))) +vint32m4_t vamoorei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m8))) +vint32m8_t vamoorei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32m8_m))) +vint32m8_t vamoorei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32mf2))) +vint32mf2_t vamoorei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i32mf2_m))) +vint32mf2_t vamoorei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m1))) +vuint32m1_t vamoorei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m1_m))) +vuint32m1_t vamoorei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m2))) +vuint32m2_t vamoorei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m2_m))) +vuint32m2_t vamoorei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m4))) +vuint32m4_t vamoorei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m4_m))) +vuint32m4_t vamoorei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m8))) +vuint32m8_t vamoorei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32m8_m))) +vuint32m8_t vamoorei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32mf2))) +vuint32mf2_t vamoorei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u32mf2_m))) +vuint32mf2_t vamoorei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m1))) +vint32m1_t vamoorei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m1_m))) +vint32m1_t vamoorei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m2))) +vint32m2_t vamoorei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m2_m))) +vint32m2_t vamoorei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m4))) +vint32m4_t vamoorei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m4_m))) +vint32m4_t vamoorei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m8))) +vint32m8_t vamoorei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32m8_m))) +vint32m8_t vamoorei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32mf2))) +vint32mf2_t vamoorei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i32mf2_m))) +vint32mf2_t vamoorei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m1))) +vuint32m1_t vamoorei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m1_m))) +vuint32m1_t vamoorei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m2))) +vuint32m2_t vamoorei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m2_m))) +vuint32m2_t vamoorei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m4))) +vuint32m4_t vamoorei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m4_m))) +vuint32m4_t vamoorei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m8))) +vuint32m8_t vamoorei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32m8_m))) +vuint32m8_t vamoorei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32mf2))) +vuint32mf2_t vamoorei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u32mf2_m))) +vuint32mf2_t vamoorei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m1))) +vint32m1_t vamoorei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m1_m))) +vint32m1_t vamoorei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m2))) +vint32m2_t vamoorei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m2_m))) +vint32m2_t vamoorei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m4))) +vint32m4_t vamoorei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m4_m))) +vint32m4_t vamoorei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m8))) +vint32m8_t vamoorei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32m8_m))) +vint32m8_t vamoorei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32mf2))) +vint32mf2_t vamoorei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i32mf2_m))) +vint32mf2_t vamoorei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m1))) +vuint32m1_t vamoorei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m1_m))) +vuint32m1_t vamoorei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m2))) +vuint32m2_t vamoorei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m2_m))) +vuint32m2_t vamoorei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m4))) +vuint32m4_t vamoorei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m4_m))) +vuint32m4_t vamoorei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m8))) +vuint32m8_t vamoorei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32m8_m))) +vuint32m8_t vamoorei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32mf2))) +vuint32mf2_t vamoorei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u32mf2_m))) +vuint32mf2_t vamoorei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32m1))) +vint32m1_t vamoorei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32m1_m))) +vint32m1_t vamoorei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32m2))) +vint32m2_t vamoorei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32m2_m))) +vint32m2_t vamoorei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32m4))) +vint32m4_t vamoorei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32m4_m))) +vint32m4_t vamoorei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32mf2))) +vint32mf2_t vamoorei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i32mf2_m))) +vint32mf2_t vamoorei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32m1))) +vuint32m1_t vamoorei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32m1_m))) +vuint32m1_t vamoorei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32m2))) +vuint32m2_t vamoorei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32m2_m))) +vuint32m2_t vamoorei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32m4))) +vuint32m4_t vamoorei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32m4_m))) +vuint32m4_t vamoorei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32mf2))) +vuint32mf2_t vamoorei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u32mf2_m))) +vuint32mf2_t vamoorei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m1))) +vint64m1_t vamoorei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m1_m))) +vint64m1_t vamoorei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m2))) +vint64m2_t vamoorei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m2_m))) +vint64m2_t vamoorei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m4))) +vint64m4_t vamoorei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m4_m))) +vint64m4_t vamoorei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m8))) +vint64m8_t vamoorei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_i64m8_m))) +vint64m8_t vamoorei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m1))) +vuint64m1_t vamoorei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m1_m))) +vuint64m1_t vamoorei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m2))) +vuint64m2_t vamoorei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m2_m))) +vuint64m2_t vamoorei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m4))) +vuint64m4_t vamoorei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m4_m))) +vuint64m4_t vamoorei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m8))) +vuint64m8_t vamoorei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei8_v_u64m8_m))) +vuint64m8_t vamoorei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m1))) +vint64m1_t vamoorei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m1_m))) +vint64m1_t vamoorei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m2))) +vint64m2_t vamoorei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m2_m))) +vint64m2_t vamoorei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m4))) +vint64m4_t vamoorei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m4_m))) 
+vint64m4_t vamoorei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m8))) +vint64m8_t vamoorei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_i64m8_m))) +vint64m8_t vamoorei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m1))) +vuint64m1_t vamoorei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m1_m))) +vuint64m1_t vamoorei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m2))) +vuint64m2_t vamoorei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m2_m))) +vuint64m2_t vamoorei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m4))) +vuint64m4_t vamoorei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m4_m))) +vuint64m4_t vamoorei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m8))) +vuint64m8_t vamoorei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei16_v_u64m8_m))) +vuint64m8_t vamoorei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m1))) +vint64m1_t vamoorei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m1_m))) +vint64m1_t vamoorei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m2))) +vint64m2_t vamoorei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m2_m))) +vint64m2_t vamoorei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m4))) +vint64m4_t vamoorei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m4_m))) +vint64m4_t vamoorei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m8))) +vint64m8_t vamoorei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_i64m8_m))) +vint64m8_t vamoorei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m1))) +vuint64m1_t vamoorei32(uint64_t * 
op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m1_m))) +vuint64m1_t vamoorei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m2))) +vuint64m2_t vamoorei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m2_m))) +vuint64m2_t vamoorei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m4))) +vuint64m4_t vamoorei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m4_m))) +vuint64m4_t vamoorei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m8))) +vuint64m8_t vamoorei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei32_v_u64m8_m))) +vuint64m8_t vamoorei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m1))) +vint64m1_t vamoorei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m1_m))) +vint64m1_t vamoorei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m2))) +vint64m2_t vamoorei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m2_m))) +vint64m2_t vamoorei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m4))) +vint64m4_t vamoorei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m4_m))) +vint64m4_t vamoorei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m8))) +vint64m8_t vamoorei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_i64m8_m))) +vint64m8_t vamoorei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m1))) +vuint64m1_t vamoorei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m1_m))) +vuint64m1_t vamoorei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m2))) +vuint64m2_t vamoorei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m2_m))) +vuint64m2_t vamoorei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, 
vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m4))) +vuint64m4_t vamoorei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m4_m))) +vuint64m4_t vamoorei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m8))) +vuint64m8_t vamoorei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoorei64_v_u64m8_m))) +vuint64m8_t vamoorei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m1))) +vint32m1_t vamominei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m1_m))) +vint32m1_t vamominei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m2))) +vint32m2_t vamominei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m2_m))) +vint32m2_t vamominei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m4))) +vint32m4_t vamominei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m4_m))) +vint32m4_t vamominei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m8))) +vint32m8_t vamominei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32m8_m))) +vint32m8_t vamominei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32mf2))) +vint32mf2_t vamominei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i32mf2_m))) +vint32mf2_t vamominei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m1))) +vint32m1_t vamominei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m1_m))) +vint32m1_t vamominei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m2))) +vint32m2_t vamominei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m2_m))) +vint32m2_t vamominei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m4))) +vint32m4_t vamominei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m4_m))) +vint32m4_t vamominei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m8))) +vint32m8_t vamominei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32m8_m))) +vint32m8_t vamominei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32mf2))) +vint32mf2_t vamominei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i32mf2_m))) +vint32mf2_t vamominei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m1))) +vint32m1_t vamominei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m1_m))) +vint32m1_t vamominei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m2))) +vint32m2_t vamominei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m2_m))) +vint32m2_t vamominei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m4))) +vint32m4_t vamominei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m4_m))) +vint32m4_t vamominei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m8))) +vint32m8_t vamominei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32m8_m))) +vint32m8_t vamominei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32mf2))) +vint32mf2_t vamominei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i32mf2_m))) +vint32mf2_t vamominei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32m1))) +vint32m1_t vamominei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32m1_m))) +vint32m1_t vamominei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32m2))) +vint32m2_t vamominei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32m2_m))) +vint32m2_t vamominei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32m4))) +vint32m4_t vamominei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32m4_m))) +vint32m4_t vamominei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32mf2))) +vint32mf2_t vamominei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i32mf2_m))) +vint32mf2_t vamominei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m1))) +vint64m1_t vamominei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m1_m))) +vint64m1_t vamominei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m2))) +vint64m2_t vamominei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m2_m))) +vint64m2_t vamominei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m4))) +vint64m4_t vamominei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m4_m))) +vint64m4_t vamominei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m8))) +vint64m8_t vamominei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei8_v_i64m8_m))) +vint64m8_t vamominei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m1))) +vint64m1_t vamominei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m1_m))) +vint64m1_t vamominei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m2))) +vint64m2_t vamominei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m2_m))) +vint64m2_t vamominei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m4))) +vint64m4_t vamominei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m4_m))) +vint64m4_t vamominei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m8))) +vint64m8_t vamominei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamominei16_v_i64m8_m))) +vint64m8_t vamominei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m1))) +vint64m1_t vamominei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m1_m))) +vint64m1_t vamominei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m2))) +vint64m2_t vamominei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m2_m))) +vint64m2_t vamominei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m4))) +vint64m4_t vamominei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m4_m))) +vint64m4_t vamominei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m8))) +vint64m8_t vamominei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei32_v_i64m8_m))) +vint64m8_t vamominei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m1))) +vint64m1_t vamominei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m1_m))) +vint64m1_t vamominei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m2))) +vint64m2_t vamominei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m2_m))) +vint64m2_t vamominei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m4))) +vint64m4_t vamominei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m4_m))) +vint64m4_t vamominei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m8))) +vint64m8_t vamominei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominei64_v_i64m8_m))) +vint64m8_t vamominei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m1))) +vint32m1_t vamomaxei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m1_m))) +vint32m1_t vamomaxei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded 
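
/* Editorial note: the unmasked vamo* overloads all share one shape:
 * (base pointer, vector of per-element byte offsets, source values, vl),
 * and each returns the values that were in memory before the atomic update.
 * A minimal usage sketch (hypothetical wrapper name, assuming this clang 14
 * header with the draft Zvamo extension enabled at compile time):
 *
 *   #include <riscv_vector.h>
 *
 *   // For each active lane i, atomically apply signed min at address
 *   // (char *)table + byte_offsets[i] and collect the old values.
 *   vint32m1_t amo_min_scatter(int32_t *table, vuint32m1_t byte_offsets,
 *                              vint32m1_t vals, size_t vl) {
 *     // Resolves to __builtin_rvv_vamominei32_v_i32m1 via the overloads here.
 *     return vamominei32(table, byte_offsets, vals, vl);
 *   }
 */
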
__attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m2))) +vint32m2_t vamomaxei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m2_m))) +vint32m2_t vamomaxei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m4))) +vint32m4_t vamomaxei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m4_m))) +vint32m4_t vamomaxei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m8))) +vint32m8_t vamomaxei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32m8_m))) +vint32m8_t vamomaxei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32mf2))) +vint32mf2_t vamomaxei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i32mf2_m))) +vint32mf2_t vamomaxei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m1))) +vint32m1_t vamomaxei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m1_m))) +vint32m1_t vamomaxei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m2))) +vint32m2_t vamomaxei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m2_m))) +vint32m2_t vamomaxei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m4))) +vint32m4_t vamomaxei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m4_m))) +vint32m4_t vamomaxei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m8))) +vint32m8_t vamomaxei16(int32_t * op0, vuint16m4_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32m8_m))) +vint32m8_t vamomaxei16(vbool4_t op0, int32_t * op1, vuint16m4_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32mf2))) +vint32mf2_t vamomaxei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i32mf2_m))) +vint32mf2_t vamomaxei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m1))) +vint32m1_t vamomaxei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded 
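
/* Mechanically these declarations are thin glue: clang_builtin_alias forwards
 * each prototype to the named __builtin_rvv_* intrinsic, while __rvv_overloaded
 * lets one C identifier carry every type combination. A rough sketch of the
 * idiom (the exact macro body is an assumption, not verbatim header text):
 *
 *   #define __rvv_overloaded __attribute__((__overloadable__))
 *
 * so the argument types alone pick the underlying builtin at each call site.
 */
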
__attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m1_m))) +vint32m1_t vamomaxei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m2))) +vint32m2_t vamomaxei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m2_m))) +vint32m2_t vamomaxei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m4))) +vint32m4_t vamomaxei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m4_m))) +vint32m4_t vamomaxei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m8))) +vint32m8_t vamomaxei32(int32_t * op0, vuint32m8_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32m8_m))) +vint32m8_t vamomaxei32(vbool4_t op0, int32_t * op1, vuint32m8_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32mf2))) +vint32mf2_t vamomaxei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i32mf2_m))) +vint32mf2_t vamomaxei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32m1))) +vint32m1_t vamomaxei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32m1_m))) +vint32m1_t vamomaxei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32m2))) +vint32m2_t vamomaxei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32m2_m))) +vint32m2_t vamomaxei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32m4))) +vint32m4_t vamomaxei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32m4_m))) +vint32m4_t vamomaxei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32mf2))) +vint32mf2_t vamomaxei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i32mf2_m))) +vint32mf2_t vamomaxei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m1))) +vint64m1_t vamomaxei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m1_m))) +vint64m1_t vamomaxei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m2))) +vint64m2_t vamomaxei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m2_m))) +vint64m2_t vamomaxei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m4))) +vint64m4_t vamomaxei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m4_m))) +vint64m4_t vamomaxei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m8))) +vint64m8_t vamomaxei8(int64_t * op0, vuint8m1_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei8_v_i64m8_m))) +vint64m8_t vamomaxei8(vbool8_t op0, int64_t * op1, vuint8m1_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m1))) +vint64m1_t vamomaxei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m1_m))) +vint64m1_t vamomaxei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m2))) +vint64m2_t vamomaxei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m2_m))) +vint64m2_t vamomaxei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m4))) +vint64m4_t vamomaxei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m4_m))) +vint64m4_t vamomaxei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m8))) +vint64m8_t vamomaxei16(int64_t * op0, vuint16m2_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei16_v_i64m8_m))) +vint64m8_t vamomaxei16(vbool8_t op0, int64_t * op1, vuint16m2_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m1))) +vint64m1_t vamomaxei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m1_m))) +vint64m1_t vamomaxei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m2))) +vint64m2_t vamomaxei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m2_m))) +vint64m2_t vamomaxei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m4))) +vint64m4_t vamomaxei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded 
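
/* The _m overloads prepend a mask register (vboolN_t, with N = SEW/LMUL) to
 * the same operand list; masked-off lanes perform no memory access. A hedged
 * sketch (hypothetical wrapper) built on the masked vamomaxei32 overload for
 * vint64m2_t declared above:
 *
 *   vint64m2_t amo_max_where(vbool32_t mask, int64_t *table,
 *                            vuint32m1_t byte_offsets, vint64m2_t vals,
 *                            size_t vl) {
 *     return vamomaxei32(mask, table, byte_offsets, vals, vl);
 *   }
 */
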
__attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m4_m))) +vint64m4_t vamomaxei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m8))) +vint64m8_t vamomaxei32(int64_t * op0, vuint32m4_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei32_v_i64m8_m))) +vint64m8_t vamomaxei32(vbool8_t op0, int64_t * op1, vuint32m4_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m1))) +vint64m1_t vamomaxei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m1_m))) +vint64m1_t vamomaxei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m2))) +vint64m2_t vamomaxei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m2_m))) +vint64m2_t vamomaxei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m4))) +vint64m4_t vamomaxei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m4_m))) +vint64m4_t vamomaxei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m8))) +vint64m8_t vamomaxei64(int64_t * op0, vuint64m8_t op1, vint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxei64_v_i64m8_m))) +vint64m8_t vamomaxei64(vbool8_t op0, int64_t * op1, vuint64m8_t op2, vint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m1))) +vuint32m1_t vamominuei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m1_m))) +vuint32m1_t vamominuei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m2))) +vuint32m2_t vamominuei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m2_m))) +vuint32m2_t vamominuei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m4))) +vuint32m4_t vamominuei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m4_m))) +vuint32m4_t vamominuei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m8))) +vuint32m8_t vamominuei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32m8_m))) +vuint32m8_t vamominuei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32mf2))) +vuint32mf2_t vamominuei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u32mf2_m))) +vuint32mf2_t vamominuei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m1))) +vuint32m1_t vamominuei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m1_m))) +vuint32m1_t vamominuei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m2))) +vuint32m2_t vamominuei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m2_m))) +vuint32m2_t vamominuei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m4))) +vuint32m4_t vamominuei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m4_m))) +vuint32m4_t vamominuei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m8))) +vuint32m8_t vamominuei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32m8_m))) +vuint32m8_t vamominuei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32mf2))) +vuint32mf2_t vamominuei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u32mf2_m))) +vuint32mf2_t vamominuei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m1))) +vuint32m1_t vamominuei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m1_m))) +vuint32m1_t vamominuei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m2))) +vuint32m2_t vamominuei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m2_m))) +vuint32m2_t vamominuei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m4))) +vuint32m4_t vamominuei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m4_m))) +vuint32m4_t vamominuei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m8))) +vuint32m8_t vamominuei32(uint32_t * op0, 
vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32m8_m))) +vuint32m8_t vamominuei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32mf2))) +vuint32mf2_t vamominuei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u32mf2_m))) +vuint32mf2_t vamominuei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32m1))) +vuint32m1_t vamominuei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32m1_m))) +vuint32m1_t vamominuei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32m2))) +vuint32m2_t vamominuei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32m2_m))) +vuint32m2_t vamominuei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32m4))) +vuint32m4_t vamominuei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32m4_m))) +vuint32m4_t vamominuei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32mf2))) +vuint32mf2_t vamominuei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u32mf2_m))) +vuint32mf2_t vamominuei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m1))) +vuint64m1_t vamominuei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m1_m))) +vuint64m1_t vamominuei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m2))) +vuint64m2_t vamominuei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m2_m))) +vuint64m2_t vamominuei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m4))) +vuint64m4_t vamominuei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m4_m))) +vuint64m4_t vamominuei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m8))) +vuint64m8_t vamominuei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded 
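
/* Register-group scaling worth noting: the offset operand's group size follows
 * index EMUL = (index EEW / data EEW) * data LMUL, which is why 64-bit offsets
 * for one register (m1) of 32-bit data arrive as a vuint64m2_t above. A small
 * sketch (hypothetical wrapper) over the vamominuei64 overload declared above:
 *
 *   vuint32m1_t amo_minu_wide_index(uint32_t *table, vuint64m2_t byte_offsets,
 *                                   vuint32m1_t vals, size_t vl) {
 *     return vamominuei64(table, byte_offsets, vals, vl);
 *   }
 */
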
__attribute__((clang_builtin_alias(__builtin_rvv_vamominuei8_v_u64m8_m))) +vuint64m8_t vamominuei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m1))) +vuint64m1_t vamominuei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m1_m))) +vuint64m1_t vamominuei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m2))) +vuint64m2_t vamominuei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m2_m))) +vuint64m2_t vamominuei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m4))) +vuint64m4_t vamominuei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m4_m))) +vuint64m4_t vamominuei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m8))) +vuint64m8_t vamominuei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei16_v_u64m8_m))) +vuint64m8_t vamominuei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m1))) +vuint64m1_t vamominuei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m1_m))) +vuint64m1_t vamominuei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m2))) +vuint64m2_t vamominuei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m2_m))) +vuint64m2_t vamominuei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m4))) +vuint64m4_t vamominuei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m4_m))) +vuint64m4_t vamominuei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m8))) +vuint64m8_t vamominuei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei32_v_u64m8_m))) +vuint64m8_t vamominuei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m1))) +vuint64m1_t vamominuei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m1_m))) +vuint64m1_t vamominuei64(vbool64_t op0, uint64_t 
* op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m2))) +vuint64m2_t vamominuei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m2_m))) +vuint64m2_t vamominuei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m4))) +vuint64m4_t vamominuei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m4_m))) +vuint64m4_t vamominuei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m8))) +vuint64m8_t vamominuei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamominuei64_v_u64m8_m))) +vuint64m8_t vamominuei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m1))) +vuint32m1_t vamomaxuei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m1_m))) +vuint32m1_t vamomaxuei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m2))) +vuint32m2_t vamomaxuei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m2_m))) +vuint32m2_t vamomaxuei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m4))) +vuint32m4_t vamomaxuei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m4_m))) +vuint32m4_t vamomaxuei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m8))) +vuint32m8_t vamomaxuei8(uint32_t * op0, vuint8m2_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32m8_m))) +vuint32m8_t vamomaxuei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32mf2))) +vuint32mf2_t vamomaxuei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u32mf2_m))) +vuint32mf2_t vamomaxuei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m1))) +vuint32m1_t vamomaxuei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m1_m))) +vuint32m1_t vamomaxuei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded 
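
/* Because every vamo* intrinsic returns the prior memory contents, the maxu
 * forms double as a vectorized fetch-and-max. For example (hypothetical
 * wrapper, using the vamomaxuei16 overload declared above), updating shared
 * per-bucket maxima while observing the values they replaced:
 *
 *   vuint32m1_t bump_maxima(uint32_t *buckets, vuint16mf2_t byte_offsets,
 *                           vuint32m1_t candidates, size_t vl) {
 *     return vamomaxuei16(buckets, byte_offsets, candidates, vl);
 *   }
 */
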
__attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m2))) +vuint32m2_t vamomaxuei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m2_m))) +vuint32m2_t vamomaxuei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m4))) +vuint32m4_t vamomaxuei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m4_m))) +vuint32m4_t vamomaxuei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m8))) +vuint32m8_t vamomaxuei16(uint32_t * op0, vuint16m4_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32m8_m))) +vuint32m8_t vamomaxuei16(vbool4_t op0, uint32_t * op1, vuint16m4_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32mf2))) +vuint32mf2_t vamomaxuei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u32mf2_m))) +vuint32mf2_t vamomaxuei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m1))) +vuint32m1_t vamomaxuei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m1_m))) +vuint32m1_t vamomaxuei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m2))) +vuint32m2_t vamomaxuei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m2_m))) +vuint32m2_t vamomaxuei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m4))) +vuint32m4_t vamomaxuei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m4_m))) +vuint32m4_t vamomaxuei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m8))) +vuint32m8_t vamomaxuei32(uint32_t * op0, vuint32m8_t op1, vuint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32m8_m))) +vuint32m8_t vamomaxuei32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32mf2))) +vuint32mf2_t vamomaxuei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u32mf2_m))) +vuint32mf2_t vamomaxuei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32m1))) +vuint32m1_t vamomaxuei64(uint32_t * 
op0, vuint64m2_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32m1_m))) +vuint32m1_t vamomaxuei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32m2))) +vuint32m2_t vamomaxuei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32m2_m))) +vuint32m2_t vamomaxuei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32m4))) +vuint32m4_t vamomaxuei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32m4_m))) +vuint32m4_t vamomaxuei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32mf2))) +vuint32mf2_t vamomaxuei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u32mf2_m))) +vuint32mf2_t vamomaxuei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m1))) +vuint64m1_t vamomaxuei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m1_m))) +vuint64m1_t vamomaxuei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m2))) +vuint64m2_t vamomaxuei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m2_m))) +vuint64m2_t vamomaxuei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m4))) +vuint64m4_t vamomaxuei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m4_m))) +vuint64m4_t vamomaxuei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m8))) +vuint64m8_t vamomaxuei8(uint64_t * op0, vuint8m1_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei8_v_u64m8_m))) +vuint64m8_t vamomaxuei8(vbool8_t op0, uint64_t * op1, vuint8m1_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m1))) +vuint64m1_t vamomaxuei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m1_m))) +vuint64m1_t vamomaxuei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m2))) +vuint64m2_t vamomaxuei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m2_m))) +vuint64m2_t vamomaxuei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m4))) +vuint64m4_t vamomaxuei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m4_m))) +vuint64m4_t vamomaxuei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m8))) +vuint64m8_t vamomaxuei16(uint64_t * op0, vuint16m2_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei16_v_u64m8_m))) +vuint64m8_t vamomaxuei16(vbool8_t op0, uint64_t * op1, vuint16m2_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m1))) +vuint64m1_t vamomaxuei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m1_m))) +vuint64m1_t vamomaxuei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m2))) +vuint64m2_t vamomaxuei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m2_m))) +vuint64m2_t vamomaxuei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m4))) +vuint64m4_t vamomaxuei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m4_m))) +vuint64m4_t vamomaxuei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m8))) +vuint64m8_t vamomaxuei32(uint64_t * op0, vuint32m4_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei32_v_u64m8_m))) +vuint64m8_t vamomaxuei32(vbool8_t op0, uint64_t * op1, vuint32m4_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m1))) +vuint64m1_t vamomaxuei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m1_m))) +vuint64m1_t vamomaxuei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m2))) +vuint64m2_t vamomaxuei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m2_m))) +vuint64m2_t vamomaxuei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m4))) +vuint64m4_t vamomaxuei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m4_m))) +vuint64m4_t vamomaxuei64(vbool16_t op0, uint64_t 
* op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m8))) +vuint64m8_t vamomaxuei64(uint64_t * op0, vuint64m8_t op1, vuint64m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamomaxuei64_v_u64m8_m))) +vuint64m8_t vamomaxuei64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m1))) +vint32m1_t vamoswapei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m1_m))) +vint32m1_t vamoswapei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m2))) +vint32m2_t vamoswapei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m2_m))) +vint32m2_t vamoswapei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m4))) +vint32m4_t vamoswapei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m4_m))) +vint32m4_t vamoswapei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m8))) +vint32m8_t vamoswapei8(int32_t * op0, vuint8m2_t op1, vint32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32m8_m))) +vint32m8_t vamoswapei8(vbool4_t op0, int32_t * op1, vuint8m2_t op2, vint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32mf2))) +vint32mf2_t vamoswapei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_i32mf2_m))) +vint32mf2_t vamoswapei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m1))) +vuint32m1_t vamoswapei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m1_m))) +vuint32m1_t vamoswapei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m2))) +vuint32m2_t vamoswapei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m2_m))) +vuint32m2_t vamoswapei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m4))) +vuint32m4_t vamoswapei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m4_m))) +vuint32m4_t vamoswapei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m8))) +vuint32m8_t vamoswapei8(uint32_t * op0, 
vuint8m2_t op1, vuint32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32m8_m)))
+vuint32m8_t vamoswapei8(vbool4_t op0, uint32_t * op1, vuint8m2_t op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32mf2)))
+vuint32mf2_t vamoswapei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_u32mf2_m)))
+vuint32mf2_t vamoswapei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, size_t op4);
+
+#endif
+
+#if defined(__riscv_f) && defined(__riscv_zvamo)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m1)))
+vfloat32m1_t vamoswapei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m1_m)))
+vfloat32m1_t vamoswapei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m2)))
+vfloat32m2_t vamoswapei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m2_m)))
+vfloat32m2_t vamoswapei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m4)))
+vfloat32m4_t vamoswapei8(float * op0, vuint8m1_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m4_m)))
+vfloat32m4_t vamoswapei8(vbool8_t op0, float * op1, vuint8m1_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m8)))
+vfloat32m8_t vamoswapei8(float * op0, vuint8m2_t op1, vfloat32m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32m8_m)))
+vfloat32m8_t vamoswapei8(vbool4_t op0, float * op1, vuint8m2_t op2, vfloat32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32mf2)))
+vfloat32mf2_t vamoswapei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f32mf2_m)))
+vfloat32mf2_t vamoswapei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m1)))
+vfloat32m1_t vamoswapei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m1_m)))
+vfloat32m1_t vamoswapei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m2)))
+vfloat32m2_t vamoswapei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m2_m)))
+vfloat32m2_t vamoswapei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m4)))
+vfloat32m4_t vamoswapei16(float * op0, vuint16m2_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded
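
/* Within the __riscv_f gate just above, vamoswap is the only AMO given
 * floating-point overloads in this header; the min/max/add-style forms stay
 * integer-only. One atomic exchange per active lane, e.g. (hypothetical
 * wrapper name):
 *
 *   // Swap new values into buf at the given byte offsets and return
 *   // what was there before, in one atomic step per element.
 *   vfloat32m1_t swap_in(float *buf, vuint16mf2_t byte_offsets,
 *                        vfloat32m1_t newvals, size_t vl) {
 *     return vamoswapei16(buf, byte_offsets, newvals, vl);
 *   }
 */
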
__attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m4_m))) +vfloat32m4_t vamoswapei16(vbool8_t op0, float * op1, vuint16m2_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m8))) +vfloat32m8_t vamoswapei16(float * op0, vuint16m4_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32m8_m))) +vfloat32m8_t vamoswapei16(vbool4_t op0, float * op1, vuint16m4_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32mf2))) +vfloat32mf2_t vamoswapei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f32mf2_m))) +vfloat32mf2_t vamoswapei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m1))) +vfloat32m1_t vamoswapei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m1_m))) +vfloat32m1_t vamoswapei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m2))) +vfloat32m2_t vamoswapei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m2_m))) +vfloat32m2_t vamoswapei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m4))) +vfloat32m4_t vamoswapei32(float * op0, vuint32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m4_m))) +vfloat32m4_t vamoswapei32(vbool8_t op0, float * op1, vuint32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m8))) +vfloat32m8_t vamoswapei32(float * op0, vuint32m8_t op1, vfloat32m8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32m8_m))) +vfloat32m8_t vamoswapei32(vbool4_t op0, float * op1, vuint32m8_t op2, vfloat32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32mf2))) +vfloat32mf2_t vamoswapei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f32mf2_m))) +vfloat32mf2_t vamoswapei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32m1))) +vfloat32m1_t vamoswapei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32m1_m))) +vfloat32m1_t vamoswapei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32m2))) +vfloat32m2_t vamoswapei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32m2_m))) +vfloat32m2_t vamoswapei64(vbool16_t op0, float * op1, 
vuint64m4_t op2, vfloat32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32m4)))
+vfloat32m4_t vamoswapei64(float * op0, vuint64m8_t op1, vfloat32m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32m4_m)))
+vfloat32m4_t vamoswapei64(vbool8_t op0, float * op1, vuint64m8_t op2, vfloat32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32mf2)))
+vfloat32mf2_t vamoswapei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f32mf2_m)))
+vfloat32mf2_t vamoswapei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, size_t op4);
+
+#endif
+
+#if defined(__riscv_d) && defined(__riscv_zvamo)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m1)))
+vfloat64m1_t vamoswapei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m1_m)))
+vfloat64m1_t vamoswapei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m2)))
+vfloat64m2_t vamoswapei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m2_m)))
+vfloat64m2_t vamoswapei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m4)))
+vfloat64m4_t vamoswapei8(double * op0, vuint8mf2_t op1, vfloat64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m4_m)))
+vfloat64m4_t vamoswapei8(vbool16_t op0, double * op1, vuint8mf2_t op2, vfloat64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m8)))
+vfloat64m8_t vamoswapei8(double * op0, vuint8m1_t op1, vfloat64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei8_v_f64m8_m)))
+vfloat64m8_t vamoswapei8(vbool8_t op0, double * op1, vuint8m1_t op2, vfloat64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m1)))
+vfloat64m1_t vamoswapei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m1_m)))
+vfloat64m1_t vamoswapei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m2)))
+vfloat64m2_t vamoswapei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m2_m)))
+vfloat64m2_t vamoswapei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m4)))
+vfloat64m4_t vamoswapei16(double * op0, vuint16m1_t op1, vfloat64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m4_m)))
+vfloat64m4_t vamoswapei16(vbool16_t op0, double * op1, vuint16m1_t op2, vfloat64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m8)))
+vfloat64m8_t vamoswapei16(double * op0, vuint16m2_t op1, vfloat64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei16_v_f64m8_m)))
+vfloat64m8_t vamoswapei16(vbool8_t op0, double * op1, vuint16m2_t op2, vfloat64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m1)))
+vfloat64m1_t vamoswapei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m1_m)))
+vfloat64m1_t vamoswapei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m2)))
+vfloat64m2_t vamoswapei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m2_m)))
+vfloat64m2_t vamoswapei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m4)))
+vfloat64m4_t vamoswapei32(double * op0, vuint32m2_t op1, vfloat64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m4_m)))
+vfloat64m4_t vamoswapei32(vbool16_t op0, double * op1, vuint32m2_t op2, vfloat64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m8)))
+vfloat64m8_t vamoswapei32(double * op0, vuint32m4_t op1, vfloat64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei32_v_f64m8_m)))
+vfloat64m8_t vamoswapei32(vbool8_t op0, double * op1, vuint32m4_t op2, vfloat64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m1)))
+vfloat64m1_t vamoswapei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m1_m)))
+vfloat64m1_t vamoswapei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m2)))
+vfloat64m2_t vamoswapei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m2_m)))
+vfloat64m2_t vamoswapei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m4)))
+vfloat64m4_t vamoswapei64(double * op0, vuint64m4_t op1, vfloat64m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m4_m)))
+vfloat64m4_t vamoswapei64(vbool16_t op0, double * op1, vuint64m4_t op2, vfloat64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m8)))
+vfloat64m8_t vamoswapei64(double * op0, vuint64m8_t op1, vfloat64m8_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vamoswapei64_v_f64m8_m)))
+vfloat64m8_t vamoswapei64(vbool8_t op0, double * op1, vuint64m8_t op2, vfloat64m8_t op3, size_t op4);
+
+#endif
+
+#if defined(__riscv_zvlsseg)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i64m1)))
+void
vloxseg2ei8(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i64m1_m))) +void vloxseg2ei8(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i64m2))) +void vloxseg2ei8(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i64m2_m))) +void vloxseg2ei8(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i64m4))) +void vloxseg2ei8(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i64m4_m))) +void vloxseg2ei8(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u64m1))) +void vloxseg2ei8(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u64m1_m))) +void vloxseg2ei8(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u64m2))) +void vloxseg2ei8(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u64m2_m))) +void vloxseg2ei8(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u64m4))) +void vloxseg2ei8(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u64m4_m))) +void vloxseg2ei8(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i64m1))) +void vloxseg3ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i64m1_m))) +void vloxseg3ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i64m2))) +void vloxseg3ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i64m2_m))) +void vloxseg3ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, 
vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u64m1))) +void vloxseg3ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u64m1_m))) +void vloxseg3ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u64m2))) +void vloxseg3ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u64m2_m))) +void vloxseg3ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i64m1))) +void vloxseg4ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i64m1_m))) +void vloxseg4ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i64m2))) +void vloxseg4ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i64m2_m))) +void vloxseg4ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u64m1))) +void vloxseg4ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u64m1_m))) +void vloxseg4ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u64m2))) +void vloxseg4ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u64m2_m))) +void vloxseg4ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i64m1))) +void vloxseg5ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i64m1_m))) +void vloxseg5ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u64m1))) +void vloxseg5ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u64m1_m))) +void vloxseg5ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i64m1))) +void vloxseg6ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i64m1_m))) +void vloxseg6ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u64m1))) +void vloxseg6ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u64m1_m))) +void vloxseg6ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i64m1))) +void vloxseg7ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i64m1_m))) +void vloxseg7ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u64m1))) +void vloxseg7ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u64m1_m))) +void vloxseg7ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, 
vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i64m1))) +void vloxseg8ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i64m1_m))) +void vloxseg8ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u64m1))) +void vloxseg8ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u64m1_m))) +void vloxseg8ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i64m1))) +void vloxseg2ei16(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i64m1_m))) +void vloxseg2ei16(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i64m2))) +void vloxseg2ei16(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i64m2_m))) +void vloxseg2ei16(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i64m4))) +void vloxseg2ei16(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i64m4_m))) +void vloxseg2ei16(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u64m1))) +void vloxseg2ei16(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u64m1_m))) +void vloxseg2ei16(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint16mf4_t op6, size_t op7); + 
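+/* Illustrative sketch (not part of the upstream generated header):
+ * assuming a RISC-V target with the V and Zvlsseg extensions enabled,
+ * the overloaded vloxseg2ei16 declared above gathers two int64 segments
+ * through 16-bit byte offsets in a single indexed load. The helper name
+ * and its parameters here are hypothetical:
+ *
+ *   static inline void gather_pairs(const int64_t *base,
+ *                                   vuint16mf4_t byte_offsets,
+ *                                   vint64m1_t *x, vint64m1_t *y,
+ *                                   size_t vl) {
+ *     vloxseg2ei16(x, y, base, byte_offsets, vl);
+ *   }
+ *
+ * The matching _m overloads declared alongside additionally take the
+ * mask and the per-segment maskedoff operands between the destination
+ * pointers and the base pointer, as in the signatures above.
+ */
+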
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u64m2))) +void vloxseg2ei16(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u64m2_m))) +void vloxseg2ei16(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u64m4))) +void vloxseg2ei16(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u64m4_m))) +void vloxseg2ei16(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i64m1))) +void vloxseg3ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i64m1_m))) +void vloxseg3ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i64m2))) +void vloxseg3ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i64m2_m))) +void vloxseg3ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u64m1))) +void vloxseg3ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u64m1_m))) +void vloxseg3ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u64m2))) +void vloxseg3ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u64m2_m))) +void vloxseg3ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i64m1))) +void vloxseg4ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i64m1_m))) +void vloxseg4ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i64m2))) +void vloxseg4ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i64m2_m))) +void vloxseg4ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u64m1))) +void vloxseg4ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u64m1_m))) +void vloxseg4ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u64m2))) +void vloxseg4ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u64m2_m))) +void vloxseg4ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i64m1))) +void vloxseg5ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i64m1_m))) +void vloxseg5ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u64m1))) +void vloxseg5ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u64m1_m))) +void vloxseg5ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i64m1))) +void vloxseg6ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i64m1_m))) +void vloxseg6ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint16mf4_t 
op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u64m1))) +void vloxseg6ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u64m1_m))) +void vloxseg6ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i64m1))) +void vloxseg7ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i64m1_m))) +void vloxseg7ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u64m1))) +void vloxseg7ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u64m1_m))) +void vloxseg7ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i64m1))) +void vloxseg8ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i64m1_m))) +void vloxseg8ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u64m1))) +void vloxseg8ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u64m1_m))) +void vloxseg8ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, 
vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i64m1))) +void vloxseg2ei32(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i64m1_m))) +void vloxseg2ei32(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i64m2))) +void vloxseg2ei32(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i64m2_m))) +void vloxseg2ei32(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i64m4))) +void vloxseg2ei32(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i64m4_m))) +void vloxseg2ei32(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u64m1))) +void vloxseg2ei32(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u64m1_m))) +void vloxseg2ei32(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u64m2))) +void vloxseg2ei32(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u64m2_m))) +void vloxseg2ei32(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u64m4))) +void vloxseg2ei32(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u64m4_m))) +void vloxseg2ei32(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i64m1))) +void vloxseg3ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i64m1_m))) +void vloxseg3ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i64m2))) +void vloxseg3ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * 
op2, const int64_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i64m2_m))) +void vloxseg3ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u64m1))) +void vloxseg3ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u64m1_m))) +void vloxseg3ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u64m2))) +void vloxseg3ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u64m2_m))) +void vloxseg3ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i64m1))) +void vloxseg4ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i64m1_m))) +void vloxseg4ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i64m2))) +void vloxseg4ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i64m2_m))) +void vloxseg4ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u64m1))) +void vloxseg4ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u64m1_m))) +void vloxseg4ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u64m2))) +void vloxseg4ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u64m2_m))) +void vloxseg4ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, 
const uint64_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i64m1))) +void vloxseg5ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i64m1_m))) +void vloxseg5ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u64m1))) +void vloxseg5ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u64m1_m))) +void vloxseg5ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i64m1))) +void vloxseg6ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i64m1_m))) +void vloxseg6ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u64m1))) +void vloxseg6ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u64m1_m))) +void vloxseg6ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i64m1))) +void vloxseg7ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i64m1_m))) +void vloxseg7ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u64m1))) +void vloxseg7ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * 
op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u64m1_m))) +void vloxseg7ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i64m1))) +void vloxseg8ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i64m1_m))) +void vloxseg8ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u64m1))) +void vloxseg8ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u64m1_m))) +void vloxseg8ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i64m1))) +void vloxseg2ei64(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i64m1_m))) +void vloxseg2ei64(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i64m2))) +void vloxseg2ei64(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i64m2_m))) +void vloxseg2ei64(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i64m4))) +void vloxseg2ei64(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i64m4_m))) +void vloxseg2ei64(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u64m1))) +void 
vloxseg2ei64(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u64m1_m))) +void vloxseg2ei64(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u64m2))) +void vloxseg2ei64(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u64m2_m))) +void vloxseg2ei64(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u64m4))) +void vloxseg2ei64(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u64m4_m))) +void vloxseg2ei64(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i64m1))) +void vloxseg3ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i64m1_m))) +void vloxseg3ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i64m2))) +void vloxseg3ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i64m2_m))) +void vloxseg3ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u64m1))) +void vloxseg3ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u64m1_m))) +void vloxseg3ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u64m2))) +void vloxseg3ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u64m2_m))) +void vloxseg3ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i64m1))) +void vloxseg4ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i64m1_m))) +void vloxseg4ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i64m2))) +void vloxseg4ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i64m2_m))) +void vloxseg4ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u64m1))) +void vloxseg4ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u64m1_m))) +void vloxseg4ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u64m2))) +void vloxseg4ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u64m2_m))) +void vloxseg4ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i64m1))) +void vloxseg5ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i64m1_m))) +void vloxseg5ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u64m1))) +void vloxseg5ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u64m1_m))) +void vloxseg5ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i64m1))) +void vloxseg6ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i64m1_m))) +void vloxseg6ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u64m1))) +void vloxseg6ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u64m1_m))) +void vloxseg6ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i64m1))) +void vloxseg7ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i64m1_m))) +void vloxseg7ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u64m1))) +void vloxseg7ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u64m1_m))) +void vloxseg7ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i64m1))) +void vloxseg8ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i64m1_m))) +void vloxseg8ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u64m1))) +void vloxseg8ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * 
op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u64m1_m))) +void vloxseg8ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8m1))) +void vsseg3e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8m1_m))) +void vsseg3e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8m2))) +void vsseg3e8(int8_t * op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8m2_m))) +void vsseg3e8(vbool4_t op0, int8_t * op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8mf2))) +void vsseg3e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8mf2_m))) +void vsseg3e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8mf4))) +void vsseg3e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8mf4_m))) +void vsseg3e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8mf8))) +void vsseg3e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_i8mf8_m))) +void vsseg3e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8m1))) +void vsseg3e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8m1_m))) +void vsseg3e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8m2))) +void vsseg3e8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8m2_m))) +void vsseg3e8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8mf2))) +void vsseg3e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8mf2_m))) +void vsseg3e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8mf4))) +void vsseg3e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8mf4_m))) +void vsseg3e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8mf8))) +void vsseg3e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e8_v_u8mf8_m))) +void vsseg3e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8m1))) +void vsseg4e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8m1_m))) +void vsseg4e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8m2))) +void vsseg4e8(int8_t * op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8m2_m))) +void vsseg4e8(vbool4_t op0, int8_t * op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8mf2))) +void vsseg4e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8mf2_m))) +void vsseg4e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8mf4))) +void vsseg4e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8mf4_m))) +void vsseg4e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8mf8))) +void vsseg4e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_i8mf8_m))) +void vsseg4e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8m1))) +void vsseg4e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8m1_m))) +void vsseg4e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8m2))) +void vsseg4e8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8m2_m))) +void vsseg4e8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t 
op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8mf2))) +void vsseg4e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8mf2_m))) +void vsseg4e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8mf4))) +void vsseg4e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8mf4_m))) +void vsseg4e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8mf8))) +void vsseg4e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e8_v_u8mf8_m))) +void vsseg4e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8m1))) +void vsseg5e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8m1_m))) +void vsseg5e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8mf2))) +void vsseg5e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8mf2_m))) +void vsseg5e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8mf4))) +void vsseg5e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8mf4_m))) +void vsseg5e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8mf8))) +void vsseg5e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_i8mf8_m))) +void vsseg5e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8m1))) +void vsseg5e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8m1_m))) +void vsseg5e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8mf2))) +void 
vsseg5e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8mf2_m))) +void vsseg5e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8mf4))) +void vsseg5e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8mf4_m))) +void vsseg5e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8mf8))) +void vsseg5e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e8_v_u8mf8_m))) +void vsseg5e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8m1))) +void vsseg6e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8m1_m))) +void vsseg6e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8mf2))) +void vsseg6e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8mf2_m))) +void vsseg6e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8mf4))) +void vsseg6e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8mf4_m))) +void vsseg6e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8mf8))) +void vsseg6e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_i8mf8_m))) +void vsseg6e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8m1))) +void vsseg6e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8m1_m))) +void vsseg6e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, 
vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8mf2))) +void vsseg6e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8mf2_m))) +void vsseg6e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8mf4))) +void vsseg6e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8mf4_m))) +void vsseg6e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8mf8))) +void vsseg6e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e8_v_u8mf8_m))) +void vsseg6e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8m1))) +void vsseg7e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8m1_m))) +void vsseg7e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8mf2))) +void vsseg7e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8mf2_m))) +void vsseg7e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8mf4))) +void vsseg7e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8mf4_m))) +void vsseg7e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8mf8))) +void vsseg7e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_i8mf8_m))) +void vsseg7e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded 
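+/* Editorial note (not part of the generated header): every unmasked
+ * overload above pairs with a masked one that takes the vbool mask as
+ * its first argument; lanes whose mask bit is clear are left
+ * unwritten. A minimal masked call, with m, dst, v0..v6 and vl assumed
+ * to be in scope:
+ *
+ *   vsseg7e8(m, dst, v0, v1, v2, v3, v4, v5, v6, vl);
+ */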
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8m1))) +void vsseg7e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8m1_m))) +void vsseg7e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8mf2))) +void vsseg7e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8mf2_m))) +void vsseg7e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8mf4))) +void vsseg7e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8mf4_m))) +void vsseg7e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8mf8))) +void vsseg7e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e8_v_u8mf8_m))) +void vsseg7e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8m1))) +void vsseg8e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8m1_m))) +void vsseg8e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8mf2))) +void vsseg8e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8mf2_m))) +void vsseg8e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8mf4))) +void vsseg8e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8mf4_m))) +void vsseg8e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, 
vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8mf8))) +void vsseg8e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_i8mf8_m))) +void vsseg8e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8m1))) +void vsseg8e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8m1_m))) +void vsseg8e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8mf2))) +void vsseg8e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8mf2_m))) +void vsseg8e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8mf4))) +void vsseg8e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8mf4_m))) +void vsseg8e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8mf8))) +void vsseg8e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e8_v_u8mf8_m))) +void vsseg8e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16m1))) +void vsseg2e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16m1_m))) +void vsseg2e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16m2))) +void vsseg2e16(int16_t * op0, vint16m2_t op1, vint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16m2_m))) +void vsseg2e16(vbool8_t op0, int16_t * op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded 
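+/* Editorial sketch: vsseg2e16(base, v0, v1, vl) stores two-field
+ * segments, i.e. base[2*i] = v0[i] and base[2*i+1] = v1[i] for
+ * i < vl. Assuming the clang-14-era helpers vsetvl_e16m1 and
+ * vle16_v_i16m1 are available, interleaving two planar arrays might
+ * look like:
+ *
+ *   size_t vl = vsetvl_e16m1(n);
+ *   vint16m1_t x = vle16_v_i16m1(xs, vl);
+ *   vint16m1_t y = vle16_v_i16m1(ys, vl);
+ *   vsseg2e16(dst, x, y, vl);   // dst: x0, y0, x1, y1, ...
+ */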
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16m4))) +void vsseg2e16(int16_t * op0, vint16m4_t op1, vint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16m4_m))) +void vsseg2e16(vbool4_t op0, int16_t * op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16mf2))) +void vsseg2e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16mf2_m))) +void vsseg2e16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16mf4))) +void vsseg2e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_i16mf4_m))) +void vsseg2e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16m1))) +void vsseg2e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16m1_m))) +void vsseg2e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16m2))) +void vsseg2e16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16m2_m))) +void vsseg2e16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16m4))) +void vsseg2e16(uint16_t * op0, vuint16m4_t op1, vuint16m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16m4_m))) +void vsseg2e16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16mf2))) +void vsseg2e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16mf2_m))) +void vsseg2e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16mf4))) +void vsseg2e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_u16mf4_m))) +void vsseg2e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16m1))) +void vsseg3e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16m1_m))) +void vsseg3e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16m2))) +void vsseg3e16(int16_t * op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16m2_m))) +void vsseg3e16(vbool8_t op0, int16_t * op1, vint16m2_t op2, vint16m2_t op3, 
vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16mf2))) +void vsseg3e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16mf2_m))) +void vsseg3e16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16mf4))) +void vsseg3e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_i16mf4_m))) +void vsseg3e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16m1))) +void vsseg3e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16m1_m))) +void vsseg3e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16m2))) +void vsseg3e16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16m2_m))) +void vsseg3e16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16mf2))) +void vsseg3e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16mf2_m))) +void vsseg3e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16mf4))) +void vsseg3e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_u16mf4_m))) +void vsseg3e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16m1))) +void vsseg4e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16m1_m))) +void vsseg4e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16m2))) +void vsseg4e16(int16_t * op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16m2_m))) +void vsseg4e16(vbool8_t op0, int16_t * op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16mf2))) +void vsseg4e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16mf2_m))) +void vsseg4e16(vbool32_t op0, int16_t * op1, 
vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16mf4))) +void vsseg4e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_i16mf4_m))) +void vsseg4e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16m1))) +void vsseg4e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16m1_m))) +void vsseg4e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16m2))) +void vsseg4e16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16m2_m))) +void vsseg4e16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16mf2))) +void vsseg4e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16mf2_m))) +void vsseg4e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16mf4))) +void vsseg4e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_u16mf4_m))) +void vsseg4e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_i16m1))) +void vsseg5e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_i16m1_m))) +void vsseg5e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_i16mf2))) +void vsseg5e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_i16mf2_m))) +void vsseg5e16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_i16mf4))) +void vsseg5e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_i16mf4_m))) +void vsseg5e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, 
vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_u16m1))) +void vsseg5e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_u16m1_m))) +void vsseg5e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_u16mf2))) +void vsseg5e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_u16mf2_m))) +void vsseg5e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_u16mf4))) +void vsseg5e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_u16mf4_m))) +void vsseg5e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_i16m1))) +void vsseg6e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_i16m1_m))) +void vsseg6e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_i16mf2))) +void vsseg6e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_i16mf2_m))) +void vsseg6e16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_i16mf4))) +void vsseg6e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_i16mf4_m))) +void vsseg6e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_u16m1))) +void vsseg6e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_u16m1_m))) +void vsseg6e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_u16mf2))) +void vsseg6e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, 
vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_u16mf2_m))) +void vsseg6e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_u16mf4))) +void vsseg6e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_u16mf4_m))) +void vsseg6e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_i16m1))) +void vsseg7e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_i16m1_m))) +void vsseg7e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_i16mf2))) +void vsseg7e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_i16mf2_m))) +void vsseg7e16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_i16mf4))) +void vsseg7e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_i16mf4_m))) +void vsseg7e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_u16m1))) +void vsseg7e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_u16m1_m))) +void vsseg7e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_u16mf2))) +void vsseg7e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_u16mf2_m))) +void vsseg7e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_u16mf4))) +void 
vsseg7e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_u16mf4_m))) +void vsseg7e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_i16m1))) +void vsseg8e16(int16_t * op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_i16m1_m))) +void vsseg8e16(vbool16_t op0, int16_t * op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_i16mf2))) +void vsseg8e16(int16_t * op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_i16mf2_m))) +void vsseg8e16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_i16mf4))) +void vsseg8e16(int16_t * op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_i16mf4_m))) +void vsseg8e16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_u16m1))) +void vsseg8e16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_u16m1_m))) +void vsseg8e16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_u16mf2))) +void vsseg8e16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_u16mf2_m))) +void vsseg8e16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_u16mf4))) +void vsseg8e16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded 
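+/* Editorial note: the smallest fractional LMUL per element width is
+ * SEW/ELEN with ELEN = 64, which is why the e8 overloads reach mf8
+ * while the e16 set above stops at mf4 (and the e32 set below at mf2).
+ */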
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_u16mf4_m))) +void vsseg8e16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32m1))) +void vsseg2e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32m1_m))) +void vsseg2e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32m2))) +void vsseg2e32(int32_t * op0, vint32m2_t op1, vint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32m2_m))) +void vsseg2e32(vbool16_t op0, int32_t * op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32m4))) +void vsseg2e32(int32_t * op0, vint32m4_t op1, vint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32m4_m))) +void vsseg2e32(vbool8_t op0, int32_t * op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32mf2))) +void vsseg2e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_i32mf2_m))) +void vsseg2e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32m1))) +void vsseg2e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32m1_m))) +void vsseg2e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32m2))) +void vsseg2e32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32m2_m))) +void vsseg2e32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32m4))) +void vsseg2e32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32m4_m))) +void vsseg2e32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32mf2))) +void vsseg2e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_u32mf2_m))) +void vsseg2e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_i32m1))) +void vsseg3e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_i32m1_m))) +void vsseg3e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_i32m2))) +void vsseg3e32(int32_t * op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_i32m2_m))) +void vsseg3e32(vbool16_t op0, int32_t * op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_i32mf2))) +void vsseg3e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_i32mf2_m))) +void vsseg3e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_u32m1))) +void vsseg3e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_u32m1_m))) +void vsseg3e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_u32m2))) +void vsseg3e32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_u32m2_m))) +void vsseg3e32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_u32mf2))) +void vsseg3e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_u32mf2_m))) +void vsseg3e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_i32m1))) +void vsseg4e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_i32m1_m))) +void vsseg4e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_i32m2))) +void vsseg4e32(int32_t * op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_i32m2_m))) +void vsseg4e32(vbool16_t op0, int32_t * op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_i32mf2))) +void vsseg4e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_i32mf2_m))) +void vsseg4e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_u32m1))) +void vsseg4e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_u32m1_m))) +void vsseg4e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, 
vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_u32m2))) +void vsseg4e32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_u32m2_m))) +void vsseg4e32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_u32mf2))) +void vsseg4e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_u32mf2_m))) +void vsseg4e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_i32m1))) +void vsseg5e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_i32m1_m))) +void vsseg5e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_i32mf2))) +void vsseg5e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_i32mf2_m))) +void vsseg5e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_u32m1))) +void vsseg5e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_u32m1_m))) +void vsseg5e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_u32mf2))) +void vsseg5e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_u32mf2_m))) +void vsseg5e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_i32m1))) +void vsseg6e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_i32m1_m))) +void vsseg6e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_i32mf2))) +void vsseg6e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_i32mf2_m))) +void 
vsseg6e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_u32m1))) +void vsseg6e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_u32m1_m))) +void vsseg6e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_u32mf2))) +void vsseg6e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_u32mf2_m))) +void vsseg6e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_i32m1))) +void vsseg7e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_i32m1_m))) +void vsseg7e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_i32mf2))) +void vsseg7e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_i32mf2_m))) +void vsseg7e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_u32m1))) +void vsseg7e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_u32m1_m))) +void vsseg7e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_u32mf2))) +void vsseg7e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_u32mf2_m))) +void vsseg7e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_i32m1))) +void vsseg8e32(int32_t * op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded 
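+/* Editorial note: the vbool parameter encodes SEW/LMUL, so the e32
+ * overloads take vbool32_t at m1 and vbool64_t at mf2; together with
+ * the distinct vector types, this keeps the overload set unambiguous.
+ */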
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_i32m1_m))) +void vsseg8e32(vbool32_t op0, int32_t * op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_i32mf2))) +void vsseg8e32(int32_t * op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_i32mf2_m))) +void vsseg8e32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_u32m1))) +void vsseg8e32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_u32m1_m))) +void vsseg8e32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_u32mf2))) +void vsseg8e32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_u32mf2_m))) +void vsseg8e32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_i64m1))) +void vsseg2e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_i64m1_m))) +void vsseg2e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_i64m2))) +void vsseg2e64(int64_t * op0, vint64m2_t op1, vint64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_i64m2_m))) +void vsseg2e64(vbool32_t op0, int64_t * op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_i64m4))) +void vsseg2e64(int64_t * op0, vint64m4_t op1, vint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_i64m4_m))) +void vsseg2e64(vbool16_t op0, int64_t * op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_u64m1))) +void vsseg2e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_u64m1_m))) +void vsseg2e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_u64m2))) +void vsseg2e64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_u64m2_m))) +void vsseg2e64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_u64m4))) +void vsseg2e64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_u64m4_m))) +void vsseg2e64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_i64m1))) +void vsseg3e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_i64m1_m))) +void vsseg3e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_i64m2))) +void vsseg3e64(int64_t * op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_i64m2_m))) +void vsseg3e64(vbool32_t op0, int64_t * op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_u64m1))) +void vsseg3e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_u64m1_m))) +void vsseg3e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_u64m2))) +void vsseg3e64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_u64m2_m))) +void vsseg3e64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_i64m1))) +void vsseg4e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_i64m1_m))) +void vsseg4e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_i64m2))) +void vsseg4e64(int64_t * op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_i64m2_m))) +void vsseg4e64(vbool32_t op0, int64_t * op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_u64m1))) +void vsseg4e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_u64m1_m))) +void vsseg4e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_u64m2))) +void vsseg4e64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded 
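+/* Editorial note: a segment store may occupy at most NFIELDS x LMUL
+ * <= 8 vector registers, so vsseg2e64 is declared up to m4,
+ * vsseg3e64/vsseg4e64 up to m2, and vsseg5e64..vsseg8e64 only at m1.
+ */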
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_u64m2_m))) +void vsseg4e64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e64_v_i64m1))) +void vsseg5e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e64_v_i64m1_m))) +void vsseg5e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e64_v_u64m1))) +void vsseg5e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e64_v_u64m1_m))) +void vsseg5e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e64_v_i64m1))) +void vsseg6e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e64_v_i64m1_m))) +void vsseg6e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e64_v_u64m1))) +void vsseg6e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e64_v_u64m1_m))) +void vsseg6e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e64_v_i64m1))) +void vsseg7e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e64_v_i64m1_m))) +void vsseg7e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e64_v_u64m1))) +void vsseg7e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e64_v_u64m1_m))) +void vsseg7e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e64_v_i64m1))) +void vsseg8e64(int64_t * op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e64_v_i64m1_m))) +void vsseg8e64(vbool64_t op0, int64_t * op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, 
vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e64_v_u64m1))) +void vsseg8e64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e64_v_u64m1_m))) +void vsseg8e64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8m1))) +void vssseg3e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8m1_m))) +void vssseg3e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8m2))) +void vssseg3e8(int8_t * op0, ptrdiff_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8m2_m))) +void vssseg3e8(vbool4_t op0, int8_t * op1, ptrdiff_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8mf2))) +void vssseg3e8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8mf2_m))) +void vssseg3e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8mf4))) +void vssseg3e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8mf4_m))) +void vssseg3e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8mf8))) +void vssseg3e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_i8mf8_m))) +void vssseg3e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8m1))) +void vssseg3e8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8m1_m))) +void vssseg3e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8m2))) +void vssseg3e8(uint8_t * op0, ptrdiff_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8m2_m))) +void vssseg3e8(vbool4_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded 
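+
+/* Editor's note -- illustrative sketch, not part of the upstream generated
+ * header: vssseg<nf>e8 is the strided form of the segment store; the
+ * ptrdiff_t operand is the byte offset between consecutive segments, not an
+ * element count. E.g. writing interleaved R, G, B bytes while leaving a
+ * fourth (alpha) byte untouched (hypothetical `px`, `r`, `g`, `b`, `vl`):
+ *
+ *   vssseg3e8(px, 4, r, g, b, vl);  // segment i lands at px + 4*i and
+ *                                   // stores the bytes r[i], g[i], b[i]
+ */
+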
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8mf2))) +void vssseg3e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8mf2_m))) +void vssseg3e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8mf4))) +void vssseg3e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8mf4_m))) +void vssseg3e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8mf8))) +void vssseg3e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e8_v_u8mf8_m))) +void vssseg3e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8m1))) +void vssseg4e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8m1_m))) +void vssseg4e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8m2))) +void vssseg4e8(int8_t * op0, ptrdiff_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8m2_m))) +void vssseg4e8(vbool4_t op0, int8_t * op1, ptrdiff_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8mf2))) +void vssseg4e8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8mf2_m))) +void vssseg4e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8mf4))) +void vssseg4e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8mf4_m))) +void vssseg4e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8mf8))) +void vssseg4e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_i8mf8_m))) +void vssseg4e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8m1))) +void vssseg4e8(uint8_t * op0, ptrdiff_t op1, 
vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8m1_m))) +void vssseg4e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8m2))) +void vssseg4e8(uint8_t * op0, ptrdiff_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8m2_m))) +void vssseg4e8(vbool4_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8mf2))) +void vssseg4e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8mf2_m))) +void vssseg4e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8mf4))) +void vssseg4e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8mf4_m))) +void vssseg4e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8mf8))) +void vssseg4e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e8_v_u8mf8_m))) +void vssseg4e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8m1))) +void vssseg5e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8m1_m))) +void vssseg5e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8mf2))) +void vssseg5e8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8mf2_m))) +void vssseg5e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8mf4))) +void vssseg5e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8mf4_m))) +void vssseg5e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8mf8))) +void vssseg5e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_i8mf8_m))) +void vssseg5e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8m1))) +void vssseg5e8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8m1_m))) +void vssseg5e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8mf2))) +void vssseg5e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8mf2_m))) +void vssseg5e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8mf4))) +void vssseg5e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8mf4_m))) +void vssseg5e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8mf8))) +void vssseg5e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e8_v_u8mf8_m))) +void vssseg5e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8m1))) +void vssseg6e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8m1_m))) +void vssseg6e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8mf2))) +void vssseg6e8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8mf2_m))) +void vssseg6e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8mf4))) +void vssseg6e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t 
op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8mf4_m))) +void vssseg6e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8mf8))) +void vssseg6e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_i8mf8_m))) +void vssseg6e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8m1))) +void vssseg6e8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8m1_m))) +void vssseg6e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8mf2))) +void vssseg6e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8mf2_m))) +void vssseg6e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8mf4))) +void vssseg6e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8mf4_m))) +void vssseg6e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8mf8))) +void vssseg6e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e8_v_u8mf8_m))) +void vssseg6e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8m1))) +void vssseg7e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8m1_m))) +void vssseg7e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8mf2))) +void vssseg7e8(int8_t * op0, ptrdiff_t op1, 
vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8mf2_m))) +void vssseg7e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8mf4))) +void vssseg7e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8mf4_m))) +void vssseg7e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8mf8))) +void vssseg7e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_i8mf8_m))) +void vssseg7e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8m1))) +void vssseg7e8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8m1_m))) +void vssseg7e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8mf2))) +void vssseg7e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8mf2_m))) +void vssseg7e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8mf4))) +void vssseg7e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8mf4_m))) +void vssseg7e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8mf8))) +void vssseg7e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e8_v_u8mf8_m))) +void vssseg7e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, 
vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8m1))) +void vssseg8e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8m1_m))) +void vssseg8e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8mf2))) +void vssseg8e8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8mf2_m))) +void vssseg8e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8mf4))) +void vssseg8e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8mf4_m))) +void vssseg8e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8mf8))) +void vssseg8e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_i8mf8_m))) +void vssseg8e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8m1))) +void vssseg8e8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8m1_m))) +void vssseg8e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8mf2))) +void vssseg8e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8mf2_m))) +void vssseg8e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, 
size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8mf4))) +void vssseg8e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8mf4_m))) +void vssseg8e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8mf8))) +void vssseg8e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e8_v_u8mf8_m))) +void vssseg8e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16m1))) +void vssseg2e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16m1_m))) +void vssseg2e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16m2))) +void vssseg2e16(int16_t * op0, ptrdiff_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16m2_m))) +void vssseg2e16(vbool8_t op0, int16_t * op1, ptrdiff_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16m4))) +void vssseg2e16(int16_t * op0, ptrdiff_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16m4_m))) +void vssseg2e16(vbool4_t op0, int16_t * op1, ptrdiff_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16mf2))) +void vssseg2e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16mf2_m))) +void vssseg2e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16mf4))) +void vssseg2e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_i16mf4_m))) +void vssseg2e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16m1))) +void vssseg2e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16m1_m))) +void vssseg2e16(vbool16_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded 
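+
+/* Editor's note -- illustrative sketch, not part of the upstream generated
+ * header: each (element width, LMUL, signedness, nf, masked/unmasked)
+ * combination has its own __builtin_rvv_* alias, but the __rvv_overloaded
+ * wrappers share one spelling per nf/width, so resolution is driven purely
+ * by the argument types. Both calls below (hypothetical operands) reach
+ * different builtins through the same name:
+ *
+ *   vssseg2e16(p16, 8, re_m1, im_m1, vl);    // ..._v_i16m1 via vint16m1_t
+ *   vssseg2e16(p16, 8, re_mf4, im_mf4, vl);  // ..._v_i16mf4 via vint16mf4_t
+ */
+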
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16m2))) +void vssseg2e16(uint16_t * op0, ptrdiff_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16m2_m))) +void vssseg2e16(vbool8_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16m4))) +void vssseg2e16(uint16_t * op0, ptrdiff_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16m4_m))) +void vssseg2e16(vbool4_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16mf2))) +void vssseg2e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16mf2_m))) +void vssseg2e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16mf4))) +void vssseg2e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_u16mf4_m))) +void vssseg2e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16m1))) +void vssseg3e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16m1_m))) +void vssseg3e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16m2))) +void vssseg3e16(int16_t * op0, ptrdiff_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16m2_m))) +void vssseg3e16(vbool8_t op0, int16_t * op1, ptrdiff_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16mf2))) +void vssseg3e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16mf2_m))) +void vssseg3e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16mf4))) +void vssseg3e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_i16mf4_m))) +void vssseg3e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16m1))) +void vssseg3e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16m1_m))) +void vssseg3e16(vbool16_t op0, 
uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16m2))) +void vssseg3e16(uint16_t * op0, ptrdiff_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16m2_m))) +void vssseg3e16(vbool8_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16mf2))) +void vssseg3e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16mf2_m))) +void vssseg3e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16mf4))) +void vssseg3e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_u16mf4_m))) +void vssseg3e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16m1))) +void vssseg4e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16m1_m))) +void vssseg4e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16m2))) +void vssseg4e16(int16_t * op0, ptrdiff_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16m2_m))) +void vssseg4e16(vbool8_t op0, int16_t * op1, ptrdiff_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16mf2))) +void vssseg4e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16mf2_m))) +void vssseg4e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16mf4))) +void vssseg4e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_i16mf4_m))) +void vssseg4e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16m1))) +void vssseg4e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16m1_m))) +void vssseg4e16(vbool16_t op0, uint16_t * op1, 
ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16m2))) +void vssseg4e16(uint16_t * op0, ptrdiff_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16m2_m))) +void vssseg4e16(vbool8_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16mf2))) +void vssseg4e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16mf2_m))) +void vssseg4e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16mf4))) +void vssseg4e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_u16mf4_m))) +void vssseg4e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_i16m1))) +void vssseg5e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_i16m1_m))) +void vssseg5e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_i16mf2))) +void vssseg5e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_i16mf2_m))) +void vssseg5e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_i16mf4))) +void vssseg5e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_i16mf4_m))) +void vssseg5e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_u16m1))) +void vssseg5e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_u16m1_m))) +void vssseg5e16(vbool16_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_u16mf2))) +void 
vssseg5e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_u16mf2_m))) +void vssseg5e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_u16mf4))) +void vssseg5e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_u16mf4_m))) +void vssseg5e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_i16m1))) +void vssseg6e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_i16m1_m))) +void vssseg6e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_i16mf2))) +void vssseg6e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_i16mf2_m))) +void vssseg6e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_i16mf4))) +void vssseg6e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_i16mf4_m))) +void vssseg6e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_u16m1))) +void vssseg6e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_u16m1_m))) +void vssseg6e16(vbool16_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_u16mf2))) +void vssseg6e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_u16mf2_m))) +void vssseg6e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_u16mf4))) +void vssseg6e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_u16mf4_m))) +void vssseg6e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_i16m1))) +void vssseg7e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_i16m1_m))) +void vssseg7e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_i16mf2))) +void vssseg7e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_i16mf2_m))) +void vssseg7e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_i16mf4))) +void vssseg7e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_i16mf4_m))) +void vssseg7e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_u16m1))) +void vssseg7e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_u16m1_m))) +void vssseg7e16(vbool16_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_u16mf2))) +void vssseg7e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_u16mf2_m))) +void vssseg7e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_u16mf4))) +void vssseg7e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, 
vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_u16mf4_m))) +void vssseg7e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_i16m1))) +void vssseg8e16(int16_t * op0, ptrdiff_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_i16m1_m))) +void vssseg8e16(vbool16_t op0, int16_t * op1, ptrdiff_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_i16mf2))) +void vssseg8e16(int16_t * op0, ptrdiff_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_i16mf2_m))) +void vssseg8e16(vbool32_t op0, int16_t * op1, ptrdiff_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_i16mf4))) +void vssseg8e16(int16_t * op0, ptrdiff_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_i16mf4_m))) +void vssseg8e16(vbool64_t op0, int16_t * op1, ptrdiff_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_u16m1))) +void vssseg8e16(uint16_t * op0, ptrdiff_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_u16m1_m))) +void vssseg8e16(vbool16_t op0, uint16_t * op1, ptrdiff_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_u16mf2))) +void vssseg8e16(uint16_t * op0, ptrdiff_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_u16mf2_m))) +void vssseg8e16(vbool32_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_u16mf4))) +void vssseg8e16(uint16_t * op0, ptrdiff_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t 
op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_u16mf4_m)))
+void vssseg8e16(vbool64_t op0, uint16_t * op1, ptrdiff_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32m1)))
+void vssseg2e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32m1_m)))
+void vssseg2e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32m2)))
+void vssseg2e32(int32_t * op0, ptrdiff_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32m2_m)))
+void vssseg2e32(vbool16_t op0, int32_t * op1, ptrdiff_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32m4)))
+void vssseg2e32(int32_t * op0, ptrdiff_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32m4_m)))
+void vssseg2e32(vbool8_t op0, int32_t * op1, ptrdiff_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32mf2)))
+void vssseg2e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_i32mf2_m)))
+void vssseg2e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32m1)))
+void vssseg2e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32m1_m)))
+void vssseg2e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32m2)))
+void vssseg2e32(uint32_t * op0, ptrdiff_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32m2_m)))
+void vssseg2e32(vbool16_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32m4)))
+void vssseg2e32(uint32_t * op0, ptrdiff_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32m4_m)))
+void vssseg2e32(vbool8_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32mf2)))
+void vssseg2e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_u32mf2_m)))
+void vssseg2e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded
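+
+/* Editor's note -- illustrative sketch, not part of the upstream generated
+ * header: in the masked overloads the mask type encodes the SEW/LMUL ratio
+ * (vbool<SEW/LMUL>_t), which is why the e32 group above pairs m1 with
+ * vbool32_t, m4 with vbool8_t, and mf2 with vbool64_t. A masked call with
+ * hypothetical operands:
+ *
+ *   vbool32_t keep = ...;                  // one mask bit per e32/m1 element
+ *   vssseg2e32(keep, out, 8, lo, hi, vl);  // inactive segments are skipped
+ */
+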
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_i32m1)))
+void vssseg3e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_i32m1_m)))
+void vssseg3e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_i32m2)))
+void vssseg3e32(int32_t * op0, ptrdiff_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_i32m2_m)))
+void vssseg3e32(vbool16_t op0, int32_t * op1, ptrdiff_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_i32mf2)))
+void vssseg3e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_i32mf2_m)))
+void vssseg3e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_u32m1)))
+void vssseg3e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_u32m1_m)))
+void vssseg3e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_u32m2)))
+void vssseg3e32(uint32_t * op0, ptrdiff_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_u32m2_m)))
+void vssseg3e32(vbool16_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_u32mf2)))
+void vssseg3e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_u32mf2_m)))
+void vssseg3e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_i32m1)))
+void vssseg4e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_i32m1_m)))
+void vssseg4e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_i32m2)))
+void vssseg4e32(int32_t * op0, ptrdiff_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_i32m2_m)))
+void vssseg4e32(vbool16_t op0, int32_t * op1, ptrdiff_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_i32mf2)))
+void vssseg4e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_i32mf2_m)))
+void vssseg4e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_u32m1)))
+void vssseg4e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_u32m1_m)))
+void vssseg4e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_u32m2)))
+void vssseg4e32(uint32_t * op0, ptrdiff_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_u32m2_m)))
+void vssseg4e32(vbool16_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_u32mf2)))
+void vssseg4e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_u32mf2_m)))
+void vssseg4e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_i32m1)))
+void vssseg5e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_i32m1_m)))
+void vssseg5e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_i32mf2)))
+void vssseg5e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_i32mf2_m)))
+void vssseg5e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_u32m1)))
+void vssseg5e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_u32m1_m)))
+void vssseg5e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_u32mf2)))
+void vssseg5e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_u32mf2_m)))
+void vssseg5e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_i32m1)))
+void vssseg6e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_i32m1_m)))
+void vssseg6e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_i32mf2)))
+void vssseg6e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_i32mf2_m)))
+void vssseg6e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_u32m1)))
+void vssseg6e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_u32m1_m)))
+void vssseg6e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_u32mf2)))
+void vssseg6e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_u32mf2_m)))
+void vssseg6e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_i32m1)))
+void vssseg7e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_i32m1_m)))
+void vssseg7e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_i32mf2)))
+void vssseg7e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_i32mf2_m)))
+void vssseg7e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_u32m1)))
+void vssseg7e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_u32m1_m)))
+void vssseg7e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_u32mf2)))
+void vssseg7e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_u32mf2_m)))
+void vssseg7e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_i32m1)))
+void vssseg8e32(int32_t * op0, ptrdiff_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_i32m1_m)))
+void vssseg8e32(vbool32_t op0, int32_t * op1, ptrdiff_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_i32mf2)))
+void vssseg8e32(int32_t * op0, ptrdiff_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_i32mf2_m)))
+void vssseg8e32(vbool64_t op0, int32_t * op1, ptrdiff_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_u32m1)))
+void vssseg8e32(uint32_t * op0, ptrdiff_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_u32m1_m)))
+void vssseg8e32(vbool32_t op0, uint32_t * op1, ptrdiff_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_u32mf2)))
+void vssseg8e32(uint32_t * op0, ptrdiff_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_u32mf2_m)))
+void vssseg8e32(vbool64_t op0, uint32_t * op1, ptrdiff_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_i64m1)))
+void vssseg2e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_i64m1_m)))
+void vssseg2e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_i64m2)))
+void vssseg2e64(int64_t * op0, ptrdiff_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_i64m2_m)))
+void vssseg2e64(vbool32_t op0, int64_t * op1, ptrdiff_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_i64m4)))
+void vssseg2e64(int64_t * op0, ptrdiff_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_i64m4_m)))
+void vssseg2e64(vbool16_t op0, int64_t * op1, ptrdiff_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_u64m1)))
+void vssseg2e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_u64m1_m)))
+void vssseg2e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_u64m2)))
+void vssseg2e64(uint64_t * op0, ptrdiff_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_u64m2_m)))
+void vssseg2e64(vbool32_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_u64m4)))
+void vssseg2e64(uint64_t * op0, ptrdiff_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_u64m4_m)))
+void vssseg2e64(vbool16_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_i64m1)))
+void vssseg3e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_i64m1_m)))
+void vssseg3e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_i64m2)))
+void vssseg3e64(int64_t * op0, ptrdiff_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_i64m2_m)))
+void vssseg3e64(vbool32_t op0, int64_t * op1, ptrdiff_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_u64m1)))
+void vssseg3e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_u64m1_m)))
+void vssseg3e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_u64m2)))
+void vssseg3e64(uint64_t * op0, ptrdiff_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_u64m2_m)))
+void vssseg3e64(vbool32_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_i64m1)))
+void vssseg4e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_i64m1_m)))
+void vssseg4e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_i64m2)))
+void vssseg4e64(int64_t * op0, ptrdiff_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_i64m2_m)))
+void vssseg4e64(vbool32_t op0, int64_t * op1, ptrdiff_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_u64m1)))
+void vssseg4e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_u64m1_m)))
+void vssseg4e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_u64m2)))
+void vssseg4e64(uint64_t * op0, ptrdiff_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_u64m2_m)))
+void vssseg4e64(vbool32_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e64_v_i64m1)))
+void vssseg5e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e64_v_i64m1_m)))
+void vssseg5e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e64_v_u64m1)))
+void vssseg5e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e64_v_u64m1_m)))
+void vssseg5e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e64_v_i64m1)))
+void vssseg6e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e64_v_i64m1_m)))
+void vssseg6e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e64_v_u64m1)))
+void vssseg6e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e64_v_u64m1_m)))
+void vssseg6e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e64_v_i64m1)))
+void vssseg7e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e64_v_i64m1_m)))
+void vssseg7e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e64_v_u64m1)))
+void vssseg7e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e64_v_u64m1_m)))
+void vssseg7e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e64_v_i64m1)))
+void vssseg8e64(int64_t * op0, ptrdiff_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e64_v_i64m1_m)))
+void vssseg8e64(vbool64_t op0, int64_t * op1, ptrdiff_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e64_v_u64m1)))
+void vssseg8e64(uint64_t * op0, ptrdiff_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e64_v_u64m1_m)))
+void vssseg8e64(vbool64_t op0, uint64_t * op1, ptrdiff_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8m1)))
+void vsuxseg3ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8m1_m)))
+void vsuxseg3ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8m2)))
+void vsuxseg3ei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8m2_m)))
+void vsuxseg3ei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8mf2)))
+void vsuxseg3ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8mf2_m)))
+void vsuxseg3ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8mf4)))
+void vsuxseg3ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8mf4_m)))
+void vsuxseg3ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8mf8)))
+void vsuxseg3ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i8mf8_m)))
+void vsuxseg3ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8m1)))
+void vsuxseg3ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8m1_m)))
+void vsuxseg3ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8m2)))
+void vsuxseg3ei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8m2_m)))
+void vsuxseg3ei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8mf2)))
+void vsuxseg3ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8mf2_m)))
+void vsuxseg3ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8mf4)))
+void vsuxseg3ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8mf4_m)))
+void vsuxseg3ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8mf8)))
+void vsuxseg3ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u8mf8_m)))
+void vsuxseg3ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8m1)))
+void vsuxseg4ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8m1_m)))
+void vsuxseg4ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8m2)))
+void vsuxseg4ei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8m2_m)))
+void vsuxseg4ei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8mf2)))
+void vsuxseg4ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8mf2_m)))
+void vsuxseg4ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8mf4)))
+void vsuxseg4ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8mf4_m)))
+void vsuxseg4ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8mf8)))
+void vsuxseg4ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i8mf8_m)))
+void vsuxseg4ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8m1)))
+void vsuxseg4ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8m1_m)))
+void vsuxseg4ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8m2)))
+void vsuxseg4ei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8m2_m)))
+void vsuxseg4ei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8mf2)))
+void vsuxseg4ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8mf2_m)))
+void vsuxseg4ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8mf4)))
+void vsuxseg4ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8mf4_m)))
+void vsuxseg4ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8mf8)))
+void vsuxseg4ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u8mf8_m)))
+void vsuxseg4ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8m1)))
+void vsuxseg5ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8m1_m)))
+void vsuxseg5ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8mf2)))
+void vsuxseg5ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8mf2_m)))
+void vsuxseg5ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8mf4)))
+void vsuxseg5ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8mf4_m)))
+void vsuxseg5ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8mf8)))
+void vsuxseg5ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i8mf8_m)))
+void vsuxseg5ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8m1)))
+void vsuxseg5ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8m1_m)))
+void vsuxseg5ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8mf2)))
+void vsuxseg5ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8mf2_m)))
+void vsuxseg5ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8mf4)))
+void vsuxseg5ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8mf4_m)))
+void vsuxseg5ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8mf8)))
+void vsuxseg5ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u8mf8_m)))
+void vsuxseg5ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8m1)))
+void vsuxseg6ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8m1_m)))
+void vsuxseg6ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8mf2)))
+void vsuxseg6ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8mf2_m)))
+void vsuxseg6ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8mf4)))
+void vsuxseg6ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8mf4_m)))
+void vsuxseg6ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8mf8)))
+void vsuxseg6ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i8mf8_m)))
+void vsuxseg6ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8m1)))
+void vsuxseg6ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8m1_m)))
+void vsuxseg6ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8mf2)))
+void vsuxseg6ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8mf2_m)))
+void vsuxseg6ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8mf4)))
+void vsuxseg6ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8mf4_m)))
+void vsuxseg6ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8mf8)))
+void vsuxseg6ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u8mf8_m)))
+void vsuxseg6ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8m1)))
+void vsuxseg7ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8m1_m)))
+void vsuxseg7ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8mf2)))
+void vsuxseg7ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8mf2_m)))
+void vsuxseg7ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8mf4)))
+void vsuxseg7ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8mf4_m)))
+void vsuxseg7ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8mf8)))
+void vsuxseg7ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i8mf8_m)))
+void vsuxseg7ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8m1)))
+void vsuxseg7ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8m1_m)))
+void vsuxseg7ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8mf2)))
+void vsuxseg7ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8mf2_m)))
+void vsuxseg7ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8mf4)))
+void vsuxseg7ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8mf4_m)))
+void vsuxseg7ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8mf8)))
+void vsuxseg7ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u8mf8_m)))
+void vsuxseg7ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8m1)))
+void vsuxseg8ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8m1_m)))
+void vsuxseg8ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8mf2)))
+void vsuxseg8ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8mf2_m)))
+void vsuxseg8ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8mf4)))
+void vsuxseg8ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8mf4_m)))
+void vsuxseg8ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8mf8)))
+void vsuxseg8ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i8mf8_m)))
+void vsuxseg8ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8m1)))
+void vsuxseg8ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8m1_m)))
+void vsuxseg8ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8mf2)))
+void vsuxseg8ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8mf2_m)))
+void vsuxseg8ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8mf4)))
+void vsuxseg8ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8mf4_m)))
+void vsuxseg8ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8mf8)))
+void vsuxseg8ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u8mf8_m)))
+void vsuxseg8ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8m1)))
+void vsuxseg2ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8m1_m)))
+void vsuxseg2ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8m2)))
+void vsuxseg2ei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8m2_m)))
+void vsuxseg2ei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8m4)))
+void vsuxseg2ei16(int8_t * op0, vuint16m8_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8m4_m)))
+void vsuxseg2ei16(vbool2_t op0, int8_t * op1, vuint16m8_t op2, vint8m4_t op3, vint8m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8mf2)))
+void vsuxseg2ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8mf2_m)))
+void vsuxseg2ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8mf4)))
+void vsuxseg2ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8mf4_m)))
+void vsuxseg2ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8mf8)))
+void vsuxseg2ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i8mf8_m)))
+void vsuxseg2ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8m1)))
+void vsuxseg2ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8m1_m)))
+void vsuxseg2ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8m2)))
+void vsuxseg2ei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8m2_m)))
+void vsuxseg2ei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8m4)))
+void vsuxseg2ei16(uint8_t * op0, vuint16m8_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8m4_m)))
+void vsuxseg2ei16(vbool2_t op0, uint8_t * op1, vuint16m8_t op2, vuint8m4_t op3, vuint8m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8mf2)))
+void vsuxseg2ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8mf2_m)))
+void vsuxseg2ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8mf4)))
+void vsuxseg2ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8mf4_m)))
+void vsuxseg2ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8mf8)))
+void vsuxseg2ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u8mf8_m)))
+void vsuxseg2ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8m1)))
+void vsuxseg3ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8m1_m)))
+void vsuxseg3ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8m2)))
+void vsuxseg3ei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8m2_m)))
+void vsuxseg3ei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8mf2)))
+void vsuxseg3ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8mf2_m)))
+void vsuxseg3ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8mf4)))
+void vsuxseg3ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8mf4_m)))
+void vsuxseg3ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8mf8)))
+void vsuxseg3ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i8mf8_m)))
+void vsuxseg3ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8m1)))
+void vsuxseg3ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8m1_m)))
+void vsuxseg3ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8m2)))
+void vsuxseg3ei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8m2_m)))
+void vsuxseg3ei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8mf2)))
+void vsuxseg3ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8mf2_m)))
+void vsuxseg3ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8mf4)))
+void vsuxseg3ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8mf4_m)))
+void vsuxseg3ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8mf8)))
+void vsuxseg3ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u8mf8_m)))
+void vsuxseg3ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8m1)))
+void vsuxseg4ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8m1_m)))
+void vsuxseg4ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8m2)))
+void vsuxseg4ei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8m2_m)))
+void vsuxseg4ei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8mf2)))
+void vsuxseg4ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8mf2_m)))
+void vsuxseg4ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8mf4)))
+void vsuxseg4ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8mf4_m)))
+void vsuxseg4ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8mf8)))
+void vsuxseg4ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i8mf8_m)))
+void vsuxseg4ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8m1)))
+void vsuxseg4ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8m1_m)))
+void vsuxseg4ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8m2)))
+void vsuxseg4ei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8m2_m)))
+void vsuxseg4ei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8mf2)))
+void vsuxseg4ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8mf2_m)))
+void vsuxseg4ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8mf4)))
+void vsuxseg4ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8mf4_m)))
+void vsuxseg4ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8mf8)))
+void vsuxseg4ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u8mf8_m)))
+void vsuxseg4ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8m1)))
+void vsuxseg5ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8m1_m)))
+void vsuxseg5ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8mf2)))
+void vsuxseg5ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8mf2_m)))
+void vsuxseg5ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8mf4)))
+void vsuxseg5ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8mf4_m)))
+void vsuxseg5ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8mf8)))
+void vsuxseg5ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i8mf8_m)))
+void vsuxseg5ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8m1)))
+void vsuxseg5ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8m1_m)))
+void vsuxseg5ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8mf2)))
+void vsuxseg5ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8mf2_m)))
+void vsuxseg5ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8mf4)))
+void vsuxseg5ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8mf4_m)))
+void vsuxseg5ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8mf8)))
+void vsuxseg5ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u8mf8_m)))
+void vsuxseg5ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8m1)))
+void vsuxseg6ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8m1_m)))
+void vsuxseg6ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8mf2)))
+void vsuxseg6ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8mf2_m)))
+void vsuxseg6ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8mf4)))
+void vsuxseg6ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8mf4_m)))
+void vsuxseg6ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8mf8)))
+void vsuxseg6ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i8mf8_m)))
+void vsuxseg6ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8m1)))
+void vsuxseg6ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8m1_m)))
+void vsuxseg6ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8mf2)))
+void vsuxseg6ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8mf2_m)))
+void vsuxseg6ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8mf4)))
+void vsuxseg6ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8mf4_m)))
+void vsuxseg6ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8mf8)))
+void vsuxseg6ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u8mf8_m)))
+void vsuxseg6ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8m1)))
+void vsuxseg7ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8m1_m)))
+void vsuxseg7ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8mf2)))
+void vsuxseg7ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8mf2_m)))
+void vsuxseg7ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8mf4)))
+void vsuxseg7ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8mf4_m)))
+void vsuxseg7ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8mf8)))
+void vsuxseg7ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i8mf8_m)))
+void vsuxseg7ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8m1)))
+void vsuxseg7ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8m1_m)))
+void vsuxseg7ei16(vbool8_t op0, uint8_t * 
op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8mf2))) +void vsuxseg7ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8mf2_m))) +void vsuxseg7ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8mf4))) +void vsuxseg7ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8mf4_m))) +void vsuxseg7ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8mf8))) +void vsuxseg7ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u8mf8_m))) +void vsuxseg7ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8m1))) +void vsuxseg8ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8m1_m))) +void vsuxseg8ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8mf2))) +void vsuxseg8ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8mf2_m))) +void vsuxseg8ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8mf4))) +void vsuxseg8ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8mf4_m))) +void vsuxseg8ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, 
vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8mf8))) +void vsuxseg8ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i8mf8_m))) +void vsuxseg8ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8m1))) +void vsuxseg8ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8m1_m))) +void vsuxseg8ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8mf2))) +void vsuxseg8ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8mf2_m))) +void vsuxseg8ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8mf4))) +void vsuxseg8ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8mf4_m))) +void vsuxseg8ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8mf8))) +void vsuxseg8ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u8mf8_m))) +void vsuxseg8ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8m1))) +void vsuxseg2ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8m1_m))) +void vsuxseg2ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8m2))) +void 
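+/* Editorial note (not part of the generated header): the declarations
+ * above and below follow the RVV intrinsic naming scheme vsuxseg<NF>ei<EEW>,
+ * an indexed-unordered store of a segment of NF fields using an unsigned
+ * index vector with EEW-bit elements (byte offsets from the base pointer).
+ * The unmasked form takes the base pointer, the index vector, NF data
+ * vectors, and the element count vl; the masked (_m) alias prepends a
+ * vbool<N>_t mask, where N = SEW/LMUL of the data type (e.g. vbool8_t for
+ * i8m1, vbool16_t for i8mf2). The index vector's LMUL scales with EEW/SEW,
+ * which is why ei32 variants stop at m2 data and ei64 variants at m1. */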
vsuxseg2ei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8m2_m))) +void vsuxseg2ei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8mf2))) +void vsuxseg2ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8mf2_m))) +void vsuxseg2ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8mf4))) +void vsuxseg2ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8mf4_m))) +void vsuxseg2ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8mf8))) +void vsuxseg2ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i8mf8_m))) +void vsuxseg2ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8m1))) +void vsuxseg2ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8m1_m))) +void vsuxseg2ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8m2))) +void vsuxseg2ei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8m2_m))) +void vsuxseg2ei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8mf2))) +void vsuxseg2ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8mf2_m))) +void vsuxseg2ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8mf4))) +void vsuxseg2ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8mf4_m))) +void vsuxseg2ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8mf8))) +void vsuxseg2ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u8mf8_m))) +void vsuxseg2ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8m1))) +void 
vsuxseg3ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8m1_m))) +void vsuxseg3ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8m2))) +void vsuxseg3ei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8m2_m))) +void vsuxseg3ei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8mf2))) +void vsuxseg3ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8mf2_m))) +void vsuxseg3ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8mf4))) +void vsuxseg3ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8mf4_m))) +void vsuxseg3ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8mf8))) +void vsuxseg3ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i8mf8_m))) +void vsuxseg3ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8m1))) +void vsuxseg3ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8m1_m))) +void vsuxseg3ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8m2))) +void vsuxseg3ei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8m2_m))) +void vsuxseg3ei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8mf2))) +void vsuxseg3ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8mf2_m))) +void vsuxseg3ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8mf4))) +void vsuxseg3ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8mf4_m))) +void vsuxseg3ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8mf8))) +void vsuxseg3ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u8mf8_m))) +void vsuxseg3ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8m1))) +void vsuxseg4ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8m1_m))) +void vsuxseg4ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8m2))) +void vsuxseg4ei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8m2_m))) +void vsuxseg4ei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8mf2))) +void vsuxseg4ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8mf2_m))) +void vsuxseg4ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8mf4))) +void vsuxseg4ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8mf4_m))) +void vsuxseg4ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8mf8))) +void vsuxseg4ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i8mf8_m))) +void vsuxseg4ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8m1))) +void vsuxseg4ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8m1_m))) +void vsuxseg4ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8m2))) +void vsuxseg4ei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, 
vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8m2_m))) +void vsuxseg4ei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8mf2))) +void vsuxseg4ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8mf2_m))) +void vsuxseg4ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8mf4))) +void vsuxseg4ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8mf4_m))) +void vsuxseg4ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8mf8))) +void vsuxseg4ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u8mf8_m))) +void vsuxseg4ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8m1))) +void vsuxseg5ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8m1_m))) +void vsuxseg5ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8mf2))) +void vsuxseg5ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8mf2_m))) +void vsuxseg5ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8mf4))) +void vsuxseg5ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8mf4_m))) +void vsuxseg5ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8mf8))) +void vsuxseg5ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i8mf8_m))) +void vsuxseg5ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, 
vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8m1))) +void vsuxseg5ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8m1_m))) +void vsuxseg5ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8mf2))) +void vsuxseg5ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8mf2_m))) +void vsuxseg5ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8mf4))) +void vsuxseg5ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8mf4_m))) +void vsuxseg5ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8mf8))) +void vsuxseg5ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u8mf8_m))) +void vsuxseg5ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8m1))) +void vsuxseg6ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8m1_m))) +void vsuxseg6ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8mf2))) +void vsuxseg6ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8mf2_m))) +void vsuxseg6ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8mf4))) +void vsuxseg6ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8mf4_m))) +void vsuxseg6ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t 
op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8mf8))) +void vsuxseg6ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i8mf8_m))) +void vsuxseg6ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8m1))) +void vsuxseg6ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8m1_m))) +void vsuxseg6ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8mf2))) +void vsuxseg6ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8mf2_m))) +void vsuxseg6ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8mf4))) +void vsuxseg6ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8mf4_m))) +void vsuxseg6ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8mf8))) +void vsuxseg6ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u8mf8_m))) +void vsuxseg6ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8m1))) +void vsuxseg7ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8m1_m))) +void vsuxseg7ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8mf2))) +void vsuxseg7ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8mf2_m))) +void vsuxseg7ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8mf4))) +void vsuxseg7ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8mf4_m))) +void vsuxseg7ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8mf8))) +void vsuxseg7ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i8mf8_m))) +void vsuxseg7ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8m1))) +void vsuxseg7ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8m1_m))) +void vsuxseg7ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8mf2))) +void vsuxseg7ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8mf2_m))) +void vsuxseg7ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8mf4))) +void vsuxseg7ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8mf4_m))) +void vsuxseg7ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8mf8))) +void vsuxseg7ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u8mf8_m))) +void vsuxseg7ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t 
op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8m1))) +void vsuxseg8ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8m1_m))) +void vsuxseg8ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8mf2))) +void vsuxseg8ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8mf2_m))) +void vsuxseg8ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8mf4))) +void vsuxseg8ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8mf4_m))) +void vsuxseg8ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8mf8))) +void vsuxseg8ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i8mf8_m))) +void vsuxseg8ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8m1))) +void vsuxseg8ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8m1_m))) +void vsuxseg8ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8mf2))) +void vsuxseg8ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8mf2_m))) +void vsuxseg8ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, 
vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8mf4))) +void vsuxseg8ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8mf4_m))) +void vsuxseg8ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8mf8))) +void vsuxseg8ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u8mf8_m))) +void vsuxseg8ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8m1))) +void vsuxseg2ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8m1_m))) +void vsuxseg2ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8mf2))) +void vsuxseg2ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8mf2_m))) +void vsuxseg2ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8mf4))) +void vsuxseg2ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8mf4_m))) +void vsuxseg2ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8mf8))) +void vsuxseg2ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i8mf8_m))) +void vsuxseg2ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8m1))) +void vsuxseg2ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8m1_m))) +void vsuxseg2ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8mf2))) +void vsuxseg2ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8mf2_m))) +void vsuxseg2ei64(vbool16_t 
op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8mf4))) +void vsuxseg2ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8mf4_m))) +void vsuxseg2ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8mf8))) +void vsuxseg2ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u8mf8_m))) +void vsuxseg2ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8m1))) +void vsuxseg3ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8m1_m))) +void vsuxseg3ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8mf2))) +void vsuxseg3ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8mf2_m))) +void vsuxseg3ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8mf4))) +void vsuxseg3ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8mf4_m))) +void vsuxseg3ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8mf8))) +void vsuxseg3ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i8mf8_m))) +void vsuxseg3ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8m1))) +void vsuxseg3ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8m1_m))) +void vsuxseg3ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8mf2))) +void vsuxseg3ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8mf2_m))) +void vsuxseg3ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8mf4))) +void vsuxseg3ei64(uint8_t * 
op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8mf4_m))) +void vsuxseg3ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8mf8))) +void vsuxseg3ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u8mf8_m))) +void vsuxseg3ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8m1))) +void vsuxseg4ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8m1_m))) +void vsuxseg4ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8mf2))) +void vsuxseg4ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8mf2_m))) +void vsuxseg4ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8mf4))) +void vsuxseg4ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8mf4_m))) +void vsuxseg4ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8mf8))) +void vsuxseg4ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i8mf8_m))) +void vsuxseg4ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8m1))) +void vsuxseg4ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8m1_m))) +void vsuxseg4ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8mf2))) +void vsuxseg4ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8mf2_m))) +void vsuxseg4ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8mf4))) +void vsuxseg4ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8mf4_m))) +void vsuxseg4ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8mf8))) +void vsuxseg4ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u8mf8_m))) +void vsuxseg4ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8m1))) +void vsuxseg5ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8m1_m))) +void vsuxseg5ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8mf2))) +void vsuxseg5ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8mf2_m))) +void vsuxseg5ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8mf4))) +void vsuxseg5ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8mf4_m))) +void vsuxseg5ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8mf8))) +void vsuxseg5ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i8mf8_m))) +void vsuxseg5ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8m1))) +void vsuxseg5ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8m1_m))) +void vsuxseg5ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8mf2))) +void vsuxseg5ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t 
op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8mf2_m))) +void vsuxseg5ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8mf4))) +void vsuxseg5ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8mf4_m))) +void vsuxseg5ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8mf8))) +void vsuxseg5ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u8mf8_m))) +void vsuxseg5ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8m1))) +void vsuxseg6ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8m1_m))) +void vsuxseg6ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8mf2))) +void vsuxseg6ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8mf2_m))) +void vsuxseg6ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8mf4))) +void vsuxseg6ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8mf4_m))) +void vsuxseg6ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8mf8))) +void vsuxseg6ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i8mf8_m))) +void vsuxseg6ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8m1))) +void vsuxseg6ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t 
op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8m1_m))) +void vsuxseg6ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8mf2))) +void vsuxseg6ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8mf2_m))) +void vsuxseg6ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8mf4))) +void vsuxseg6ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8mf4_m))) +void vsuxseg6ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8mf8))) +void vsuxseg6ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u8mf8_m))) +void vsuxseg6ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8m1))) +void vsuxseg7ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8m1_m))) +void vsuxseg7ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8mf2))) +void vsuxseg7ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8mf2_m))) +void vsuxseg7ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8mf4))) +void vsuxseg7ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8mf4_m))) +void vsuxseg7ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, 
vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8mf8))) +void vsuxseg7ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i8mf8_m))) +void vsuxseg7ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8m1))) +void vsuxseg7ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8m1_m))) +void vsuxseg7ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8mf2))) +void vsuxseg7ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8mf2_m))) +void vsuxseg7ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8mf4))) +void vsuxseg7ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8mf4_m))) +void vsuxseg7ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8mf8))) +void vsuxseg7ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u8mf8_m))) +void vsuxseg7ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8m1))) +void vsuxseg8ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8m1_m))) +void vsuxseg8ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8mf2))) +void vsuxseg8ei64(int8_t * op0, vuint64m4_t 
op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8mf2_m))) +void vsuxseg8ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8mf4))) +void vsuxseg8ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8mf4_m))) +void vsuxseg8ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8mf8))) +void vsuxseg8ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i8mf8_m))) +void vsuxseg8ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8m1))) +void vsuxseg8ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8m1_m))) +void vsuxseg8ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8mf2))) +void vsuxseg8ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8mf2_m))) +void vsuxseg8ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8mf4))) +void vsuxseg8ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8mf4_m))) +void vsuxseg8ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8mf8))) +void vsuxseg8ei64(uint8_t * op0, 
vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u8mf8_m))) +void vsuxseg8ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16m1))) +void vsuxseg2ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16m1_m))) +void vsuxseg2ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16m2))) +void vsuxseg2ei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16m2_m))) +void vsuxseg2ei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16m4))) +void vsuxseg2ei8(int16_t * op0, vuint8m2_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16m4_m))) +void vsuxseg2ei8(vbool4_t op0, int16_t * op1, vuint8m2_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16mf2))) +void vsuxseg2ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16mf2_m))) +void vsuxseg2ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16mf4))) +void vsuxseg2ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i16mf4_m))) +void vsuxseg2ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16m1))) +void vsuxseg2ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16m1_m))) +void vsuxseg2ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16m2))) +void vsuxseg2ei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16m2_m))) +void vsuxseg2ei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16m4))) +void vsuxseg2ei8(uint16_t * op0, vuint8m2_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16m4_m))) +void vsuxseg2ei8(vbool4_t op0, uint16_t 
* op1, vuint8m2_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16mf2))) +void vsuxseg2ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16mf2_m))) +void vsuxseg2ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16mf4))) +void vsuxseg2ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u16mf4_m))) +void vsuxseg2ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16m1))) +void vsuxseg3ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16m1_m))) +void vsuxseg3ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16m2))) +void vsuxseg3ei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16m2_m))) +void vsuxseg3ei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16mf2))) +void vsuxseg3ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16mf2_m))) +void vsuxseg3ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16mf4))) +void vsuxseg3ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i16mf4_m))) +void vsuxseg3ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16m1))) +void vsuxseg3ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16m1_m))) +void vsuxseg3ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16m2))) +void vsuxseg3ei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16m2_m))) +void vsuxseg3ei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16mf2))) +void 
vsuxseg3ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16mf2_m))) +void vsuxseg3ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16mf4))) +void vsuxseg3ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u16mf4_m))) +void vsuxseg3ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16m1))) +void vsuxseg4ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16m1_m))) +void vsuxseg4ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16m2))) +void vsuxseg4ei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16m2_m))) +void vsuxseg4ei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16mf2))) +void vsuxseg4ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16mf2_m))) +void vsuxseg4ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16mf4))) +void vsuxseg4ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i16mf4_m))) +void vsuxseg4ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16m1))) +void vsuxseg4ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16m1_m))) +void vsuxseg4ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16m2))) +void vsuxseg4ei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16m2_m))) +void vsuxseg4ei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + 
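(All of the vsuxsegNeiW overloads above share one shape: destination base pointer, an unsigned byte-offset vector whose element width matches the eiW suffix, N segment-field vectors, and a trailing vector length; the _m variants prepend a vboolK_t mask. As a minimal usage sketch — not part of this header diff, and assuming the vsetvl_e16m1 and vle8_v_u8mf2 intrinsics declared elsewhere in this header plus a toolchain with the V extension enabled — the three-field u16m1 overload declared above can be used like this:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: unordered scatter of a 3-field segment of uint16 values.
 * For e16/m1 data, the ei8 index type is vuint8mf2_t (same SEW/LMUL
 * ratio as the data), and the offsets are in bytes. */
void scatter3(uint16_t *base, const uint8_t *offsets,
              vuint16m1_t f0, vuint16m1_t f1, vuint16m1_t f2,
              size_t n) {
  size_t vl = vsetvl_e16m1(n);                 /* elements this pass */
  vuint8mf2_t off = vle8_v_u8mf2(offsets, vl); /* load byte offsets  */
  vsuxseg3ei8(base, off, f0, f1, f2, vl);      /* 3-field seg. store */
}

Each element i stores the triple {f0[i], f1[i], f2[i]} contiguously at base + off[i]; the overload resolves to __builtin_rvv_vsuxseg3ei8_v_u16m1 from the argument types.)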
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16mf2))) +void vsuxseg4ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16mf2_m))) +void vsuxseg4ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16mf4))) +void vsuxseg4ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u16mf4_m))) +void vsuxseg4ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i16m1))) +void vsuxseg5ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i16m1_m))) +void vsuxseg5ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i16mf2))) +void vsuxseg5ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i16mf2_m))) +void vsuxseg5ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i16mf4))) +void vsuxseg5ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i16mf4_m))) +void vsuxseg5ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u16m1))) +void vsuxseg5ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u16m1_m))) +void vsuxseg5ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u16mf2))) +void vsuxseg5ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u16mf2_m))) +void vsuxseg5ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u16mf4))) +void 
vsuxseg5ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u16mf4_m))) +void vsuxseg5ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i16m1))) +void vsuxseg6ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i16m1_m))) +void vsuxseg6ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i16mf2))) +void vsuxseg6ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i16mf2_m))) +void vsuxseg6ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i16mf4))) +void vsuxseg6ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i16mf4_m))) +void vsuxseg6ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u16m1))) +void vsuxseg6ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u16m1_m))) +void vsuxseg6ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u16mf2))) +void vsuxseg6ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u16mf2_m))) +void vsuxseg6ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u16mf4))) +void vsuxseg6ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u16mf4_m))) +void vsuxseg6ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t 
op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i16m1))) +void vsuxseg7ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i16m1_m))) +void vsuxseg7ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i16mf2))) +void vsuxseg7ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i16mf2_m))) +void vsuxseg7ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i16mf4))) +void vsuxseg7ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i16mf4_m))) +void vsuxseg7ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u16m1))) +void vsuxseg7ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u16m1_m))) +void vsuxseg7ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u16mf2))) +void vsuxseg7ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u16mf2_m))) +void vsuxseg7ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u16mf4))) +void vsuxseg7ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u16mf4_m))) +void vsuxseg7ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i16m1))) +void vsuxseg8ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i16m1_m))) +void vsuxseg8ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i16mf2))) +void vsuxseg8ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i16mf2_m))) +void vsuxseg8ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i16mf4))) +void vsuxseg8ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i16mf4_m))) +void vsuxseg8ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u16m1))) +void vsuxseg8ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u16m1_m))) +void vsuxseg8ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u16mf2))) +void vsuxseg8ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u16mf2_m))) +void vsuxseg8ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u16mf4))) +void vsuxseg8ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u16mf4_m))) +void vsuxseg8ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, 
vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16m1))) +void vsuxseg2ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16m1_m))) +void vsuxseg2ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16m2))) +void vsuxseg2ei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16m2_m))) +void vsuxseg2ei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16m4))) +void vsuxseg2ei16(int16_t * op0, vuint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16m4_m))) +void vsuxseg2ei16(vbool4_t op0, int16_t * op1, vuint16m4_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16mf2))) +void vsuxseg2ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16mf2_m))) +void vsuxseg2ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16mf4))) +void vsuxseg2ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i16mf4_m))) +void vsuxseg2ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16m1))) +void vsuxseg2ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16m1_m))) +void vsuxseg2ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16m2))) +void vsuxseg2ei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16m2_m))) +void vsuxseg2ei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16m4))) +void vsuxseg2ei16(uint16_t * op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16m4_m))) +void vsuxseg2ei16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16mf2))) +void vsuxseg2ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16mf2_m))) 
+void vsuxseg2ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16mf4))) +void vsuxseg2ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u16mf4_m))) +void vsuxseg2ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16m1))) +void vsuxseg3ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16m1_m))) +void vsuxseg3ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16m2))) +void vsuxseg3ei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16m2_m))) +void vsuxseg3ei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16mf2))) +void vsuxseg3ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16mf2_m))) +void vsuxseg3ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16mf4))) +void vsuxseg3ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i16mf4_m))) +void vsuxseg3ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16m1))) +void vsuxseg3ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16m1_m))) +void vsuxseg3ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16m2))) +void vsuxseg3ei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16m2_m))) +void vsuxseg3ei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16mf2))) +void vsuxseg3ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16mf2_m))) +void vsuxseg3ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, 
size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16mf4))) +void vsuxseg3ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u16mf4_m))) +void vsuxseg3ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16m1))) +void vsuxseg4ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16m1_m))) +void vsuxseg4ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16m2))) +void vsuxseg4ei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16m2_m))) +void vsuxseg4ei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16mf2))) +void vsuxseg4ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16mf2_m))) +void vsuxseg4ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16mf4))) +void vsuxseg4ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i16mf4_m))) +void vsuxseg4ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16m1))) +void vsuxseg4ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16m1_m))) +void vsuxseg4ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16m2))) +void vsuxseg4ei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16m2_m))) +void vsuxseg4ei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16mf2))) +void vsuxseg4ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16mf2_m))) +void vsuxseg4ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16mf4))) +void vsuxseg4ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u16mf4_m))) +void vsuxseg4ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i16m1))) +void vsuxseg5ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i16m1_m))) +void vsuxseg5ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i16mf2))) +void vsuxseg5ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i16mf2_m))) +void vsuxseg5ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i16mf4))) +void vsuxseg5ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i16mf4_m))) +void vsuxseg5ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u16m1))) +void vsuxseg5ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u16m1_m))) +void vsuxseg5ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u16mf2))) +void vsuxseg5ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u16mf2_m))) +void vsuxseg5ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u16mf4))) +void vsuxseg5ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u16mf4_m))) +void vsuxseg5ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i16m1))) +void vsuxseg6ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i16m1_m))) +void vsuxseg6ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i16mf2))) +void vsuxseg6ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i16mf2_m))) +void vsuxseg6ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i16mf4))) +void vsuxseg6ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i16mf4_m))) +void vsuxseg6ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u16m1))) +void vsuxseg6ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u16m1_m))) +void vsuxseg6ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u16mf2))) +void vsuxseg6ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u16mf2_m))) +void vsuxseg6ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u16mf4))) +void vsuxseg6ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u16mf4_m))) +void vsuxseg6ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i16m1))) +void vsuxseg7ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i16m1_m))) +void vsuxseg7ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i16mf2))) +void vsuxseg7ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i16mf2_m))) +void vsuxseg7ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i16mf4))) +void vsuxseg7ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i16mf4_m))) +void vsuxseg7ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u16m1))) +void vsuxseg7ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u16m1_m))) +void vsuxseg7ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u16mf2))) +void vsuxseg7ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u16mf2_m))) +void vsuxseg7ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u16mf4))) +void vsuxseg7ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u16mf4_m))) +void vsuxseg7ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i16m1))) +void 
vsuxseg8ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i16m1_m))) +void vsuxseg8ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i16mf2))) +void vsuxseg8ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i16mf2_m))) +void vsuxseg8ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i16mf4))) +void vsuxseg8ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i16mf4_m))) +void vsuxseg8ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u16m1))) +void vsuxseg8ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u16m1_m))) +void vsuxseg8ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u16mf2))) +void vsuxseg8ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u16mf2_m))) +void vsuxseg8ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u16mf4))) +void vsuxseg8ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u16mf4_m))) +void vsuxseg8ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t 
op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16m1))) +void vsuxseg2ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16m1_m))) +void vsuxseg2ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16m2))) +void vsuxseg2ei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16m2_m))) +void vsuxseg2ei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16m4))) +void vsuxseg2ei32(int16_t * op0, vuint32m8_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16m4_m))) +void vsuxseg2ei32(vbool4_t op0, int16_t * op1, vuint32m8_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16mf2))) +void vsuxseg2ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16mf2_m))) +void vsuxseg2ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16mf4))) +void vsuxseg2ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i16mf4_m))) +void vsuxseg2ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16m1))) +void vsuxseg2ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16m1_m))) +void vsuxseg2ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16m2))) +void vsuxseg2ei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16m2_m))) +void vsuxseg2ei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16m4))) +void vsuxseg2ei32(uint16_t * op0, vuint32m8_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16m4_m))) +void vsuxseg2ei32(vbool4_t op0, uint16_t * op1, vuint32m8_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16mf2))) +void vsuxseg2ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16mf2_m))) +void vsuxseg2ei32(vbool32_t op0, uint16_t * op1, 
vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16mf4))) +void vsuxseg2ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u16mf4_m))) +void vsuxseg2ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16m1))) +void vsuxseg3ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16m1_m))) +void vsuxseg3ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16m2))) +void vsuxseg3ei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16m2_m))) +void vsuxseg3ei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16mf2))) +void vsuxseg3ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16mf2_m))) +void vsuxseg3ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16mf4))) +void vsuxseg3ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i16mf4_m))) +void vsuxseg3ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16m1))) +void vsuxseg3ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16m1_m))) +void vsuxseg3ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16m2))) +void vsuxseg3ei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16m2_m))) +void vsuxseg3ei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16mf2))) +void vsuxseg3ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16mf2_m))) +void vsuxseg3ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16mf4))) +void vsuxseg3ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u16mf4_m))) +void vsuxseg3ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16m1))) +void vsuxseg4ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16m1_m))) +void vsuxseg4ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16m2))) +void vsuxseg4ei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16m2_m))) +void vsuxseg4ei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16mf2))) +void vsuxseg4ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16mf2_m))) +void vsuxseg4ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16mf4))) +void vsuxseg4ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i16mf4_m))) +void vsuxseg4ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16m1))) +void vsuxseg4ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16m1_m))) +void vsuxseg4ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16m2))) +void vsuxseg4ei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16m2_m))) +void vsuxseg4ei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16mf2))) +void vsuxseg4ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16mf2_m))) +void 
vsuxseg4ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16mf4))) +void vsuxseg4ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u16mf4_m))) +void vsuxseg4ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i16m1))) +void vsuxseg5ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i16m1_m))) +void vsuxseg5ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i16mf2))) +void vsuxseg5ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i16mf2_m))) +void vsuxseg5ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i16mf4))) +void vsuxseg5ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i16mf4_m))) +void vsuxseg5ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u16m1))) +void vsuxseg5ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u16m1_m))) +void vsuxseg5ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u16mf2))) +void vsuxseg5ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u16mf2_m))) +void vsuxseg5ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u16mf4))) +void vsuxseg5ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u16mf4_m))) +void vsuxseg5ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t 
op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i16m1))) +void vsuxseg6ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i16m1_m))) +void vsuxseg6ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i16mf2))) +void vsuxseg6ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i16mf2_m))) +void vsuxseg6ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i16mf4))) +void vsuxseg6ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i16mf4_m))) +void vsuxseg6ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u16m1))) +void vsuxseg6ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u16m1_m))) +void vsuxseg6ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u16mf2))) +void vsuxseg6ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u16mf2_m))) +void vsuxseg6ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u16mf4))) +void vsuxseg6ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u16mf4_m))) +void vsuxseg6ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i16m1))) +void vsuxseg7ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t 
op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i16m1_m))) +void vsuxseg7ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i16mf2))) +void vsuxseg7ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i16mf2_m))) +void vsuxseg7ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i16mf4))) +void vsuxseg7ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i16mf4_m))) +void vsuxseg7ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u16m1))) +void vsuxseg7ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u16m1_m))) +void vsuxseg7ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u16mf2))) +void vsuxseg7ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u16mf2_m))) +void vsuxseg7ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u16mf4))) +void vsuxseg7ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u16mf4_m))) +void vsuxseg7ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i16m1))) +void vsuxseg8ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i16m1_m))) +void vsuxseg8ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i16mf2))) +void vsuxseg8ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i16mf2_m))) +void vsuxseg8ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i16mf4))) +void vsuxseg8ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i16mf4_m))) +void vsuxseg8ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u16m1))) +void vsuxseg8ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u16m1_m))) +void vsuxseg8ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u16mf2))) +void vsuxseg8ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u16mf2_m))) +void vsuxseg8ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u16mf4))) +void vsuxseg8ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u16mf4_m))) +void vsuxseg8ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16m1))) +void vsuxseg2ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + 
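+/* Illustrative sketch, added as an annotation rather than upstream header
+ * text: a hypothetical helper showing how the overloaded vsuxseg2ei64 form
+ * declared above is meant to be called.  For each i < vl it stores the
+ * two-element segment {f0[i], f1[i]} contiguously at base + offs[i], where
+ * offs holds byte offsets; the argument types (int16_t *, vuint64m4_t, two
+ * vint16m1_t, size_t) resolve the call to
+ * __builtin_rvv_vsuxseg2ei64_v_i16m1.  The helper name scatter_i16_pairs
+ * is an assumption for this example only.
+ *
+ *   static inline void scatter_i16_pairs(int16_t *base, vuint64m4_t offs,
+ *                                        vint16m1_t f0, vint16m1_t f1,
+ *                                        size_t vl) {
+ *     // Unordered indexed segment store: 2 fields, 64-bit byte offsets.
+ *     // With SEW=16 and LMUL=1 data, the EEW=64 index vector needs
+ *     // EMUL=4, hence vuint64m4_t.
+ *     vsuxseg2ei64(base, offs, f0, f1, vl);
+ *   }
+ */
+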
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16m1_m))) +void vsuxseg2ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16m2))) +void vsuxseg2ei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16m2_m))) +void vsuxseg2ei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16mf2))) +void vsuxseg2ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16mf2_m))) +void vsuxseg2ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16mf4))) +void vsuxseg2ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i16mf4_m))) +void vsuxseg2ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16m1))) +void vsuxseg2ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16m1_m))) +void vsuxseg2ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16m2))) +void vsuxseg2ei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16m2_m))) +void vsuxseg2ei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16mf2))) +void vsuxseg2ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16mf2_m))) +void vsuxseg2ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16mf4))) +void vsuxseg2ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u16mf4_m))) +void vsuxseg2ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16m1))) +void vsuxseg3ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16m1_m))) +void vsuxseg3ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16m2))) +void 
vsuxseg3ei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16m2_m))) +void vsuxseg3ei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16mf2))) +void vsuxseg3ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16mf2_m))) +void vsuxseg3ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16mf4))) +void vsuxseg3ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i16mf4_m))) +void vsuxseg3ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16m1))) +void vsuxseg3ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16m1_m))) +void vsuxseg3ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16m2))) +void vsuxseg3ei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16m2_m))) +void vsuxseg3ei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16mf2))) +void vsuxseg3ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16mf2_m))) +void vsuxseg3ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16mf4))) +void vsuxseg3ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u16mf4_m))) +void vsuxseg3ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16m1))) +void vsuxseg4ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16m1_m))) +void vsuxseg4ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16m2))) +void vsuxseg4ei64(int16_t * op0, vuint64m8_t op1, 
vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16m2_m))) +void vsuxseg4ei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16mf2))) +void vsuxseg4ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16mf2_m))) +void vsuxseg4ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16mf4))) +void vsuxseg4ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i16mf4_m))) +void vsuxseg4ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16m1))) +void vsuxseg4ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16m1_m))) +void vsuxseg4ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16m2))) +void vsuxseg4ei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16m2_m))) +void vsuxseg4ei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16mf2))) +void vsuxseg4ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16mf2_m))) +void vsuxseg4ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16mf4))) +void vsuxseg4ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u16mf4_m))) +void vsuxseg4ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i16m1))) +void vsuxseg5ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i16m1_m))) +void vsuxseg5ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, 
vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i16mf2))) +void vsuxseg5ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i16mf2_m))) +void vsuxseg5ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i16mf4))) +void vsuxseg5ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i16mf4_m))) +void vsuxseg5ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u16m1))) +void vsuxseg5ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u16m1_m))) +void vsuxseg5ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u16mf2))) +void vsuxseg5ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u16mf2_m))) +void vsuxseg5ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u16mf4))) +void vsuxseg5ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u16mf4_m))) +void vsuxseg5ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i16m1))) +void vsuxseg6ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i16m1_m))) +void vsuxseg6ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i16mf2))) +void vsuxseg6ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i16mf2_m))) +void vsuxseg6ei64(vbool32_t op0, int16_t * 
op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i16mf4))) +void vsuxseg6ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i16mf4_m))) +void vsuxseg6ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u16m1))) +void vsuxseg6ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u16m1_m))) +void vsuxseg6ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u16mf2))) +void vsuxseg6ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u16mf2_m))) +void vsuxseg6ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u16mf4))) +void vsuxseg6ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u16mf4_m))) +void vsuxseg6ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i16m1))) +void vsuxseg7ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i16m1_m))) +void vsuxseg7ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i16mf2))) +void vsuxseg7ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i16mf2_m))) +void vsuxseg7ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i16mf4))) +void 
vsuxseg7ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i16mf4_m))) +void vsuxseg7ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u16m1))) +void vsuxseg7ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u16m1_m))) +void vsuxseg7ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u16mf2))) +void vsuxseg7ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u16mf2_m))) +void vsuxseg7ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u16mf4))) +void vsuxseg7ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u16mf4_m))) +void vsuxseg7ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i16m1))) +void vsuxseg8ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i16m1_m))) +void vsuxseg8ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i16mf2))) +void vsuxseg8ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i16mf2_m))) +void vsuxseg8ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i16mf4))) +void vsuxseg8ei64(int16_t * op0, 
vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i16mf4_m))) +void vsuxseg8ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u16m1))) +void vsuxseg8ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u16m1_m))) +void vsuxseg8ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u16mf2))) +void vsuxseg8ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u16mf2_m))) +void vsuxseg8ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u16mf4))) +void vsuxseg8ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u16mf4_m))) +void vsuxseg8ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32m1))) +void vsuxseg2ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32m1_m))) +void vsuxseg2ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32m2))) +void vsuxseg2ei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32m2_m))) +void vsuxseg2ei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32m4))) +void vsuxseg2ei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32m4_m))) +void vsuxseg2ei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32mf2))) +void vsuxseg2ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i32mf2_m))) +void vsuxseg2ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32m1))) +void vsuxseg2ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32m1_m))) +void vsuxseg2ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32m2))) +void vsuxseg2ei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32m2_m))) +void vsuxseg2ei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32m4))) +void vsuxseg2ei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32m4_m))) +void vsuxseg2ei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32mf2))) +void vsuxseg2ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u32mf2_m))) +void vsuxseg2ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i32m1))) +void vsuxseg3ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i32m1_m))) +void vsuxseg3ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i32m2))) +void vsuxseg3ei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i32m2_m))) +void vsuxseg3ei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i32mf2))) +void vsuxseg3ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i32mf2_m))) +void vsuxseg3ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u32m1))) +void vsuxseg3ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u32m1_m))) +void 
vsuxseg3ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u32m2))) +void vsuxseg3ei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u32m2_m))) +void vsuxseg3ei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u32mf2))) +void vsuxseg3ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u32mf2_m))) +void vsuxseg3ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i32m1))) +void vsuxseg4ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i32m1_m))) +void vsuxseg4ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i32m2))) +void vsuxseg4ei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i32m2_m))) +void vsuxseg4ei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i32mf2))) +void vsuxseg4ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i32mf2_m))) +void vsuxseg4ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u32m1))) +void vsuxseg4ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u32m1_m))) +void vsuxseg4ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u32m2))) +void vsuxseg4ei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u32m2_m))) +void vsuxseg4ei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u32mf2))) +void vsuxseg4ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u32mf2_m))) +void vsuxseg4ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i32m1))) +void vsuxseg5ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i32m1_m))) +void vsuxseg5ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i32mf2))) +void vsuxseg5ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i32mf2_m))) +void vsuxseg5ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u32m1))) +void vsuxseg5ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u32m1_m))) +void vsuxseg5ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u32mf2))) +void vsuxseg5ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u32mf2_m))) +void vsuxseg5ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i32m1))) +void vsuxseg6ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i32m1_m))) +void vsuxseg6ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i32mf2))) +void vsuxseg6ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i32mf2_m))) +void vsuxseg6ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u32m1))) +void vsuxseg6ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u32m1_m))) +void vsuxseg6ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u32mf2))) +void vsuxseg6ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u32mf2_m))) +void vsuxseg6ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i32m1))) +void vsuxseg7ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i32m1_m))) +void vsuxseg7ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i32mf2))) +void vsuxseg7ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i32mf2_m))) +void vsuxseg7ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u32m1))) +void vsuxseg7ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u32m1_m))) +void vsuxseg7ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u32mf2))) +void vsuxseg7ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u32mf2_m))) +void vsuxseg7ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i32m1))) +void vsuxseg8ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i32m1_m))) +void vsuxseg8ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, 
vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i32mf2))) +void vsuxseg8ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i32mf2_m))) +void vsuxseg8ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u32m1))) +void vsuxseg8ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u32m1_m))) +void vsuxseg8ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u32mf2))) +void vsuxseg8ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u32mf2_m))) +void vsuxseg8ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32m1))) +void vsuxseg2ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32m1_m))) +void vsuxseg2ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32m2))) +void vsuxseg2ei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32m2_m))) +void vsuxseg2ei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32m4))) +void vsuxseg2ei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32m4_m))) +void vsuxseg2ei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32mf2))) +void vsuxseg2ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i32mf2_m))) +void vsuxseg2ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded 
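/* A minimal usage sketch for the unmasked overloads above. It assumes the
 * v0.10-style companion intrinsics declared elsewhere in this header
 * (vsetvl_e32m1, vle16_v_u16mf2, vle32_v_i32m1); scatter_xy is a
 * hypothetical helper, not part of the generated header.
 *
 *   // Store {x[i], y[i]} as a two-field segment at base + off[i] bytes.
 *   void scatter_xy(int32_t *base, const uint16_t *off,
 *                   const int32_t *x, const int32_t *y, size_t n) {
 *     for (size_t i = 0; i < n;) {
 *       size_t vl = vsetvl_e32m1(n - i);                 // elements this pass
 *       vuint16mf2_t idx = vle16_v_u16mf2(off + i, vl);  // byte offsets
 *       vint32m1_t vx = vle32_v_i32m1(x + i, vl);
 *       vint32m1_t vy = vle32_v_i32m1(y + i, vl);
 *       vsuxseg2ei16(base, idx, vx, vy, vl);  // resolves to ..._v_i32m1
 *       i += vl;
 *     }
 *   }
 */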
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32m1))) +void vsuxseg2ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32m1_m))) +void vsuxseg2ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32m2))) +void vsuxseg2ei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32m2_m))) +void vsuxseg2ei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32m4))) +void vsuxseg2ei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32m4_m))) +void vsuxseg2ei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32mf2))) +void vsuxseg2ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u32mf2_m))) +void vsuxseg2ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i32m1))) +void vsuxseg3ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i32m1_m))) +void vsuxseg3ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i32m2))) +void vsuxseg3ei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i32m2_m))) +void vsuxseg3ei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i32mf2))) +void vsuxseg3ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i32mf2_m))) +void vsuxseg3ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u32m1))) +void vsuxseg3ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u32m1_m))) +void vsuxseg3ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u32m2))) +void vsuxseg3ei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u32m2_m))) +void vsuxseg3ei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u32mf2))) +void vsuxseg3ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u32mf2_m))) +void vsuxseg3ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i32m1))) +void vsuxseg4ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i32m1_m))) +void vsuxseg4ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i32m2))) +void vsuxseg4ei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i32m2_m))) +void vsuxseg4ei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i32mf2))) +void vsuxseg4ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i32mf2_m))) +void vsuxseg4ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u32m1))) +void vsuxseg4ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u32m1_m))) +void vsuxseg4ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u32m2))) +void vsuxseg4ei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u32m2_m))) +void vsuxseg4ei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u32mf2))) +void vsuxseg4ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u32mf2_m))) +void vsuxseg4ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded 
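/* The index element width is independent of the data width: for i32m1 data
 * the ei8 overloads take vuint8mf4_t, ei16 vuint16mf2_t, ei32 vuint32m1_t
 * and ei64 vuint64m2_t, i.e. the index LMUL scales by <eew>/SEW so that the
 * index vector always carries exactly one byte offset per stored element. */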
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i32m1))) +void vsuxseg5ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i32m1_m))) +void vsuxseg5ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i32mf2))) +void vsuxseg5ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i32mf2_m))) +void vsuxseg5ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u32m1))) +void vsuxseg5ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u32m1_m))) +void vsuxseg5ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u32mf2))) +void vsuxseg5ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u32mf2_m))) +void vsuxseg5ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i32m1))) +void vsuxseg6ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i32m1_m))) +void vsuxseg6ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i32mf2))) +void vsuxseg6ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i32mf2_m))) +void vsuxseg6ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u32m1))) +void vsuxseg6ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u32m1_m))) +void vsuxseg6ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t 
op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u32mf2))) +void vsuxseg6ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u32mf2_m))) +void vsuxseg6ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i32m1))) +void vsuxseg7ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i32m1_m))) +void vsuxseg7ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i32mf2))) +void vsuxseg7ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i32mf2_m))) +void vsuxseg7ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u32m1))) +void vsuxseg7ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u32m1_m))) +void vsuxseg7ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u32mf2))) +void vsuxseg7ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u32mf2_m))) +void vsuxseg7ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i32m1))) +void vsuxseg8ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i32m1_m))) +void vsuxseg8ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded 
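/* Masked variant sketch (illustrative; vle32_v_u32m1 and the v0.10 compare
 * intrinsic vmsltu_vx_u32m1_b32 are assumed from this header): the _m
 * overloads take the mask first and leave memory untouched for elements
 * whose mask bit is clear.
 *
 *   size_t vl = vsetvl_e32m1(n);
 *   vuint32m1_t a = vle32_v_u32m1(src_a, vl);
 *   vuint32m1_t b = vle32_v_u32m1(src_b, vl);
 *   vuint16mf2_t idx = vle16_v_u16mf2(off, vl);
 *   vbool32_t m = vmsltu_vx_u32m1_b32(a, 128u, vl); // store only where a < 128
 *   vsuxseg2ei16(m, base, idx, a, b, vl);           // ..._v_u32m1_m
 */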
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i32mf2))) +void vsuxseg8ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i32mf2_m))) +void vsuxseg8ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u32m1))) +void vsuxseg8ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u32m1_m))) +void vsuxseg8ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u32mf2))) +void vsuxseg8ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u32mf2_m))) +void vsuxseg8ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32m1))) +void vsuxseg2ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32m1_m))) +void vsuxseg2ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32m2))) +void vsuxseg2ei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32m2_m))) +void vsuxseg2ei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32m4))) +void vsuxseg2ei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32m4_m))) +void vsuxseg2ei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32mf2))) +void vsuxseg2ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i32mf2_m))) +void vsuxseg2ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32m1))) +void vsuxseg2ei32(uint32_t * op0, vuint32m1_t 
op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32m1_m))) +void vsuxseg2ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32m2))) +void vsuxseg2ei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32m2_m))) +void vsuxseg2ei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32m4))) +void vsuxseg2ei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32m4_m))) +void vsuxseg2ei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32mf2))) +void vsuxseg2ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u32mf2_m))) +void vsuxseg2ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i32m1))) +void vsuxseg3ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i32m1_m))) +void vsuxseg3ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i32m2))) +void vsuxseg3ei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i32m2_m))) +void vsuxseg3ei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i32mf2))) +void vsuxseg3ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i32mf2_m))) +void vsuxseg3ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u32m1))) +void vsuxseg3ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u32m1_m))) +void vsuxseg3ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u32m2))) +void vsuxseg3ei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u32m2_m))) +void vsuxseg3ei32(vbool16_t op0, uint32_t * op1, 
vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u32mf2))) +void vsuxseg3ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u32mf2_m))) +void vsuxseg3ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i32m1))) +void vsuxseg4ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i32m1_m))) +void vsuxseg4ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i32m2))) +void vsuxseg4ei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i32m2_m))) +void vsuxseg4ei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i32mf2))) +void vsuxseg4ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i32mf2_m))) +void vsuxseg4ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u32m1))) +void vsuxseg4ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u32m1_m))) +void vsuxseg4ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u32m2))) +void vsuxseg4ei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u32m2_m))) +void vsuxseg4ei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u32mf2))) +void vsuxseg4ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u32mf2_m))) +void vsuxseg4ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i32m1))) +void vsuxseg5ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t 
op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i32m1_m))) +void vsuxseg5ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i32mf2))) +void vsuxseg5ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i32mf2_m))) +void vsuxseg5ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u32m1))) +void vsuxseg5ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u32m1_m))) +void vsuxseg5ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u32mf2))) +void vsuxseg5ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u32mf2_m))) +void vsuxseg5ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i32m1))) +void vsuxseg6ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i32m1_m))) +void vsuxseg6ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i32mf2))) +void vsuxseg6ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i32mf2_m))) +void vsuxseg6ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u32m1))) +void vsuxseg6ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u32m1_m))) +void vsuxseg6ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u32mf2))) +void vsuxseg6ei32(uint32_t * op0, vuint32mf2_t op1, 
vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u32mf2_m))) +void vsuxseg6ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i32m1))) +void vsuxseg7ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i32m1_m))) +void vsuxseg7ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i32mf2))) +void vsuxseg7ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i32mf2_m))) +void vsuxseg7ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u32m1))) +void vsuxseg7ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u32m1_m))) +void vsuxseg7ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u32mf2))) +void vsuxseg7ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u32mf2_m))) +void vsuxseg7ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i32m1))) +void vsuxseg8ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i32m1_m))) +void vsuxseg8ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i32mf2))) +void vsuxseg8ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, 
vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i32mf2_m))) +void vsuxseg8ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u32m1))) +void vsuxseg8ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u32m1_m))) +void vsuxseg8ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u32mf2))) +void vsuxseg8ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u32mf2_m))) +void vsuxseg8ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32m1))) +void vsuxseg2ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32m1_m))) +void vsuxseg2ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32m2))) +void vsuxseg2ei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32m2_m))) +void vsuxseg2ei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32m4))) +void vsuxseg2ei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32m4_m))) +void vsuxseg2ei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32mf2))) +void vsuxseg2ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i32mf2_m))) +void vsuxseg2ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32m1))) +void vsuxseg2ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32m1_m))) +void vsuxseg2ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, 
size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32m2))) +void vsuxseg2ei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32m2_m))) +void vsuxseg2ei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32m4))) +void vsuxseg2ei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32m4_m))) +void vsuxseg2ei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32mf2))) +void vsuxseg2ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u32mf2_m))) +void vsuxseg2ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i32m1))) +void vsuxseg3ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i32m1_m))) +void vsuxseg3ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i32m2))) +void vsuxseg3ei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i32m2_m))) +void vsuxseg3ei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i32mf2))) +void vsuxseg3ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i32mf2_m))) +void vsuxseg3ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u32m1))) +void vsuxseg3ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u32m1_m))) +void vsuxseg3ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u32m2))) +void vsuxseg3ei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u32m2_m))) +void vsuxseg3ei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u32mf2))) +void vsuxseg3ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, 
vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u32mf2_m))) +void vsuxseg3ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i32m1))) +void vsuxseg4ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i32m1_m))) +void vsuxseg4ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i32m2))) +void vsuxseg4ei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i32m2_m))) +void vsuxseg4ei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i32mf2))) +void vsuxseg4ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i32mf2_m))) +void vsuxseg4ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u32m1))) +void vsuxseg4ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u32m1_m))) +void vsuxseg4ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u32m2))) +void vsuxseg4ei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u32m2_m))) +void vsuxseg4ei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u32mf2))) +void vsuxseg4ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u32mf2_m))) +void vsuxseg4ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i32m1))) +void vsuxseg5ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i32m1_m))) +void vsuxseg5ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, 
vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i32mf2))) +void vsuxseg5ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i32mf2_m))) +void vsuxseg5ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u32m1))) +void vsuxseg5ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u32m1_m))) +void vsuxseg5ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u32mf2))) +void vsuxseg5ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u32mf2_m))) +void vsuxseg5ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i32m1))) +void vsuxseg6ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i32m1_m))) +void vsuxseg6ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i32mf2))) +void vsuxseg6ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i32mf2_m))) +void vsuxseg6ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u32m1))) +void vsuxseg6ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u32m1_m))) +void vsuxseg6ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u32mf2))) +void vsuxseg6ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u32mf2_m))) +void vsuxseg6ei64(vbool64_t op0, uint32_t * 
op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i32m1))) +void vsuxseg7ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i32m1_m))) +void vsuxseg7ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i32mf2))) +void vsuxseg7ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i32mf2_m))) +void vsuxseg7ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u32m1))) +void vsuxseg7ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u32m1_m))) +void vsuxseg7ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u32mf2))) +void vsuxseg7ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u32mf2_m))) +void vsuxseg7ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i32m1))) +void vsuxseg8ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i32m1_m))) +void vsuxseg8ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i32mf2))) +void vsuxseg8ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i32mf2_m))) +void vsuxseg8ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, 
vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u32m1))) +void vsuxseg8ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u32m1_m))) +void vsuxseg8ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u32mf2))) +void vsuxseg8ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u32mf2_m))) +void vsuxseg8ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i64m1))) +void vsuxseg2ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i64m1_m))) +void vsuxseg2ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i64m2))) +void vsuxseg2ei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i64m2_m))) +void vsuxseg2ei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i64m4))) +void vsuxseg2ei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i64m4_m))) +void vsuxseg2ei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u64m1))) +void vsuxseg2ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u64m1_m))) +void vsuxseg2ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u64m2))) +void vsuxseg2ei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u64m2_m))) +void vsuxseg2ei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u64m4))) +void vsuxseg2ei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u64m4_m))) +void vsuxseg2ei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i64m1))) +void vsuxseg3ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i64m1_m))) +void vsuxseg3ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i64m2))) +void vsuxseg3ei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_i64m2_m))) +void vsuxseg3ei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u64m1))) +void vsuxseg3ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u64m1_m))) +void vsuxseg3ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u64m2))) +void vsuxseg3ei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_u64m2_m))) +void vsuxseg3ei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i64m1))) +void vsuxseg4ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i64m1_m))) +void vsuxseg4ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i64m2))) +void vsuxseg4ei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_i64m2_m))) +void vsuxseg4ei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u64m1))) +void vsuxseg4ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u64m1_m))) +void vsuxseg4ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u64m2))) +void vsuxseg4ei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded 
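/*
 * Illustrative sketch (hypothetical names `dst`, `idx`, `f0`, `f1`,
 * `mask`, `n`; not from the header): with the vsuxseg2ei8 overloads
 * declared above, a two-field unordered indexed segment store of int64
 * data writes the pair { f0[i], f1[i] } at byte offset idx[i] from dst
 * for each active element i:
 *
 *   size_t vl = vsetvl_e64m1(n);             // element count for this strip
 *   vsuxseg2ei8(dst, idx, f0, f1, vl);       // dst: int64_t*, idx: vuint8mf8_t,
 *                                            // f0, f1: vint64m1_t
 *   vsuxseg2ei8(mask, dst, idx, f0, f1, vl); // masked form: vbool64_t mask,
 *                                            // inactive lanes leave memory untouched
 */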
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_u64m2_m))) +void vsuxseg4ei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i64m1))) +void vsuxseg5ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_i64m1_m))) +void vsuxseg5ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u64m1))) +void vsuxseg5ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_u64m1_m))) +void vsuxseg5ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i64m1))) +void vsuxseg6ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_i64m1_m))) +void vsuxseg6ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u64m1))) +void vsuxseg6ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_u64m1_m))) +void vsuxseg6ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i64m1))) +void vsuxseg7ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_i64m1_m))) +void vsuxseg7ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u64m1))) +void vsuxseg7ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_u64m1_m))) +void vsuxseg7ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i64m1))) +void vsuxseg8ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, 
vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_i64m1_m))) +void vsuxseg8ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u64m1))) +void vsuxseg8ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_u64m1_m))) +void vsuxseg8ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i64m1))) +void vsuxseg2ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i64m1_m))) +void vsuxseg2ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i64m2))) +void vsuxseg2ei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i64m2_m))) +void vsuxseg2ei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i64m4))) +void vsuxseg2ei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_i64m4_m))) +void vsuxseg2ei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u64m1))) +void vsuxseg2ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u64m1_m))) +void vsuxseg2ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u64m2))) +void vsuxseg2ei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u64m2_m))) +void vsuxseg2ei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u64m4))) +void vsuxseg2ei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_u64m4_m))) +void vsuxseg2ei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i64m1))) +void vsuxseg3ei16(int64_t * op0, vuint16mf4_t 
op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i64m1_m))) +void vsuxseg3ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i64m2))) +void vsuxseg3ei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_i64m2_m))) +void vsuxseg3ei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u64m1))) +void vsuxseg3ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u64m1_m))) +void vsuxseg3ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u64m2))) +void vsuxseg3ei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_u64m2_m))) +void vsuxseg3ei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i64m1))) +void vsuxseg4ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i64m1_m))) +void vsuxseg4ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i64m2))) +void vsuxseg4ei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_i64m2_m))) +void vsuxseg4ei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u64m1))) +void vsuxseg4ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u64m1_m))) +void vsuxseg4ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u64m2))) +void vsuxseg4ei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_u64m2_m))) +void vsuxseg4ei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i64m1))) +void 
vsuxseg5ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_i64m1_m))) +void vsuxseg5ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u64m1))) +void vsuxseg5ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_u64m1_m))) +void vsuxseg5ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i64m1))) +void vsuxseg6ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_i64m1_m))) +void vsuxseg6ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u64m1))) +void vsuxseg6ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_u64m1_m))) +void vsuxseg6ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i64m1))) +void vsuxseg7ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_i64m1_m))) +void vsuxseg7ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u64m1))) +void vsuxseg7ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_u64m1_m))) +void vsuxseg7ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i64m1))) +void vsuxseg8ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_i64m1_m))) +void vsuxseg8ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t 
op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u64m1))) +void vsuxseg8ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_u64m1_m))) +void vsuxseg8ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i64m1))) +void vsuxseg2ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i64m1_m))) +void vsuxseg2ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i64m2))) +void vsuxseg2ei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i64m2_m))) +void vsuxseg2ei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i64m4))) +void vsuxseg2ei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_i64m4_m))) +void vsuxseg2ei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u64m1))) +void vsuxseg2ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u64m1_m))) +void vsuxseg2ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u64m2))) +void vsuxseg2ei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u64m2_m))) +void vsuxseg2ei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u64m4))) +void vsuxseg2ei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_u64m4_m))) +void vsuxseg2ei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i64m1))) +void vsuxseg3ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i64m1_m))) +void vsuxseg3ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, 
size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i64m2))) +void vsuxseg3ei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_i64m2_m))) +void vsuxseg3ei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u64m1))) +void vsuxseg3ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u64m1_m))) +void vsuxseg3ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u64m2))) +void vsuxseg3ei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_u64m2_m))) +void vsuxseg3ei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i64m1))) +void vsuxseg4ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i64m1_m))) +void vsuxseg4ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i64m2))) +void vsuxseg4ei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_i64m2_m))) +void vsuxseg4ei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u64m1))) +void vsuxseg4ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u64m1_m))) +void vsuxseg4ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u64m2))) +void vsuxseg4ei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_u64m2_m))) +void vsuxseg4ei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i64m1))) +void vsuxseg5ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_i64m1_m))) +void vsuxseg5ei32(vbool64_t op0, int64_t * op1, 
vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u64m1))) +void vsuxseg5ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_u64m1_m))) +void vsuxseg5ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i64m1))) +void vsuxseg6ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_i64m1_m))) +void vsuxseg6ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u64m1))) +void vsuxseg6ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_u64m1_m))) +void vsuxseg6ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i64m1))) +void vsuxseg7ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_i64m1_m))) +void vsuxseg7ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u64m1))) +void vsuxseg7ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_u64m1_m))) +void vsuxseg7ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i64m1))) +void vsuxseg8ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_i64m1_m))) +void vsuxseg8ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u64m1))) +void vsuxseg8ei32(uint64_t * op0, vuint32mf2_t op1, 
vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_u64m1_m))) +void vsuxseg8ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i64m1))) +void vsuxseg2ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i64m1_m))) +void vsuxseg2ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i64m2))) +void vsuxseg2ei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i64m2_m))) +void vsuxseg2ei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i64m4))) +void vsuxseg2ei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_i64m4_m))) +void vsuxseg2ei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u64m1))) +void vsuxseg2ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u64m1_m))) +void vsuxseg2ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u64m2))) +void vsuxseg2ei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u64m2_m))) +void vsuxseg2ei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u64m4))) +void vsuxseg2ei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_u64m4_m))) +void vsuxseg2ei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i64m1))) +void vsuxseg3ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i64m1_m))) +void vsuxseg3ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i64m2))) +void vsuxseg3ei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded 
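/*
 * Aside on the mask pairing (inferred from the declarations here, not
 * stated in the header): each masked `_m` overload takes a vbool<N>_t
 * whose N equals SEW/LMUL of the data operands, e.g. the vint64m2_t
 * forms nearby pair with vbool32_t (64/2) and the vint64m1_t forms with
 * vbool64_t (64/1).
 */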
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_i64m2_m))) +void vsuxseg3ei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u64m1))) +void vsuxseg3ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u64m1_m))) +void vsuxseg3ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u64m2))) +void vsuxseg3ei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_u64m2_m))) +void vsuxseg3ei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i64m1))) +void vsuxseg4ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i64m1_m))) +void vsuxseg4ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i64m2))) +void vsuxseg4ei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_i64m2_m))) +void vsuxseg4ei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u64m1))) +void vsuxseg4ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u64m1_m))) +void vsuxseg4ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u64m2))) +void vsuxseg4ei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_u64m2_m))) +void vsuxseg4ei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i64m1))) +void vsuxseg5ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_i64m1_m))) +void vsuxseg5ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u64m1))) +void vsuxseg5ei64(uint64_t * op0, 
vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_u64m1_m))) +void vsuxseg5ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i64m1))) +void vsuxseg6ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_i64m1_m))) +void vsuxseg6ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u64m1))) +void vsuxseg6ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_u64m1_m))) +void vsuxseg6ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i64m1))) +void vsuxseg7ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_i64m1_m))) +void vsuxseg7ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u64m1))) +void vsuxseg7ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_u64m1_m))) +void vsuxseg7ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i64m1))) +void vsuxseg8ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_i64m1_m))) +void vsuxseg8ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u64m1))) +void vsuxseg8ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_u64m1_m))) +void 
vsuxseg8ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8m1))) +void vsoxseg2ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8m1_m))) +void vsoxseg2ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8m2))) +void vsoxseg2ei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8m2_m))) +void vsoxseg2ei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8m4))) +void vsoxseg2ei8(int8_t * op0, vuint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8m4_m))) +void vsoxseg2ei8(vbool2_t op0, int8_t * op1, vuint8m4_t op2, vint8m4_t op3, vint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8mf2))) +void vsoxseg2ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8mf2_m))) +void vsoxseg2ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8mf4))) +void vsoxseg2ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8mf4_m))) +void vsoxseg2ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8mf8))) +void vsoxseg2ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i8mf8_m))) +void vsoxseg2ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8m1))) +void vsoxseg2ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8m1_m))) +void vsoxseg2ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8m2))) +void vsoxseg2ei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8m2_m))) +void vsoxseg2ei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8m4))) +void vsoxseg2ei8(uint8_t * op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded 
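/*
 * Sketch for the ordered variant (hypothetical names `dst8`, `idx8`,
 * `a`, `b`, `vl`; not from the header): vsoxseg* matches vsuxseg*
 * argument-for-argument, but performs the indexed segment stores in
 * element order, so if two offsets in idx8 collide, the
 * higher-numbered element's segment is the one left in memory:
 *
 *   vsoxseg2ei8(dst8, idx8, a, b, vl);  // dst8: uint8_t*, idx8: vuint8m1_t,
 *                                       // a, b: vuint8m1_t segment fields
 */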
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8m4_m))) +void vsoxseg2ei8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, vuint8m4_t op3, vuint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8mf2))) +void vsoxseg2ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8mf2_m))) +void vsoxseg2ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8mf4))) +void vsoxseg2ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8mf4_m))) +void vsoxseg2ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8mf8))) +void vsoxseg2ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u8mf8_m))) +void vsoxseg2ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8m1))) +void vsoxseg3ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8m1_m))) +void vsoxseg3ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8m2))) +void vsoxseg3ei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8m2_m))) +void vsoxseg3ei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8mf2))) +void vsoxseg3ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8mf2_m))) +void vsoxseg3ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8mf4))) +void vsoxseg3ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8mf4_m))) +void vsoxseg3ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8mf8))) +void vsoxseg3ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i8mf8_m))) +void vsoxseg3ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8m1))) +void 
vsoxseg3ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8m1_m))) +void vsoxseg3ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8m2))) +void vsoxseg3ei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8m2_m))) +void vsoxseg3ei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8mf2))) +void vsoxseg3ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8mf2_m))) +void vsoxseg3ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8mf4))) +void vsoxseg3ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8mf4_m))) +void vsoxseg3ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8mf8))) +void vsoxseg3ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u8mf8_m))) +void vsoxseg3ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8m1))) +void vsoxseg4ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8m1_m))) +void vsoxseg4ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8m2))) +void vsoxseg4ei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8m2_m))) +void vsoxseg4ei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8mf2))) +void vsoxseg4ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8mf2_m))) +void vsoxseg4ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8mf4))) +void vsoxseg4ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, 
vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8mf4_m))) +void vsoxseg4ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8mf8))) +void vsoxseg4ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i8mf8_m))) +void vsoxseg4ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8m1))) +void vsoxseg4ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8m1_m))) +void vsoxseg4ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8m2))) +void vsoxseg4ei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8m2_m))) +void vsoxseg4ei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8mf2))) +void vsoxseg4ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8mf2_m))) +void vsoxseg4ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8mf4))) +void vsoxseg4ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8mf4_m))) +void vsoxseg4ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8mf8))) +void vsoxseg4ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u8mf8_m))) +void vsoxseg4ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8m1))) +void vsoxseg5ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8m1_m))) +void vsoxseg5ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8mf2))) +void vsoxseg5ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8mf2_m))) +void vsoxseg5ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8mf4))) +void vsoxseg5ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8mf4_m))) +void vsoxseg5ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8mf8))) +void vsoxseg5ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i8mf8_m))) +void vsoxseg5ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8m1))) +void vsoxseg5ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8m1_m))) +void vsoxseg5ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8mf2))) +void vsoxseg5ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8mf2_m))) +void vsoxseg5ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8mf4))) +void vsoxseg5ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8mf4_m))) +void vsoxseg5ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8mf8))) +void vsoxseg5ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u8mf8_m))) +void vsoxseg5ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8m1))) +void vsoxseg6ei8(int8_t * op0, vuint8m1_t op1, 
vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8m1_m))) +void vsoxseg6ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8mf2))) +void vsoxseg6ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8mf2_m))) +void vsoxseg6ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8mf4))) +void vsoxseg6ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8mf4_m))) +void vsoxseg6ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8mf8))) +void vsoxseg6ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i8mf8_m))) +void vsoxseg6ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8m1))) +void vsoxseg6ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8m1_m))) +void vsoxseg6ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8mf2))) +void vsoxseg6ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8mf2_m))) +void vsoxseg6ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8mf4))) +void vsoxseg6ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8mf4_m))) +void vsoxseg6ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8mf8))) +void vsoxseg6ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u8mf8_m))) +void vsoxseg6ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8m1))) +void vsoxseg7ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8m1_m))) +void vsoxseg7ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8mf2))) +void vsoxseg7ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8mf2_m))) +void vsoxseg7ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8mf4))) +void vsoxseg7ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8mf4_m))) +void vsoxseg7ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8mf8))) +void vsoxseg7ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i8mf8_m))) +void vsoxseg7ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8m1))) +void vsoxseg7ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8m1_m))) +void vsoxseg7ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8mf2))) +void vsoxseg7ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8mf2_m))) +void vsoxseg7ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8mf4))) +void vsoxseg7ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8mf4_m))) +void vsoxseg7ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8mf8))) +void vsoxseg7ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u8mf8_m))) +void vsoxseg7ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8m1))) +void vsoxseg8ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8m1_m))) +void vsoxseg8ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8mf2))) +void vsoxseg8ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8mf2_m))) +void vsoxseg8ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8mf4))) +void vsoxseg8ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8mf4_m))) +void vsoxseg8ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8mf8))) +void vsoxseg8ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i8mf8_m))) +void vsoxseg8ei8(vbool64_t op0, int8_t * 
op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8m1))) +void vsoxseg8ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8m1_m))) +void vsoxseg8ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8mf2))) +void vsoxseg8ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8mf2_m))) +void vsoxseg8ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8mf4))) +void vsoxseg8ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8mf4_m))) +void vsoxseg8ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8mf8))) +void vsoxseg8ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u8mf8_m))) +void vsoxseg8ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8m1))) +void vsoxseg2ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8m1_m))) +void vsoxseg2ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8m2))) +void vsoxseg2ei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8m2_m))) +void vsoxseg2ei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8m4))) +void vsoxseg2ei16(int8_t * op0, vuint16m8_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8m4_m))) +void vsoxseg2ei16(vbool2_t op0, int8_t * op1, vuint16m8_t op2, vint8m4_t op3, vint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8mf2))) +void vsoxseg2ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8mf2_m))) +void vsoxseg2ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8mf4))) +void vsoxseg2ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8mf4_m))) +void vsoxseg2ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8mf8))) +void vsoxseg2ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i8mf8_m))) +void vsoxseg2ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8m1))) +void vsoxseg2ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8m1_m))) +void vsoxseg2ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8m2))) +void vsoxseg2ei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8m2_m))) +void vsoxseg2ei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8m4))) +void vsoxseg2ei16(uint8_t * op0, vuint16m8_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8m4_m))) +void vsoxseg2ei16(vbool2_t op0, uint8_t * op1, vuint16m8_t op2, vuint8m4_t op3, vuint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8mf2))) +void vsoxseg2ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8mf2_m))) +void vsoxseg2ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8mf4))) +void vsoxseg2ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8mf4_m))) +void vsoxseg2ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8mf8))) +void vsoxseg2ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u8mf8_m))) +void vsoxseg2ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8m1))) +void vsoxseg3ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8m1_m))) +void vsoxseg3ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8m2))) +void vsoxseg3ei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8m2_m))) +void vsoxseg3ei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8mf2))) +void vsoxseg3ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8mf2_m))) +void vsoxseg3ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8mf4))) +void vsoxseg3ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8mf4_m))) +void vsoxseg3ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8mf8))) +void vsoxseg3ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i8mf8_m))) +void vsoxseg3ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8m1))) +void vsoxseg3ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8m1_m))) +void vsoxseg3ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8m2))) +void vsoxseg3ei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8m2_m))) +void vsoxseg3ei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8mf2))) +void vsoxseg3ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8mf2_m))) +void vsoxseg3ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, 
vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8mf4))) +void vsoxseg3ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8mf4_m))) +void vsoxseg3ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8mf8))) +void vsoxseg3ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u8mf8_m))) +void vsoxseg3ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8m1))) +void vsoxseg4ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8m1_m))) +void vsoxseg4ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8m2))) +void vsoxseg4ei16(int8_t * op0, vuint16m4_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8m2_m))) +void vsoxseg4ei16(vbool4_t op0, int8_t * op1, vuint16m4_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8mf2))) +void vsoxseg4ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8mf2_m))) +void vsoxseg4ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8mf4))) +void vsoxseg4ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8mf4_m))) +void vsoxseg4ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8mf8))) +void vsoxseg4ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i8mf8_m))) +void vsoxseg4ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8m1))) +void vsoxseg4ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8m1_m))) +void vsoxseg4ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, 
vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8m2))) +void vsoxseg4ei16(uint8_t * op0, vuint16m4_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8m2_m))) +void vsoxseg4ei16(vbool4_t op0, uint8_t * op1, vuint16m4_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8mf2))) +void vsoxseg4ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8mf2_m))) +void vsoxseg4ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8mf4))) +void vsoxseg4ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8mf4_m))) +void vsoxseg4ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8mf8))) +void vsoxseg4ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u8mf8_m))) +void vsoxseg4ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8m1))) +void vsoxseg5ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8m1_m))) +void vsoxseg5ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8mf2))) +void vsoxseg5ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8mf2_m))) +void vsoxseg5ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8mf4))) +void vsoxseg5ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8mf4_m))) +void vsoxseg5ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8mf8))) +void vsoxseg5ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, 
vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i8mf8_m))) +void vsoxseg5ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8m1))) +void vsoxseg5ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8m1_m))) +void vsoxseg5ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8mf2))) +void vsoxseg5ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8mf2_m))) +void vsoxseg5ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8mf4))) +void vsoxseg5ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8mf4_m))) +void vsoxseg5ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8mf8))) +void vsoxseg5ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u8mf8_m))) +void vsoxseg5ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8m1))) +void vsoxseg6ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8m1_m))) +void vsoxseg6ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8mf2))) +void vsoxseg6ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8mf2_m))) +void vsoxseg6ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8mf4))) +void vsoxseg6ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, 
vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8mf4_m))) +void vsoxseg6ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8mf8))) +void vsoxseg6ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i8mf8_m))) +void vsoxseg6ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8m1))) +void vsoxseg6ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8m1_m))) +void vsoxseg6ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8mf2))) +void vsoxseg6ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8mf2_m))) +void vsoxseg6ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8mf4))) +void vsoxseg6ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8mf4_m))) +void vsoxseg6ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8mf8))) +void vsoxseg6ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u8mf8_m))) +void vsoxseg6ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8m1))) +void vsoxseg7ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8m1_m))) +void vsoxseg7ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8mf2))) +void vsoxseg7ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8mf2_m))) +void vsoxseg7ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8mf4))) +void vsoxseg7ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8mf4_m))) +void vsoxseg7ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8mf8))) +void vsoxseg7ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i8mf8_m))) +void vsoxseg7ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8m1))) +void vsoxseg7ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8m1_m))) +void vsoxseg7ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8mf2))) +void vsoxseg7ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8mf2_m))) +void vsoxseg7ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8mf4))) +void vsoxseg7ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8mf4_m))) +void vsoxseg7ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8mf8))) +void vsoxseg7ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, 
vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u8mf8_m))) +void vsoxseg7ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8m1))) +void vsoxseg8ei16(int8_t * op0, vuint16m2_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8m1_m))) +void vsoxseg8ei16(vbool8_t op0, int8_t * op1, vuint16m2_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8mf2))) +void vsoxseg8ei16(int8_t * op0, vuint16m1_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8mf2_m))) +void vsoxseg8ei16(vbool16_t op0, int8_t * op1, vuint16m1_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8mf4))) +void vsoxseg8ei16(int8_t * op0, vuint16mf2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8mf4_m))) +void vsoxseg8ei16(vbool32_t op0, int8_t * op1, vuint16mf2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8mf8))) +void vsoxseg8ei16(int8_t * op0, vuint16mf4_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i8mf8_m))) +void vsoxseg8ei16(vbool64_t op0, int8_t * op1, vuint16mf4_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8m1))) +void vsoxseg8ei16(uint8_t * op0, vuint16m2_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8m1_m))) +void vsoxseg8ei16(vbool8_t op0, uint8_t * op1, vuint16m2_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8mf2))) +void vsoxseg8ei16(uint8_t * op0, vuint16m1_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, 
vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8mf2_m))) +void vsoxseg8ei16(vbool16_t op0, uint8_t * op1, vuint16m1_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8mf4))) +void vsoxseg8ei16(uint8_t * op0, vuint16mf2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8mf4_m))) +void vsoxseg8ei16(vbool32_t op0, uint8_t * op1, vuint16mf2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8mf8))) +void vsoxseg8ei16(uint8_t * op0, vuint16mf4_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u8mf8_m))) +void vsoxseg8ei16(vbool64_t op0, uint8_t * op1, vuint16mf4_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8m1))) +void vsoxseg2ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8m1_m))) +void vsoxseg2ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8m2))) +void vsoxseg2ei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8m2_m))) +void vsoxseg2ei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8mf2))) +void vsoxseg2ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8mf2_m))) +void vsoxseg2ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8mf4))) +void vsoxseg2ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8mf4_m))) +void vsoxseg2ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8mf8))) +void vsoxseg2ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i8mf8_m))) +void vsoxseg2ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8m1))) +void vsoxseg2ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8m1_m))) +void vsoxseg2ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8m2))) +void vsoxseg2ei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8m2_m))) +void vsoxseg2ei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8mf2))) +void vsoxseg2ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8mf2_m))) +void vsoxseg2ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8mf4))) +void vsoxseg2ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8mf4_m))) +void vsoxseg2ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8mf8))) +void vsoxseg2ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u8mf8_m))) +void vsoxseg2ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8m1))) +void vsoxseg3ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8m1_m))) +void vsoxseg3ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8m2))) +void vsoxseg3ei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8m2_m))) +void vsoxseg3ei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8mf2))) +void vsoxseg3ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8mf2_m))) +void vsoxseg3ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8mf4))) +void vsoxseg3ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8mf4_m))) +void vsoxseg3ei32(vbool32_t op0, 
int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8mf8))) +void vsoxseg3ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i8mf8_m))) +void vsoxseg3ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8m1))) +void vsoxseg3ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8m1_m))) +void vsoxseg3ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8m2))) +void vsoxseg3ei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8m2_m))) +void vsoxseg3ei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8mf2))) +void vsoxseg3ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8mf2_m))) +void vsoxseg3ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8mf4))) +void vsoxseg3ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8mf4_m))) +void vsoxseg3ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8mf8))) +void vsoxseg3ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u8mf8_m))) +void vsoxseg3ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8m1))) +void vsoxseg4ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8m1_m))) +void vsoxseg4ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8m2))) +void vsoxseg4ei32(int8_t * op0, vuint32m8_t op1, vint8m2_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8m2_m))) +void vsoxseg4ei32(vbool4_t op0, int8_t * op1, vuint32m8_t op2, vint8m2_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, size_t 
op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8mf2))) +void vsoxseg4ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8mf2_m))) +void vsoxseg4ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8mf4))) +void vsoxseg4ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8mf4_m))) +void vsoxseg4ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8mf8))) +void vsoxseg4ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i8mf8_m))) +void vsoxseg4ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8m1))) +void vsoxseg4ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8m1_m))) +void vsoxseg4ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8m2))) +void vsoxseg4ei32(uint8_t * op0, vuint32m8_t op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8m2_m))) +void vsoxseg4ei32(vbool4_t op0, uint8_t * op1, vuint32m8_t op2, vuint8m2_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8mf2))) +void vsoxseg4ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8mf2_m))) +void vsoxseg4ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8mf4))) +void vsoxseg4ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8mf4_m))) +void vsoxseg4ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8mf8))) +void vsoxseg4ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u8mf8_m))) +void 
vsoxseg4ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8m1))) +void vsoxseg5ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8m1_m))) +void vsoxseg5ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8mf2))) +void vsoxseg5ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8mf2_m))) +void vsoxseg5ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8mf4))) +void vsoxseg5ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8mf4_m))) +void vsoxseg5ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8mf8))) +void vsoxseg5ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i8mf8_m))) +void vsoxseg5ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8m1))) +void vsoxseg5ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8m1_m))) +void vsoxseg5ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8mf2))) +void vsoxseg5ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8mf2_m))) +void vsoxseg5ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8mf4))) +void vsoxseg5ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8mf4_m))) +void vsoxseg5ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, 
vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8mf8))) +void vsoxseg5ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u8mf8_m))) +void vsoxseg5ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8m1))) +void vsoxseg6ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8m1_m))) +void vsoxseg6ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8mf2))) +void vsoxseg6ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8mf2_m))) +void vsoxseg6ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8mf4))) +void vsoxseg6ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8mf4_m))) +void vsoxseg6ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8mf8))) +void vsoxseg6ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i8mf8_m))) +void vsoxseg6ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8m1))) +void vsoxseg6ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8m1_m))) +void vsoxseg6ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8mf2))) +void vsoxseg6ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8mf2_m))) +void vsoxseg6ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, 
vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8mf4))) +void vsoxseg6ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8mf4_m))) +void vsoxseg6ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8mf8))) +void vsoxseg6ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u8mf8_m))) +void vsoxseg6ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8m1))) +void vsoxseg7ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8m1_m))) +void vsoxseg7ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8mf2))) +void vsoxseg7ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8mf2_m))) +void vsoxseg7ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8mf4))) +void vsoxseg7ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8mf4_m))) +void vsoxseg7ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8mf8))) +void vsoxseg7ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i8mf8_m))) +void vsoxseg7ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8m1))) +void vsoxseg7ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, 
vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8m1_m))) +void vsoxseg7ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8mf2))) +void vsoxseg7ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8mf2_m))) +void vsoxseg7ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8mf4))) +void vsoxseg7ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8mf4_m))) +void vsoxseg7ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8mf8))) +void vsoxseg7ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u8mf8_m))) +void vsoxseg7ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8m1))) +void vsoxseg8ei32(int8_t * op0, vuint32m4_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8m1_m))) +void vsoxseg8ei32(vbool8_t op0, int8_t * op1, vuint32m4_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8mf2))) +void vsoxseg8ei32(int8_t * op0, vuint32m2_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8mf2_m))) +void vsoxseg8ei32(vbool16_t op0, int8_t * op1, vuint32m2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8mf4))) +void vsoxseg8ei32(int8_t * op0, vuint32m1_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + 
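
For reference, each pair of lines above binds an overloaded name (e.g. vsoxseg2ei32) to one type-specific builtin via clang_builtin_alias, so the compiler selects the builtin from the argument types. A minimal usage sketch follows, assuming a clang 14 toolchain targeting rv64gcv with this header included as <riscv_vector.h>; the function store_pairs and the variable names are illustrative only, not part of this header:

#include <riscv_vector.h>

/* Illustrative sketch: scatter {a[i], b[i]} pairs to dst at the byte
 * offsets in `offsets`. vsoxseg2ei32 is an indexed (unordered) 2-field
 * segment store: for each active lane i it writes a[i] to dst[idx[i]]
 * and b[i] to dst[idx[i] + 1]. */
void store_pairs(int8_t *dst, const uint32_t *offsets,
                 vint8m1_t a, vint8m1_t b, size_t vl) {
  /* Load 32-bit byte offsets to serve as the index operand (EEW=32,
   * so the index register group is u32m4 for i8m1 data). */
  vuint32m4_t idx = vle32_v_u32m4(offsets, vl);
  /* Overload resolution picks __builtin_rvv_vsoxseg2ei32_v_i8m1 here. */
  vsoxseg2ei32(dst, idx, a, b, vl);
}

The _m overloads declared above take a vbool mask as the leading argument and skip inactive lanes; in every variant the trailing size_t is the active vector length vl.
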
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8mf4_m))) +void vsoxseg8ei32(vbool32_t op0, int8_t * op1, vuint32m1_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8mf8))) +void vsoxseg8ei32(int8_t * op0, vuint32mf2_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i8mf8_m))) +void vsoxseg8ei32(vbool64_t op0, int8_t * op1, vuint32mf2_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8m1))) +void vsoxseg8ei32(uint8_t * op0, vuint32m4_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8m1_m))) +void vsoxseg8ei32(vbool8_t op0, uint8_t * op1, vuint32m4_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8mf2))) +void vsoxseg8ei32(uint8_t * op0, vuint32m2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8mf2_m))) +void vsoxseg8ei32(vbool16_t op0, uint8_t * op1, vuint32m2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8mf4))) +void vsoxseg8ei32(uint8_t * op0, vuint32m1_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8mf4_m))) +void vsoxseg8ei32(vbool32_t op0, uint8_t * op1, vuint32m1_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8mf8))) +void vsoxseg8ei32(uint8_t * op0, vuint32mf2_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u8mf8_m))) +void vsoxseg8ei32(vbool64_t op0, uint8_t * op1, vuint32mf2_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8m1))) +void vsoxseg2ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8m1_m))) +void vsoxseg2ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8mf2))) +void vsoxseg2ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8mf2_m))) +void vsoxseg2ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8mf4))) +void vsoxseg2ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8mf4_m))) +void vsoxseg2ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8mf8))) +void vsoxseg2ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i8mf8_m))) +void vsoxseg2ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8m1))) +void vsoxseg2ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8m1_m))) +void vsoxseg2ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8mf2))) +void vsoxseg2ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8mf2_m))) +void vsoxseg2ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8mf4))) +void vsoxseg2ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8mf4_m))) +void vsoxseg2ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8mf8))) +void vsoxseg2ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u8mf8_m))) +void vsoxseg2ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8m1))) +void vsoxseg3ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8m1_m))) +void vsoxseg3ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8mf2))) +void vsoxseg3ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t 
op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8mf2_m))) +void vsoxseg3ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8mf4))) +void vsoxseg3ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8mf4_m))) +void vsoxseg3ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8mf8))) +void vsoxseg3ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i8mf8_m))) +void vsoxseg3ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8m1))) +void vsoxseg3ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8m1_m))) +void vsoxseg3ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8mf2))) +void vsoxseg3ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8mf2_m))) +void vsoxseg3ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8mf4))) +void vsoxseg3ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8mf4_m))) +void vsoxseg3ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8mf8))) +void vsoxseg3ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u8mf8_m))) +void vsoxseg3ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8m1))) +void vsoxseg4ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8m1_m))) +void vsoxseg4ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8mf2))) +void vsoxseg4ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8mf2_m))) +void vsoxseg4ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8mf4))) +void vsoxseg4ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8mf4_m))) +void vsoxseg4ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8mf8))) +void vsoxseg4ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i8mf8_m))) +void vsoxseg4ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8m1))) +void vsoxseg4ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8m1_m))) +void vsoxseg4ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8mf2))) +void vsoxseg4ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8mf2_m))) +void vsoxseg4ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8mf4))) +void vsoxseg4ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8mf4_m))) +void vsoxseg4ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8mf8))) +void vsoxseg4ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u8mf8_m))) +void vsoxseg4ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8m1))) +void vsoxseg5ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8m1_m))) +void vsoxseg5ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8mf2))) +void vsoxseg5ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8mf2_m))) +void vsoxseg5ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8mf4))) +void vsoxseg5ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8mf4_m))) +void vsoxseg5ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8mf8))) +void vsoxseg5ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i8mf8_m))) +void vsoxseg5ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8m1))) +void vsoxseg5ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8m1_m))) +void vsoxseg5ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8mf2))) +void vsoxseg5ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8mf2_m))) +void vsoxseg5ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8mf4))) +void vsoxseg5ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8mf4_m))) +void vsoxseg5ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8mf8))) +void vsoxseg5ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u8mf8_m))) +void vsoxseg5ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8m1))) +void 
vsoxseg6ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8m1_m))) +void vsoxseg6ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8mf2))) +void vsoxseg6ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8mf2_m))) +void vsoxseg6ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8mf4))) +void vsoxseg6ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8mf4_m))) +void vsoxseg6ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8mf8))) +void vsoxseg6ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i8mf8_m))) +void vsoxseg6ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8m1))) +void vsoxseg6ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8m1_m))) +void vsoxseg6ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8mf2))) +void vsoxseg6ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8mf2_m))) +void vsoxseg6ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8mf4))) +void vsoxseg6ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8mf4_m))) +void vsoxseg6ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t 
op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8mf8))) +void vsoxseg6ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u8mf8_m))) +void vsoxseg6ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8m1))) +void vsoxseg7ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8m1_m))) +void vsoxseg7ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8mf2))) +void vsoxseg7ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8mf2_m))) +void vsoxseg7ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8mf4))) +void vsoxseg7ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8mf4_m))) +void vsoxseg7ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8mf8))) +void vsoxseg7ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i8mf8_m))) +void vsoxseg7ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8m1))) +void vsoxseg7ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8m1_m))) +void vsoxseg7ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8mf2))) +void vsoxseg7ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t 
op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8mf2_m))) +void vsoxseg7ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8mf4))) +void vsoxseg7ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8mf4_m))) +void vsoxseg7ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8mf8))) +void vsoxseg7ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u8mf8_m))) +void vsoxseg7ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8m1))) +void vsoxseg8ei64(int8_t * op0, vuint64m8_t op1, vint8m1_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8m1_m))) +void vsoxseg8ei64(vbool8_t op0, int8_t * op1, vuint64m8_t op2, vint8m1_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8mf2))) +void vsoxseg8ei64(int8_t * op0, vuint64m4_t op1, vint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8mf2_m))) +void vsoxseg8ei64(vbool16_t op0, int8_t * op1, vuint64m4_t op2, vint8mf2_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8mf4))) +void vsoxseg8ei64(int8_t * op0, vuint64m2_t op1, vint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8mf4_m))) +void vsoxseg8ei64(vbool32_t op0, int8_t * op1, vuint64m2_t op2, vint8mf4_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8mf8))) +void vsoxseg8ei64(int8_t * op0, vuint64m1_t op1, vint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i8mf8_m))) +void vsoxseg8ei64(vbool64_t op0, int8_t * op1, vuint64m1_t op2, vint8mf8_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8m1))) +void vsoxseg8ei64(uint8_t * op0, vuint64m8_t op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8m1_m))) +void vsoxseg8ei64(vbool8_t op0, uint8_t * op1, vuint64m8_t op2, vuint8m1_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8mf2))) +void vsoxseg8ei64(uint8_t * op0, vuint64m4_t op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8mf2_m))) +void vsoxseg8ei64(vbool16_t op0, uint8_t * op1, vuint64m4_t op2, vuint8mf2_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8mf4))) +void vsoxseg8ei64(uint8_t * op0, vuint64m2_t op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8mf4_m))) +void vsoxseg8ei64(vbool32_t op0, uint8_t * op1, vuint64m2_t op2, vuint8mf4_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8mf8))) +void vsoxseg8ei64(uint8_t * op0, vuint64m1_t op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u8mf8_m))) +void vsoxseg8ei64(vbool64_t op0, uint8_t * op1, vuint64m1_t op2, vuint8mf8_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16m1))) +void vsoxseg2ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16m1_m))) +void vsoxseg2ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16m2))) +void vsoxseg2ei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16m2_m))) +void vsoxseg2ei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16m4))) +void vsoxseg2ei8(int16_t * op0, vuint8m2_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16m4_m))) +void vsoxseg2ei8(vbool4_t op0, int16_t * op1, vuint8m2_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16mf2))) +void vsoxseg2ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16mf2_m))) +void vsoxseg2ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16mf4))) +void vsoxseg2ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i16mf4_m))) +void vsoxseg2ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16m1))) +void vsoxseg2ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16m1_m))) +void vsoxseg2ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16m2))) +void vsoxseg2ei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16m2_m))) +void vsoxseg2ei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16m4))) +void vsoxseg2ei8(uint16_t * op0, vuint8m2_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16m4_m))) +void vsoxseg2ei8(vbool4_t op0, uint16_t * op1, vuint8m2_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16mf2))) +void vsoxseg2ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16mf2_m))) +void vsoxseg2ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16mf4))) +void vsoxseg2ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u16mf4_m))) +void vsoxseg2ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16m1))) +void vsoxseg3ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16m1_m))) +void vsoxseg3ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, 
vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16m2))) +void vsoxseg3ei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16m2_m))) +void vsoxseg3ei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16mf2))) +void vsoxseg3ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16mf2_m))) +void vsoxseg3ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16mf4))) +void vsoxseg3ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i16mf4_m))) +void vsoxseg3ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16m1))) +void vsoxseg3ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16m1_m))) +void vsoxseg3ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16m2))) +void vsoxseg3ei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16m2_m))) +void vsoxseg3ei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16mf2))) +void vsoxseg3ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16mf2_m))) +void vsoxseg3ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16mf4))) +void vsoxseg3ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u16mf4_m))) +void vsoxseg3ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16m1))) +void vsoxseg4ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16m1_m))) +void vsoxseg4ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16m2))) +void vsoxseg4ei8(int16_t * op0, vuint8m1_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16m2_m))) +void vsoxseg4ei8(vbool8_t op0, int16_t * op1, vuint8m1_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16mf2))) +void vsoxseg4ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16mf2_m))) +void vsoxseg4ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16mf4))) +void vsoxseg4ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i16mf4_m))) +void vsoxseg4ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16m1))) +void vsoxseg4ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16m1_m))) +void vsoxseg4ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16m2))) +void vsoxseg4ei8(uint16_t * op0, vuint8m1_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16m2_m))) +void vsoxseg4ei8(vbool8_t op0, uint16_t * op1, vuint8m1_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16mf2))) +void vsoxseg4ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16mf2_m))) +void vsoxseg4ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16mf4))) +void vsoxseg4ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u16mf4_m))) +void vsoxseg4ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i16m1))) +void vsoxseg5ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i16m1_m))) +void vsoxseg5ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i16mf2))) +void vsoxseg5ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i16mf2_m))) +void vsoxseg5ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i16mf4))) +void vsoxseg5ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i16mf4_m))) +void vsoxseg5ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u16m1))) +void vsoxseg5ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u16m1_m))) +void vsoxseg5ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u16mf2))) +void vsoxseg5ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u16mf2_m))) +void vsoxseg5ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u16mf4))) +void vsoxseg5ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u16mf4_m))) +void vsoxseg5ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i16m1))) +void vsoxseg6ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i16m1_m))) +void vsoxseg6ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i16mf2))) +void vsoxseg6ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i16mf2_m))) +void vsoxseg6ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i16mf4))) +void vsoxseg6ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i16mf4_m))) +void vsoxseg6ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u16m1))) +void vsoxseg6ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u16m1_m))) +void vsoxseg6ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u16mf2))) +void vsoxseg6ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u16mf2_m))) +void vsoxseg6ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u16mf4))) +void vsoxseg6ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u16mf4_m))) +void vsoxseg6ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i16m1))) +void vsoxseg7ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i16m1_m))) +void vsoxseg7ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i16mf2))) +void vsoxseg7ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i16mf2_m))) +void vsoxseg7ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i16mf4))) +void vsoxseg7ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i16mf4_m))) +void vsoxseg7ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u16m1))) +void vsoxseg7ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u16m1_m))) +void vsoxseg7ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u16mf2))) +void vsoxseg7ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u16mf2_m))) +void vsoxseg7ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u16mf4))) +void vsoxseg7ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u16mf4_m))) +void vsoxseg7ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i16m1))) +void vsoxseg8ei8(int16_t * op0, vuint8mf2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i16m1_m))) +void vsoxseg8ei8(vbool16_t op0, int16_t * op1, vuint8mf2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i16mf2))) +void vsoxseg8ei8(int16_t * op0, vuint8mf4_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i16mf2_m))) +void vsoxseg8ei8(vbool32_t op0, int16_t * op1, vuint8mf4_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i16mf4))) +void vsoxseg8ei8(int16_t * op0, vuint8mf8_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i16mf4_m))) +void vsoxseg8ei8(vbool64_t op0, int16_t * op1, vuint8mf8_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u16m1))) +void vsoxseg8ei8(uint16_t * op0, vuint8mf2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u16m1_m))) +void vsoxseg8ei8(vbool16_t op0, uint16_t * op1, vuint8mf2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u16mf2))) +void vsoxseg8ei8(uint16_t * op0, vuint8mf4_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u16mf2_m))) +void vsoxseg8ei8(vbool32_t op0, uint16_t * op1, vuint8mf4_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u16mf4))) +void vsoxseg8ei8(uint16_t * op0, vuint8mf8_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u16mf4_m))) +void vsoxseg8ei8(vbool64_t op0, uint16_t * op1, vuint8mf8_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16m1))) +void vsoxseg2ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16m1_m))) +void vsoxseg2ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16m2))) +void vsoxseg2ei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16m2_m))) +void vsoxseg2ei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16m4))) +void vsoxseg2ei16(int16_t * op0, vuint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16m4_m))) +void vsoxseg2ei16(vbool4_t op0, int16_t * op1, 
vuint16m4_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16mf2))) +void vsoxseg2ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16mf2_m))) +void vsoxseg2ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16mf4))) +void vsoxseg2ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i16mf4_m))) +void vsoxseg2ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16m1))) +void vsoxseg2ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16m1_m))) +void vsoxseg2ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16m2))) +void vsoxseg2ei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16m2_m))) +void vsoxseg2ei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16m4))) +void vsoxseg2ei16(uint16_t * op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16m4_m))) +void vsoxseg2ei16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16mf2))) +void vsoxseg2ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16mf2_m))) +void vsoxseg2ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16mf4))) +void vsoxseg2ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u16mf4_m))) +void vsoxseg2ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16m1))) +void vsoxseg3ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16m1_m))) +void vsoxseg3ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16m2))) +void vsoxseg3ei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16m2_m))) +void vsoxseg3ei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16mf2))) +void vsoxseg3ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16mf2_m))) +void vsoxseg3ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16mf4))) +void vsoxseg3ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i16mf4_m))) +void vsoxseg3ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16m1))) +void vsoxseg3ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16m1_m))) +void vsoxseg3ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16m2))) +void vsoxseg3ei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16m2_m))) +void vsoxseg3ei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16mf2))) +void vsoxseg3ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16mf2_m))) +void vsoxseg3ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16mf4))) +void vsoxseg3ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u16mf4_m))) +void vsoxseg3ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16m1))) +void vsoxseg4ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16m1_m))) +void vsoxseg4ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16m2))) +void vsoxseg4ei16(int16_t * op0, vuint16m2_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16m2_m))) +void vsoxseg4ei16(vbool8_t op0, int16_t * op1, vuint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16mf2))) +void vsoxseg4ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16mf2_m))) +void vsoxseg4ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16mf4))) +void vsoxseg4ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i16mf4_m))) +void vsoxseg4ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16m1))) +void vsoxseg4ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16m1_m))) +void vsoxseg4ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16m2))) +void vsoxseg4ei16(uint16_t * op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16m2_m))) +void vsoxseg4ei16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16mf2))) +void vsoxseg4ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16mf2_m))) +void vsoxseg4ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16mf4))) +void vsoxseg4ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u16mf4_m))) +void vsoxseg4ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i16m1))) +void vsoxseg5ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i16m1_m))) +void vsoxseg5ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + 
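
The declarations above and below all follow one mechanical pattern: each C-level name such as vsoxseg2ei16 is an overloadable alias (via the __rvv_overloaded macro, defined earlier in this header as __attribute__((__overloadable__))) for a type-specific __builtin_rvv_* builtin, so a single spelling dispatches on operand types across every element width and LMUL variant, with the masked forms taking a leading vboolN_t. A minimal caller sketch, assuming a clang 14 RISC-V toolchain targeting rv64gcv; the function scatter_pairs and its parameter names are illustrative only and not part of this patch:

#include <riscv_vector.h>
#include <stdint.h>

/* Hypothetical example: store pairs (a[i], b[i]) as two-field segments at
 * byte offsets idx[i] within dst, using the overloaded indexed segment
 * store declared in this header. */
void scatter_pairs(int16_t *dst, const uint16_t *idx,
                   const int16_t *a, const int16_t *b, size_t n) {
  for (size_t done = 0; done < n;) {
    size_t vl = vsetvl_e16m1(n - done);              /* elements this pass */
    vuint16m1_t off = vle16_v_u16m1(idx + done, vl); /* byte offsets */
    vint16m1_t v0 = vle16_v_i16m1(a + done, vl);     /* segment field 0 */
    vint16m1_t v1 = vle16_v_i16m1(b + done, vl);     /* segment field 1 */
    /* Overload resolution picks __builtin_rvv_vsoxseg2ei16_v_i16m1. */
    vsoxseg2ei16(dst, off, v0, v1, vl);
    /* Passing a leading mask selects the _m builtin instead, e.g.:
     *   vbool16_t m = vmslt_vx_i16m1_b16(v0, 0, vl);
     *   vsoxseg2ei16(m, dst, off, v0, v1, vl);                          */
    done += vl;
  }
}

The ei32/ei64 groups further down repeat the same shape; only the index vector type (vuint32*_t/vuint64*_t) and the matching builtin suffix change, the call signature otherwise does not.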
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i16mf2))) +void vsoxseg5ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i16mf2_m))) +void vsoxseg5ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i16mf4))) +void vsoxseg5ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i16mf4_m))) +void vsoxseg5ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u16m1))) +void vsoxseg5ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u16m1_m))) +void vsoxseg5ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u16mf2))) +void vsoxseg5ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u16mf2_m))) +void vsoxseg5ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u16mf4))) +void vsoxseg5ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u16mf4_m))) +void vsoxseg5ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i16m1))) +void vsoxseg6ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i16m1_m))) +void vsoxseg6ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i16mf2))) +void vsoxseg6ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i16mf2_m))) +void vsoxseg6ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t 
op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i16mf4))) +void vsoxseg6ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i16mf4_m))) +void vsoxseg6ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u16m1))) +void vsoxseg6ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u16m1_m))) +void vsoxseg6ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u16mf2))) +void vsoxseg6ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u16mf2_m))) +void vsoxseg6ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u16mf4))) +void vsoxseg6ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u16mf4_m))) +void vsoxseg6ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i16m1))) +void vsoxseg7ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i16m1_m))) +void vsoxseg7ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i16mf2))) +void vsoxseg7ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i16mf2_m))) +void vsoxseg7ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i16mf4))) +void vsoxseg7ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, 
vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i16mf4_m))) +void vsoxseg7ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u16m1))) +void vsoxseg7ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u16m1_m))) +void vsoxseg7ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u16mf2))) +void vsoxseg7ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u16mf2_m))) +void vsoxseg7ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u16mf4))) +void vsoxseg7ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u16mf4_m))) +void vsoxseg7ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i16m1))) +void vsoxseg8ei16(int16_t * op0, vuint16m1_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i16m1_m))) +void vsoxseg8ei16(vbool16_t op0, int16_t * op1, vuint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i16mf2))) +void vsoxseg8ei16(int16_t * op0, vuint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i16mf2_m))) +void vsoxseg8ei16(vbool32_t op0, int16_t * op1, vuint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i16mf4))) +void vsoxseg8ei16(int16_t * op0, vuint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t 
op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i16mf4_m))) +void vsoxseg8ei16(vbool64_t op0, int16_t * op1, vuint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u16m1))) +void vsoxseg8ei16(uint16_t * op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u16m1_m))) +void vsoxseg8ei16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u16mf2))) +void vsoxseg8ei16(uint16_t * op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u16mf2_m))) +void vsoxseg8ei16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u16mf4))) +void vsoxseg8ei16(uint16_t * op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u16mf4_m))) +void vsoxseg8ei16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16m1))) +void vsoxseg2ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16m1_m))) +void vsoxseg2ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16m2))) +void vsoxseg2ei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16m2_m))) +void vsoxseg2ei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16m4))) +void vsoxseg2ei32(int16_t * op0, vuint32m8_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16m4_m))) +void vsoxseg2ei32(vbool4_t op0, int16_t * op1, vuint32m8_t op2, vint16m4_t op3, vint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16mf2))) +void 
vsoxseg2ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16mf2_m))) +void vsoxseg2ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16mf4))) +void vsoxseg2ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i16mf4_m))) +void vsoxseg2ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16m1))) +void vsoxseg2ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16m1_m))) +void vsoxseg2ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16m2))) +void vsoxseg2ei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16m2_m))) +void vsoxseg2ei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16m4))) +void vsoxseg2ei32(uint16_t * op0, vuint32m8_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16m4_m))) +void vsoxseg2ei32(vbool4_t op0, uint16_t * op1, vuint32m8_t op2, vuint16m4_t op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16mf2))) +void vsoxseg2ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16mf2_m))) +void vsoxseg2ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16mf4))) +void vsoxseg2ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u16mf4_m))) +void vsoxseg2ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16m1))) +void vsoxseg3ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16m1_m))) +void vsoxseg3ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16m2))) +void vsoxseg3ei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16m2_m))) +void vsoxseg3ei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, vint16m2_t op4, 
vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16mf2))) +void vsoxseg3ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16mf2_m))) +void vsoxseg3ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16mf4))) +void vsoxseg3ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i16mf4_m))) +void vsoxseg3ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16m1))) +void vsoxseg3ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16m1_m))) +void vsoxseg3ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16m2))) +void vsoxseg3ei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16m2_m))) +void vsoxseg3ei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16mf2))) +void vsoxseg3ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16mf2_m))) +void vsoxseg3ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16mf4))) +void vsoxseg3ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u16mf4_m))) +void vsoxseg3ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16m1))) +void vsoxseg4ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16m1_m))) +void vsoxseg4ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16m2))) +void vsoxseg4ei32(int16_t * op0, vuint32m4_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16m2_m))) +void vsoxseg4ei32(vbool8_t op0, int16_t * op1, vuint32m4_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, 
vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16mf2))) +void vsoxseg4ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16mf2_m))) +void vsoxseg4ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16mf4))) +void vsoxseg4ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i16mf4_m))) +void vsoxseg4ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16m1))) +void vsoxseg4ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16m1_m))) +void vsoxseg4ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16m2))) +void vsoxseg4ei32(uint16_t * op0, vuint32m4_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16m2_m))) +void vsoxseg4ei32(vbool8_t op0, uint16_t * op1, vuint32m4_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16mf2))) +void vsoxseg4ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16mf2_m))) +void vsoxseg4ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16mf4))) +void vsoxseg4ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u16mf4_m))) +void vsoxseg4ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i16m1))) +void vsoxseg5ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i16m1_m))) +void vsoxseg5ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i16mf2))) +void vsoxseg5ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, 
vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i16mf2_m))) +void vsoxseg5ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i16mf4))) +void vsoxseg5ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i16mf4_m))) +void vsoxseg5ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u16m1))) +void vsoxseg5ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u16m1_m))) +void vsoxseg5ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u16mf2))) +void vsoxseg5ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u16mf2_m))) +void vsoxseg5ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u16mf4))) +void vsoxseg5ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u16mf4_m))) +void vsoxseg5ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i16m1))) +void vsoxseg6ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i16m1_m))) +void vsoxseg6ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i16mf2))) +void vsoxseg6ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i16mf2_m))) +void vsoxseg6ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i16mf4))) +void vsoxseg6ei32(int16_t * op0, vuint32mf2_t op1, 
vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i16mf4_m))) +void vsoxseg6ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u16m1))) +void vsoxseg6ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u16m1_m))) +void vsoxseg6ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u16mf2))) +void vsoxseg6ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u16mf2_m))) +void vsoxseg6ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u16mf4))) +void vsoxseg6ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u16mf4_m))) +void vsoxseg6ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i16m1))) +void vsoxseg7ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i16m1_m))) +void vsoxseg7ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i16mf2))) +void vsoxseg7ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i16mf2_m))) +void vsoxseg7ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i16mf4))) +void vsoxseg7ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i16mf4_m))) +void 
vsoxseg7ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u16m1))) +void vsoxseg7ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u16m1_m))) +void vsoxseg7ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u16mf2))) +void vsoxseg7ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u16mf2_m))) +void vsoxseg7ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u16mf4))) +void vsoxseg7ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u16mf4_m))) +void vsoxseg7ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i16m1))) +void vsoxseg8ei32(int16_t * op0, vuint32m2_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i16m1_m))) +void vsoxseg8ei32(vbool16_t op0, int16_t * op1, vuint32m2_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i16mf2))) +void vsoxseg8ei32(int16_t * op0, vuint32m1_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i16mf2_m))) +void vsoxseg8ei32(vbool32_t op0, int16_t * op1, vuint32m1_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i16mf4))) +void vsoxseg8ei32(int16_t * op0, vuint32mf2_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i16mf4_m))) +void 
vsoxseg8ei32(vbool64_t op0, int16_t * op1, vuint32mf2_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u16m1))) +void vsoxseg8ei32(uint16_t * op0, vuint32m2_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u16m1_m))) +void vsoxseg8ei32(vbool16_t op0, uint16_t * op1, vuint32m2_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u16mf2))) +void vsoxseg8ei32(uint16_t * op0, vuint32m1_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u16mf2_m))) +void vsoxseg8ei32(vbool32_t op0, uint16_t * op1, vuint32m1_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u16mf4))) +void vsoxseg8ei32(uint16_t * op0, vuint32mf2_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u16mf4_m))) +void vsoxseg8ei32(vbool64_t op0, uint16_t * op1, vuint32mf2_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16m1))) +void vsoxseg2ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16m1_m))) +void vsoxseg2ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16m2))) +void vsoxseg2ei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16m2_m))) +void vsoxseg2ei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16mf2))) +void vsoxseg2ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16mf2_m))) +void vsoxseg2ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16mf4))) +void vsoxseg2ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i16mf4_m))) 
+void vsoxseg2ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16m1))) +void vsoxseg2ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16m1_m))) +void vsoxseg2ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16m2))) +void vsoxseg2ei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16m2_m))) +void vsoxseg2ei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16mf2))) +void vsoxseg2ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16mf2_m))) +void vsoxseg2ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16mf4))) +void vsoxseg2ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u16mf4_m))) +void vsoxseg2ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16m1))) +void vsoxseg3ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16m1_m))) +void vsoxseg3ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16m2))) +void vsoxseg3ei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16m2_m))) +void vsoxseg3ei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16mf2))) +void vsoxseg3ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16mf2_m))) +void vsoxseg3ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16mf4))) +void vsoxseg3ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i16mf4_m))) +void vsoxseg3ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16m1))) +void 
vsoxseg3ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16m1_m))) +void vsoxseg3ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16m2))) +void vsoxseg3ei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16m2_m))) +void vsoxseg3ei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16mf2))) +void vsoxseg3ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16mf2_m))) +void vsoxseg3ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16mf4))) +void vsoxseg3ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u16mf4_m))) +void vsoxseg3ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16m1))) +void vsoxseg4ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16m1_m))) +void vsoxseg4ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16m2))) +void vsoxseg4ei64(int16_t * op0, vuint64m8_t op1, vint16m2_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16m2_m))) +void vsoxseg4ei64(vbool8_t op0, int16_t * op1, vuint64m8_t op2, vint16m2_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16mf2))) +void vsoxseg4ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16mf2_m))) +void vsoxseg4ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16mf4))) +void vsoxseg4ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i16mf4_m))) +void vsoxseg4ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16m1))) +void vsoxseg4ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16m1_m))) +void vsoxseg4ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16m2))) +void vsoxseg4ei64(uint16_t * op0, vuint64m8_t op1, vuint16m2_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16m2_m))) +void vsoxseg4ei64(vbool8_t op0, uint16_t * op1, vuint64m8_t op2, vuint16m2_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16mf2))) +void vsoxseg4ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16mf2_m))) +void vsoxseg4ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16mf4))) +void vsoxseg4ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u16mf4_m))) +void vsoxseg4ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i16m1))) +void vsoxseg5ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i16m1_m))) +void vsoxseg5ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i16mf2))) +void vsoxseg5ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i16mf2_m))) +void vsoxseg5ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i16mf4))) +void vsoxseg5ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i16mf4_m))) +void vsoxseg5ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u16m1))) +void vsoxseg5ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, 
vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u16m1_m))) +void vsoxseg5ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u16mf2))) +void vsoxseg5ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u16mf2_m))) +void vsoxseg5ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u16mf4))) +void vsoxseg5ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u16mf4_m))) +void vsoxseg5ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i16m1))) +void vsoxseg6ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i16m1_m))) +void vsoxseg6ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i16mf2))) +void vsoxseg6ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i16mf2_m))) +void vsoxseg6ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i16mf4))) +void vsoxseg6ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i16mf4_m))) +void vsoxseg6ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u16m1))) +void vsoxseg6ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u16m1_m))) +void vsoxseg6ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u16mf2))) +void vsoxseg6ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u16mf2_m))) +void vsoxseg6ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u16mf4))) +void vsoxseg6ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u16mf4_m))) +void vsoxseg6ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i16m1))) +void vsoxseg7ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i16m1_m))) +void vsoxseg7ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i16mf2))) +void vsoxseg7ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i16mf2_m))) +void vsoxseg7ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i16mf4))) +void vsoxseg7ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i16mf4_m))) +void vsoxseg7ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u16m1))) +void vsoxseg7ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u16m1_m))) +void vsoxseg7ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u16mf2))) +void vsoxseg7ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, 
vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u16mf2_m))) +void vsoxseg7ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u16mf4))) +void vsoxseg7ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u16mf4_m))) +void vsoxseg7ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i16m1))) +void vsoxseg8ei64(int16_t * op0, vuint64m4_t op1, vint16m1_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i16m1_m))) +void vsoxseg8ei64(vbool16_t op0, int16_t * op1, vuint64m4_t op2, vint16m1_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i16mf2))) +void vsoxseg8ei64(int16_t * op0, vuint64m2_t op1, vint16mf2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i16mf2_m))) +void vsoxseg8ei64(vbool32_t op0, int16_t * op1, vuint64m2_t op2, vint16mf2_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i16mf4))) +void vsoxseg8ei64(int16_t * op0, vuint64m1_t op1, vint16mf4_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i16mf4_m))) +void vsoxseg8ei64(vbool64_t op0, int16_t * op1, vuint64m1_t op2, vint16mf4_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u16m1))) +void vsoxseg8ei64(uint16_t * op0, vuint64m4_t op1, vuint16m1_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u16m1_m))) +void vsoxseg8ei64(vbool16_t op0, uint16_t * op1, vuint64m4_t op2, vuint16m1_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u16mf2))) +void vsoxseg8ei64(uint16_t * op0, vuint64m2_t op1, vuint16mf2_t op2, 
vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u16mf2_m))) +void vsoxseg8ei64(vbool32_t op0, uint16_t * op1, vuint64m2_t op2, vuint16mf2_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u16mf4))) +void vsoxseg8ei64(uint16_t * op0, vuint64m1_t op1, vuint16mf4_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u16mf4_m))) +void vsoxseg8ei64(vbool64_t op0, uint16_t * op1, vuint64m1_t op2, vuint16mf4_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32m1))) +void vsoxseg2ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32m1_m))) +void vsoxseg2ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32m2))) +void vsoxseg2ei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32m2_m))) +void vsoxseg2ei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32m4))) +void vsoxseg2ei8(int32_t * op0, vuint8m1_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32m4_m))) +void vsoxseg2ei8(vbool8_t op0, int32_t * op1, vuint8m1_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32mf2))) +void vsoxseg2ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i32mf2_m))) +void vsoxseg2ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32m1))) +void vsoxseg2ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32m1_m))) +void vsoxseg2ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32m2))) +void vsoxseg2ei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32m2_m))) +void vsoxseg2ei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32m4))) +void 
vsoxseg2ei8(uint32_t * op0, vuint8m1_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32m4_m))) +void vsoxseg2ei8(vbool8_t op0, uint32_t * op1, vuint8m1_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32mf2))) +void vsoxseg2ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u32mf2_m))) +void vsoxseg2ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i32m1))) +void vsoxseg3ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i32m1_m))) +void vsoxseg3ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i32m2))) +void vsoxseg3ei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i32m2_m))) +void vsoxseg3ei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i32mf2))) +void vsoxseg3ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i32mf2_m))) +void vsoxseg3ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u32m1))) +void vsoxseg3ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u32m1_m))) +void vsoxseg3ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u32m2))) +void vsoxseg3ei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u32m2_m))) +void vsoxseg3ei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u32mf2))) +void vsoxseg3ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u32mf2_m))) +void vsoxseg3ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i32m1))) +void vsoxseg4ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i32m1_m))) +void vsoxseg4ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i32m2))) +void vsoxseg4ei8(int32_t * op0, vuint8mf2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i32m2_m))) +void vsoxseg4ei8(vbool16_t op0, int32_t * op1, vuint8mf2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i32mf2))) +void vsoxseg4ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i32mf2_m))) +void vsoxseg4ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u32m1))) +void vsoxseg4ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u32m1_m))) +void vsoxseg4ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u32m2))) +void vsoxseg4ei8(uint32_t * op0, vuint8mf2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u32m2_m))) +void vsoxseg4ei8(vbool16_t op0, uint32_t * op1, vuint8mf2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u32mf2))) +void vsoxseg4ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u32mf2_m))) +void vsoxseg4ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i32m1))) +void vsoxseg5ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i32m1_m))) +void vsoxseg5ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i32mf2))) +void vsoxseg5ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i32mf2_m))) +void vsoxseg5ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u32m1))) +void vsoxseg5ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u32m1_m))) +void vsoxseg5ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u32mf2))) +void vsoxseg5ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u32mf2_m))) +void vsoxseg5ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i32m1))) +void vsoxseg6ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i32m1_m))) +void vsoxseg6ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i32mf2))) +void vsoxseg6ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i32mf2_m))) +void vsoxseg6ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u32m1))) +void vsoxseg6ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u32m1_m))) +void vsoxseg6ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u32mf2))) +void vsoxseg6ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u32mf2_m))) +void vsoxseg6ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i32m1))) +void vsoxseg7ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i32m1_m))) +void vsoxseg7ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, 
vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i32mf2))) +void vsoxseg7ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i32mf2_m))) +void vsoxseg7ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u32m1))) +void vsoxseg7ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u32m1_m))) +void vsoxseg7ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u32mf2))) +void vsoxseg7ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u32mf2_m))) +void vsoxseg7ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i32m1))) +void vsoxseg8ei8(int32_t * op0, vuint8mf4_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i32m1_m))) +void vsoxseg8ei8(vbool32_t op0, int32_t * op1, vuint8mf4_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i32mf2))) +void vsoxseg8ei8(int32_t * op0, vuint8mf8_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i32mf2_m))) +void vsoxseg8ei8(vbool64_t op0, int32_t * op1, vuint8mf8_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u32m1))) +void vsoxseg8ei8(uint32_t * op0, vuint8mf4_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u32m1_m))) +void vsoxseg8ei8(vbool32_t op0, uint32_t * op1, vuint8mf4_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t 
op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u32mf2))) +void vsoxseg8ei8(uint32_t * op0, vuint8mf8_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u32mf2_m))) +void vsoxseg8ei8(vbool64_t op0, uint32_t * op1, vuint8mf8_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32m1))) +void vsoxseg2ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32m1_m))) +void vsoxseg2ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32m2))) +void vsoxseg2ei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32m2_m))) +void vsoxseg2ei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32m4))) +void vsoxseg2ei16(int32_t * op0, vuint16m2_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32m4_m))) +void vsoxseg2ei16(vbool8_t op0, int32_t * op1, vuint16m2_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32mf2))) +void vsoxseg2ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i32mf2_m))) +void vsoxseg2ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32m1))) +void vsoxseg2ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32m1_m))) +void vsoxseg2ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32m2))) +void vsoxseg2ei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32m2_m))) +void vsoxseg2ei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32m4))) +void vsoxseg2ei16(uint32_t * op0, vuint16m2_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32m4_m))) +void vsoxseg2ei16(vbool8_t op0, uint32_t * op1, vuint16m2_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32mf2))) +void vsoxseg2ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u32mf2_m))) +void vsoxseg2ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i32m1))) +void vsoxseg3ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i32m1_m))) +void vsoxseg3ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i32m2))) +void vsoxseg3ei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i32m2_m))) +void vsoxseg3ei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i32mf2))) +void vsoxseg3ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i32mf2_m))) +void vsoxseg3ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u32m1))) +void vsoxseg3ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u32m1_m))) +void vsoxseg3ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u32m2))) +void vsoxseg3ei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u32m2_m))) +void vsoxseg3ei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u32mf2))) +void vsoxseg3ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u32mf2_m))) +void vsoxseg3ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i32m1))) +void vsoxseg4ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i32m1_m))) +void vsoxseg4ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i32m2))) 
+void vsoxseg4ei16(int32_t * op0, vuint16m1_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i32m2_m))) +void vsoxseg4ei16(vbool16_t op0, int32_t * op1, vuint16m1_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i32mf2))) +void vsoxseg4ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i32mf2_m))) +void vsoxseg4ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u32m1))) +void vsoxseg4ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u32m1_m))) +void vsoxseg4ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u32m2))) +void vsoxseg4ei16(uint32_t * op0, vuint16m1_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u32m2_m))) +void vsoxseg4ei16(vbool16_t op0, uint32_t * op1, vuint16m1_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u32mf2))) +void vsoxseg4ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u32mf2_m))) +void vsoxseg4ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i32m1))) +void vsoxseg5ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i32m1_m))) +void vsoxseg5ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i32mf2))) +void vsoxseg5ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i32mf2_m))) +void vsoxseg5ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u32m1))) +void vsoxseg5ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u32m1_m))) +void vsoxseg5ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u32mf2))) +void vsoxseg5ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u32mf2_m))) +void vsoxseg5ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i32m1))) +void vsoxseg6ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i32m1_m))) +void vsoxseg6ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i32mf2))) +void vsoxseg6ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i32mf2_m))) +void vsoxseg6ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u32m1))) +void vsoxseg6ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u32m1_m))) +void vsoxseg6ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u32mf2))) +void vsoxseg6ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u32mf2_m))) +void vsoxseg6ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i32m1))) +void vsoxseg7ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i32m1_m))) +void vsoxseg7ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i32mf2))) +void vsoxseg7ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i32mf2_m))) +void vsoxseg7ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u32m1))) +void vsoxseg7ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u32m1_m))) +void vsoxseg7ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u32mf2))) +void vsoxseg7ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u32mf2_m))) +void vsoxseg7ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i32m1))) +void vsoxseg8ei16(int32_t * op0, vuint16mf2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i32m1_m))) +void vsoxseg8ei16(vbool32_t op0, int32_t * op1, vuint16mf2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i32mf2))) +void vsoxseg8ei16(int32_t * op0, vuint16mf4_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i32mf2_m))) +void vsoxseg8ei16(vbool64_t op0, int32_t * op1, vuint16mf4_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u32m1))) +void vsoxseg8ei16(uint32_t * op0, vuint16mf2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u32m1_m))) +void vsoxseg8ei16(vbool32_t op0, uint32_t * op1, vuint16mf2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u32mf2))) +void vsoxseg8ei16(uint32_t * op0, vuint16mf4_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u32mf2_m))) +void vsoxseg8ei16(vbool64_t op0, uint32_t * op1, vuint16mf4_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32m1))) +void vsoxseg2ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32m1_m))) +void vsoxseg2ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32m2))) +void vsoxseg2ei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32m2_m))) +void vsoxseg2ei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32m4))) +void vsoxseg2ei32(int32_t * op0, vuint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32m4_m))) +void vsoxseg2ei32(vbool8_t op0, int32_t * op1, vuint32m4_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32mf2))) +void vsoxseg2ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i32mf2_m))) +void vsoxseg2ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32m1))) +void vsoxseg2ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32m1_m))) +void vsoxseg2ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32m2))) +void vsoxseg2ei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32m2_m))) +void vsoxseg2ei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32m4))) +void vsoxseg2ei32(uint32_t * op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32m4_m))) +void vsoxseg2ei32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32mf2))) +void vsoxseg2ei32(uint32_t * op0, vuint32mf2_t op1, 
vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u32mf2_m))) +void vsoxseg2ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i32m1))) +void vsoxseg3ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i32m1_m))) +void vsoxseg3ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i32m2))) +void vsoxseg3ei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i32m2_m))) +void vsoxseg3ei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i32mf2))) +void vsoxseg3ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i32mf2_m))) +void vsoxseg3ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u32m1))) +void vsoxseg3ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u32m1_m))) +void vsoxseg3ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u32m2))) +void vsoxseg3ei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u32m2_m))) +void vsoxseg3ei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u32mf2))) +void vsoxseg3ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u32mf2_m))) +void vsoxseg3ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i32m1))) +void vsoxseg4ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i32m1_m))) +void vsoxseg4ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i32m2))) +void vsoxseg4ei32(int32_t * op0, vuint32m2_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + 
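+/* Editor's sketch (illustrative only, not part of the generated header):
+ * every overload in this family follows one shape. vsoxsegNeiK(base,
+ * bindex, v0..vN-1, vl) performs an indexed-ordered store of N-field
+ * segments, writing v0..vN-1 through base plus the K-bit unsigned byte
+ * offsets held in bindex; the masked forms prepend a vboolM_t mask as
+ * the first argument. A minimal caller under assumed flags
+ * -march=rv64gcv, using the clang 14 spellings declared in this header
+ * (the names dst, off, a, b, and length n are hypothetical):
+ *
+ *   size_t vl = vsetvl_e32m1(n);                // active vector length
+ *   vuint32m1_t idx = vle32_v_u32m1(off, vl);   // byte offsets into dst
+ *   vint32m1_t f0 = vle32_v_i32m1(a, vl);       // segment field 0
+ *   vint32m1_t f1 = vle32_v_i32m1(b, vl);       // segment field 1
+ *   vsoxseg2ei32(dst, idx, f0, f1, vl);         // resolves to the
+ *                                               // _v_i32m1 overload above
+ */
+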
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i32m2_m))) +void vsoxseg4ei32(vbool16_t op0, int32_t * op1, vuint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i32mf2))) +void vsoxseg4ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i32mf2_m))) +void vsoxseg4ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u32m1))) +void vsoxseg4ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u32m1_m))) +void vsoxseg4ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u32m2))) +void vsoxseg4ei32(uint32_t * op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u32m2_m))) +void vsoxseg4ei32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u32mf2))) +void vsoxseg4ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u32mf2_m))) +void vsoxseg4ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i32m1))) +void vsoxseg5ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i32m1_m))) +void vsoxseg5ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i32mf2))) +void vsoxseg5ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i32mf2_m))) +void vsoxseg5ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u32m1))) +void vsoxseg5ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u32m1_m))) +void vsoxseg5ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, 
vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u32mf2))) +void vsoxseg5ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u32mf2_m))) +void vsoxseg5ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i32m1))) +void vsoxseg6ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i32m1_m))) +void vsoxseg6ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i32mf2))) +void vsoxseg6ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i32mf2_m))) +void vsoxseg6ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u32m1))) +void vsoxseg6ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u32m1_m))) +void vsoxseg6ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u32mf2))) +void vsoxseg6ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u32mf2_m))) +void vsoxseg6ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i32m1))) +void vsoxseg7ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i32m1_m))) +void vsoxseg7ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i32mf2))) +void vsoxseg7ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, 
vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i32mf2_m))) +void vsoxseg7ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u32m1))) +void vsoxseg7ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u32m1_m))) +void vsoxseg7ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u32mf2))) +void vsoxseg7ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u32mf2_m))) +void vsoxseg7ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i32m1))) +void vsoxseg8ei32(int32_t * op0, vuint32m1_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i32m1_m))) +void vsoxseg8ei32(vbool32_t op0, int32_t * op1, vuint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i32mf2))) +void vsoxseg8ei32(int32_t * op0, vuint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i32mf2_m))) +void vsoxseg8ei32(vbool64_t op0, int32_t * op1, vuint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u32m1))) +void vsoxseg8ei32(uint32_t * op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u32m1_m))) +void vsoxseg8ei32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u32mf2))) +void vsoxseg8ei32(uint32_t * op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, 
vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u32mf2_m))) +void vsoxseg8ei32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32m1))) +void vsoxseg2ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32m1_m))) +void vsoxseg2ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32m2))) +void vsoxseg2ei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32m2_m))) +void vsoxseg2ei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32m4))) +void vsoxseg2ei64(int32_t * op0, vuint64m8_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32m4_m))) +void vsoxseg2ei64(vbool8_t op0, int32_t * op1, vuint64m8_t op2, vint32m4_t op3, vint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32mf2))) +void vsoxseg2ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i32mf2_m))) +void vsoxseg2ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32m1))) +void vsoxseg2ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32m1_m))) +void vsoxseg2ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32m2))) +void vsoxseg2ei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32m2_m))) +void vsoxseg2ei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32m4))) +void vsoxseg2ei64(uint32_t * op0, vuint64m8_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32m4_m))) +void vsoxseg2ei64(vbool8_t op0, uint32_t * op1, vuint64m8_t op2, vuint32m4_t op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32mf2))) +void vsoxseg2ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u32mf2_m))) +void vsoxseg2ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, 
size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i32m1))) +void vsoxseg3ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i32m1_m))) +void vsoxseg3ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i32m2))) +void vsoxseg3ei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i32m2_m))) +void vsoxseg3ei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i32mf2))) +void vsoxseg3ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i32mf2_m))) +void vsoxseg3ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u32m1))) +void vsoxseg3ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u32m1_m))) +void vsoxseg3ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u32m2))) +void vsoxseg3ei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u32m2_m))) +void vsoxseg3ei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u32mf2))) +void vsoxseg3ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u32mf2_m))) +void vsoxseg3ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i32m1))) +void vsoxseg4ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i32m1_m))) +void vsoxseg4ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i32m2))) +void vsoxseg4ei64(int32_t * op0, vuint64m4_t op1, vint32m2_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i32m2_m))) +void vsoxseg4ei64(vbool16_t op0, int32_t * op1, vuint64m4_t op2, vint32m2_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i32mf2))) +void vsoxseg4ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i32mf2_m))) +void vsoxseg4ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u32m1))) +void vsoxseg4ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u32m1_m))) +void vsoxseg4ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u32m2))) +void vsoxseg4ei64(uint32_t * op0, vuint64m4_t op1, vuint32m2_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u32m2_m))) +void vsoxseg4ei64(vbool16_t op0, uint32_t * op1, vuint64m4_t op2, vuint32m2_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u32mf2))) +void vsoxseg4ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u32mf2_m))) +void vsoxseg4ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i32m1))) +void vsoxseg5ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i32m1_m))) +void vsoxseg5ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i32mf2))) +void vsoxseg5ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i32mf2_m))) +void vsoxseg5ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u32m1))) +void vsoxseg5ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u32m1_m))) +void vsoxseg5ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u32mf2))) +void vsoxseg5ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, 
vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u32mf2_m))) +void vsoxseg5ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i32m1))) +void vsoxseg6ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i32m1_m))) +void vsoxseg6ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i32mf2))) +void vsoxseg6ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i32mf2_m))) +void vsoxseg6ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u32m1))) +void vsoxseg6ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u32m1_m))) +void vsoxseg6ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u32mf2))) +void vsoxseg6ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u32mf2_m))) +void vsoxseg6ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i32m1))) +void vsoxseg7ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i32m1_m))) +void vsoxseg7ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i32mf2))) +void vsoxseg7ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i32mf2_m))) +void vsoxseg7ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, 
vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u32m1))) +void vsoxseg7ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u32m1_m))) +void vsoxseg7ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u32mf2))) +void vsoxseg7ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u32mf2_m))) +void vsoxseg7ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i32m1))) +void vsoxseg8ei64(int32_t * op0, vuint64m2_t op1, vint32m1_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i32m1_m))) +void vsoxseg8ei64(vbool32_t op0, int32_t * op1, vuint64m2_t op2, vint32m1_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i32mf2))) +void vsoxseg8ei64(int32_t * op0, vuint64m1_t op1, vint32mf2_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i32mf2_m))) +void vsoxseg8ei64(vbool64_t op0, int32_t * op1, vuint64m1_t op2, vint32mf2_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u32m1))) +void vsoxseg8ei64(uint32_t * op0, vuint64m2_t op1, vuint32m1_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u32m1_m))) +void vsoxseg8ei64(vbool32_t op0, uint32_t * op1, vuint64m2_t op2, vuint32m1_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u32mf2))) +void vsoxseg8ei64(uint32_t * op0, vuint64m1_t op1, vuint32mf2_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u32mf2_m))) +void vsoxseg8ei64(vbool64_t op0, uint32_t * op1, vuint64m1_t op2, vuint32mf2_t op3, vuint32mf2_t op4, vuint32mf2_t op5, 
vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i64m1))) +void vsoxseg2ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i64m1_m))) +void vsoxseg2ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i64m2))) +void vsoxseg2ei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i64m2_m))) +void vsoxseg2ei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i64m4))) +void vsoxseg2ei8(int64_t * op0, vuint8mf2_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_i64m4_m))) +void vsoxseg2ei8(vbool16_t op0, int64_t * op1, vuint8mf2_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u64m1))) +void vsoxseg2ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u64m1_m))) +void vsoxseg2ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u64m2))) +void vsoxseg2ei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u64m2_m))) +void vsoxseg2ei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u64m4))) +void vsoxseg2ei8(uint64_t * op0, vuint8mf2_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_u64m4_m))) +void vsoxseg2ei8(vbool16_t op0, uint64_t * op1, vuint8mf2_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i64m1))) +void vsoxseg3ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i64m1_m))) +void vsoxseg3ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i64m2))) +void vsoxseg3ei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_i64m2_m))) +void vsoxseg3ei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u64m1))) +void vsoxseg3ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u64m1_m))) +void vsoxseg3ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u64m2))) +void vsoxseg3ei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_u64m2_m))) +void vsoxseg3ei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i64m1))) +void vsoxseg4ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i64m1_m))) +void vsoxseg4ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i64m2))) +void vsoxseg4ei8(int64_t * op0, vuint8mf4_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_i64m2_m))) +void vsoxseg4ei8(vbool32_t op0, int64_t * op1, vuint8mf4_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u64m1))) +void vsoxseg4ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u64m1_m))) +void vsoxseg4ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u64m2))) +void vsoxseg4ei8(uint64_t * op0, vuint8mf4_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_u64m2_m))) +void vsoxseg4ei8(vbool32_t op0, uint64_t * op1, vuint8mf4_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i64m1))) +void vsoxseg5ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_i64m1_m))) +void vsoxseg5ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u64m1))) +void vsoxseg5ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_u64m1_m))) +void vsoxseg5ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i64m1))) 
+void vsoxseg6ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_i64m1_m))) +void vsoxseg6ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u64m1))) +void vsoxseg6ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_u64m1_m))) +void vsoxseg6ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i64m1))) +void vsoxseg7ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_i64m1_m))) +void vsoxseg7ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u64m1))) +void vsoxseg7ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_u64m1_m))) +void vsoxseg7ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i64m1))) +void vsoxseg8ei8(int64_t * op0, vuint8mf8_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_i64m1_m))) +void vsoxseg8ei8(vbool64_t op0, int64_t * op1, vuint8mf8_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u64m1))) +void vsoxseg8ei8(uint64_t * op0, vuint8mf8_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_u64m1_m))) +void vsoxseg8ei8(vbool64_t op0, uint64_t * op1, vuint8mf8_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i64m1))) +void vsoxseg2ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i64m1_m))) 
+void vsoxseg2ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i64m2))) +void vsoxseg2ei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i64m2_m))) +void vsoxseg2ei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i64m4))) +void vsoxseg2ei16(int64_t * op0, vuint16m1_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_i64m4_m))) +void vsoxseg2ei16(vbool16_t op0, int64_t * op1, vuint16m1_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u64m1))) +void vsoxseg2ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u64m1_m))) +void vsoxseg2ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u64m2))) +void vsoxseg2ei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u64m2_m))) +void vsoxseg2ei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u64m4))) +void vsoxseg2ei16(uint64_t * op0, vuint16m1_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_u64m4_m))) +void vsoxseg2ei16(vbool16_t op0, uint64_t * op1, vuint16m1_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i64m1))) +void vsoxseg3ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i64m1_m))) +void vsoxseg3ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i64m2))) +void vsoxseg3ei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_i64m2_m))) +void vsoxseg3ei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u64m1))) +void vsoxseg3ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u64m1_m))) +void vsoxseg3ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u64m2))) +void vsoxseg3ei16(uint64_t * op0, vuint16mf2_t op1, 
vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_u64m2_m))) +void vsoxseg3ei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i64m1))) +void vsoxseg4ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i64m1_m))) +void vsoxseg4ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i64m2))) +void vsoxseg4ei16(int64_t * op0, vuint16mf2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_i64m2_m))) +void vsoxseg4ei16(vbool32_t op0, int64_t * op1, vuint16mf2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u64m1))) +void vsoxseg4ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u64m1_m))) +void vsoxseg4ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u64m2))) +void vsoxseg4ei16(uint64_t * op0, vuint16mf2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_u64m2_m))) +void vsoxseg4ei16(vbool32_t op0, uint64_t * op1, vuint16mf2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i64m1))) +void vsoxseg5ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_i64m1_m))) +void vsoxseg5ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u64m1))) +void vsoxseg5ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_u64m1_m))) +void vsoxseg5ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i64m1))) +void vsoxseg6ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_i64m1_m))) +void vsoxseg6ei16(vbool64_t op0, int64_t * op1, 
vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u64m1))) +void vsoxseg6ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_u64m1_m))) +void vsoxseg6ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i64m1))) +void vsoxseg7ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_i64m1_m))) +void vsoxseg7ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u64m1))) +void vsoxseg7ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_u64m1_m))) +void vsoxseg7ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i64m1))) +void vsoxseg8ei16(int64_t * op0, vuint16mf4_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_i64m1_m))) +void vsoxseg8ei16(vbool64_t op0, int64_t * op1, vuint16mf4_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u64m1))) +void vsoxseg8ei16(uint64_t * op0, vuint16mf4_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_u64m1_m))) +void vsoxseg8ei16(vbool64_t op0, uint64_t * op1, vuint16mf4_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i64m1))) +void vsoxseg2ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i64m1_m))) +void vsoxseg2ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i64m2))) +void vsoxseg2ei32(int64_t * op0, vuint32m1_t op1, vint64m2_t 
op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i64m2_m))) +void vsoxseg2ei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i64m4))) +void vsoxseg2ei32(int64_t * op0, vuint32m2_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_i64m4_m))) +void vsoxseg2ei32(vbool16_t op0, int64_t * op1, vuint32m2_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u64m1))) +void vsoxseg2ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u64m1_m))) +void vsoxseg2ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u64m2))) +void vsoxseg2ei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u64m2_m))) +void vsoxseg2ei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u64m4))) +void vsoxseg2ei32(uint64_t * op0, vuint32m2_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_u64m4_m))) +void vsoxseg2ei32(vbool16_t op0, uint64_t * op1, vuint32m2_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i64m1))) +void vsoxseg3ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i64m1_m))) +void vsoxseg3ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i64m2))) +void vsoxseg3ei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_i64m2_m))) +void vsoxseg3ei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u64m1))) +void vsoxseg3ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u64m1_m))) +void vsoxseg3ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u64m2))) +void vsoxseg3ei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_u64m2_m))) +void vsoxseg3ei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, 
size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i64m1))) +void vsoxseg4ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i64m1_m))) +void vsoxseg4ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i64m2))) +void vsoxseg4ei32(int64_t * op0, vuint32m1_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_i64m2_m))) +void vsoxseg4ei32(vbool32_t op0, int64_t * op1, vuint32m1_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u64m1))) +void vsoxseg4ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u64m1_m))) +void vsoxseg4ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u64m2))) +void vsoxseg4ei32(uint64_t * op0, vuint32m1_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_u64m2_m))) +void vsoxseg4ei32(vbool32_t op0, uint64_t * op1, vuint32m1_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i64m1))) +void vsoxseg5ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_i64m1_m))) +void vsoxseg5ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u64m1))) +void vsoxseg5ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_u64m1_m))) +void vsoxseg5ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i64m1))) +void vsoxseg6ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_i64m1_m))) +void vsoxseg6ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u64m1))) +void vsoxseg6ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t 
op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_u64m1_m))) +void vsoxseg6ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i64m1))) +void vsoxseg7ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_i64m1_m))) +void vsoxseg7ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u64m1))) +void vsoxseg7ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_u64m1_m))) +void vsoxseg7ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i64m1))) +void vsoxseg8ei32(int64_t * op0, vuint32mf2_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_i64m1_m))) +void vsoxseg8ei32(vbool64_t op0, int64_t * op1, vuint32mf2_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u64m1))) +void vsoxseg8ei32(uint64_t * op0, vuint32mf2_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_u64m1_m))) +void vsoxseg8ei32(vbool64_t op0, uint64_t * op1, vuint32mf2_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i64m1))) +void vsoxseg2ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i64m1_m))) +void vsoxseg2ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i64m2))) +void vsoxseg2ei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i64m2_m))) +void vsoxseg2ei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i64m4))) +void vsoxseg2ei64(int64_t * op0, vuint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_i64m4_m))) +void vsoxseg2ei64(vbool16_t op0, int64_t * op1, vuint64m4_t op2, vint64m4_t op3, vint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u64m1))) +void vsoxseg2ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u64m1_m))) +void vsoxseg2ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u64m2))) +void vsoxseg2ei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u64m2_m))) +void vsoxseg2ei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u64m4))) +void vsoxseg2ei64(uint64_t * op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_u64m4_m))) +void vsoxseg2ei64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, vuint64m4_t op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i64m1))) +void vsoxseg3ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i64m1_m))) +void vsoxseg3ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i64m2))) +void vsoxseg3ei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_i64m2_m))) +void vsoxseg3ei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u64m1))) +void vsoxseg3ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u64m1_m))) +void vsoxseg3ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u64m2))) +void vsoxseg3ei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_u64m2_m))) +void vsoxseg3ei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i64m1))) +void vsoxseg4ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i64m1_m))) +void vsoxseg4ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i64m2))) +void vsoxseg4ei64(int64_t * op0, vuint64m2_t op1, vint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_i64m2_m))) +void vsoxseg4ei64(vbool32_t op0, int64_t * op1, vuint64m2_t op2, vint64m2_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u64m1))) +void vsoxseg4ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u64m1_m))) +void vsoxseg4ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u64m2))) +void vsoxseg4ei64(uint64_t * op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_u64m2_m))) +void vsoxseg4ei64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, vuint64m2_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i64m1))) +void vsoxseg5ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_i64m1_m))) +void vsoxseg5ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u64m1))) +void vsoxseg5ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_u64m1_m))) +void vsoxseg5ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i64m1))) +void vsoxseg6ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_i64m1_m))) +void vsoxseg6ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u64m1))) +void vsoxseg6ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_u64m1_m))) +void vsoxseg6ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, 
vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i64m1))) +void vsoxseg7ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_i64m1_m))) +void vsoxseg7ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u64m1))) +void vsoxseg7ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_u64m1_m))) +void vsoxseg7ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i64m1))) +void vsoxseg8ei64(int64_t * op0, vuint64m1_t op1, vint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_i64m1_m))) +void vsoxseg8ei64(vbool64_t op0, int64_t * op1, vuint64m1_t op2, vint64m1_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u64m1))) +void vsoxseg8ei64(uint64_t * op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_u64m1_m))) +void vsoxseg8ei64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, vuint64m1_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_i8m1_m))) +void vlseg2e8(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_i8m2_m))) +void vlseg2e8(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_i8m4_m))) +void vlseg2e8(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_i8mf2_m))) +void vlseg2e8(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_i8mf4_m))) +void vlseg2e8(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, size_t op6); + +__rvv_overloaded 
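+/* Illustrative sketch, not part of the generated header: the overloaded
+ * vsoxseg<N>ei<EEW> forms above perform an indexed-ordered store of N
+ * fields per element through a vector of byte offsets. A hypothetical
+ * caller (the names base, offsets, field0, field1 and n are placeholders)
+ * using the v0.10-style intrinsic names this header provides might look like:
+ *
+ *   size_t vl = vsetvl_e64m1(n);
+ *   vuint64m1_t idx = vle64_v_u64m1(offsets, vl);   // byte offsets
+ *   vint64m1_t f0 = vle64_v_i64m1(field0, vl);
+ *   vint64m1_t f1 = vle64_v_i64m1(field1, vl);
+ *   vsoxseg2ei64(base, idx, f0, f1, vl);   // store interleaved {f0,f1} pairs
+ */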
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_i8mf8_m))) +void vlseg2e8(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_u8m1_m))) +void vlseg2e8(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_u8m2_m))) +void vlseg2e8(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_u8m4_m))) +void vlseg2e8(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_u8mf2_m))) +void vlseg2e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_u8mf4_m))) +void vlseg2e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8_v_u8mf8_m))) +void vlseg2e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_i8m1_m))) +void vlseg3e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_i8m2_m))) +void vlseg3e8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_i8mf2_m))) +void vlseg3e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_i8mf4_m))) +void vlseg3e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_i8mf8_m))) +void vlseg3e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_u8m1_m))) +void vlseg3e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_u8m2_m))) +void vlseg3e8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_u8mf2_m))) +void vlseg3e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const 
uint8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_u8mf4_m))) +void vlseg3e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8_v_u8mf8_m))) +void vlseg3e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_i8m1_m))) +void vlseg4e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_i8m2_m))) +void vlseg4e8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_i8mf2_m))) +void vlseg4e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_i8mf4_m))) +void vlseg4e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_i8mf8_m))) +void vlseg4e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_u8m1_m))) +void vlseg4e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_u8m2_m))) +void vlseg4e8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_u8mf2_m))) +void vlseg4e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_u8mf4_m))) +void vlseg4e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8_v_u8mf8_m))) +void vlseg4e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_i8m1_m))) +void vlseg5e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_i8mf2_m))) +void vlseg5e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_i8mf4_m))) +void vlseg5e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_i8mf8_m))) +void vlseg5e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_u8m1_m))) +void vlseg5e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_u8mf2_m))) +void vlseg5e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_u8mf4_m))) +void vlseg5e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8_v_u8mf8_m))) +void vlseg5e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_i8m1_m))) +void vlseg6e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_i8mf2_m))) +void vlseg6e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_i8mf4_m))) +void vlseg6e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t 
op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_i8mf8_m))) +void vlseg6e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_u8m1_m))) +void vlseg6e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_u8mf2_m))) +void vlseg6e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_u8mf4_m))) +void vlseg6e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8_v_u8mf8_m))) +void vlseg6e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_i8m1_m))) +void vlseg7e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_i8mf2_m))) +void vlseg7e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_i8mf4_m))) +void vlseg7e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_i8mf8_m))) +void vlseg7e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * 
op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_u8m1_m))) +void vlseg7e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_u8mf2_m))) +void vlseg7e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_u8mf4_m))) +void vlseg7e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8_v_u8mf8_m))) +void vlseg7e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_i8m1_m))) +void vlseg8e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_i8mf2_m))) +void vlseg8e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_i8mf4_m))) +void vlseg8e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_i8mf8_m))) +void vlseg8e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_u8m1_m))) +void vlseg8e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * 
op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_u8mf2_m))) +void vlseg8e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_u8mf4_m))) +void vlseg8e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8_v_u8mf8_m))) +void vlseg8e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_i16m1_m))) +void vlseg2e16(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_i16m2_m))) +void vlseg2e16(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_i16m4_m))) +void vlseg2e16(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_i16mf2_m))) +void vlseg2e16(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_i16mf4_m))) +void vlseg2e16(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_u16m1_m))) +void vlseg2e16(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_u16m2_m))) +void vlseg2e16(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_u16m4_m))) +void vlseg2e16(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_u16mf2_m))) +void 
vlseg2e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_u16mf4_m))) +void vlseg2e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_i16m1_m))) +void vlseg3e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_i16m2_m))) +void vlseg3e16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_i16mf2_m))) +void vlseg3e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_i16mf4_m))) +void vlseg3e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_u16m1_m))) +void vlseg3e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_u16m2_m))) +void vlseg3e16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_u16mf2_m))) +void vlseg3e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_u16mf4_m))) +void vlseg3e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_i16m1_m))) +void vlseg4e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_i16m2_m))) +void vlseg4e16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_i16mf2_m))) +void vlseg4e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_i16mf4_m))) +void vlseg4e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, 
vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_u16m1_m))) +void vlseg4e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_u16m2_m))) +void vlseg4e16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_u16mf2_m))) +void vlseg4e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_u16mf4_m))) +void vlseg4e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_i16m1_m))) +void vlseg5e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_i16mf2_m))) +void vlseg5e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_i16mf4_m))) +void vlseg5e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_u16m1_m))) +void vlseg5e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_u16mf2_m))) +void vlseg5e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_u16mf4_m))) +void vlseg5e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_i16m1_m))) +void vlseg6e16(vint16m1_t * op0, 
vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_i16mf2_m))) +void vlseg6e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_i16mf4_m))) +void vlseg6e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_u16m1_m))) +void vlseg6e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_u16mf2_m))) +void vlseg6e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_u16mf4_m))) +void vlseg6e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_i16m1_m))) +void vlseg7e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_i16mf2_m))) +void vlseg7e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_i16mf4_m))) +void vlseg7e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_u16m1_m))) +void vlseg7e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t 
* op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_u16mf2_m))) +void vlseg7e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_u16mf4_m))) +void vlseg7e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_i16m1_m))) +void vlseg8e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_i16mf2_m))) +void vlseg8e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_i16mf4_m))) +void vlseg8e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_u16m1_m))) +void vlseg8e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_u16mf2_m))) +void vlseg8e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_u16mf4_m))) +void vlseg8e16(vuint16mf4_t * op0, vuint16mf4_t * op1, 
vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_i32m1_m))) +void vlseg2e32(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_i32m2_m))) +void vlseg2e32(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_i32m4_m))) +void vlseg2e32(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_i32mf2_m))) +void vlseg2e32(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_u32m1_m))) +void vlseg2e32(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_u32m2_m))) +void vlseg2e32(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_u32m4_m))) +void vlseg2e32(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_u32mf2_m))) +void vlseg2e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_i32m1_m))) +void vlseg3e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_i32m2_m))) +void vlseg3e32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_i32mf2_m))) +void vlseg3e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_u32m1_m))) +void vlseg3e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_u32m2_m))) +void vlseg3e32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, size_t op8); + +__rvv_overloaded 
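+/* Illustrative sketch, not part of the generated header: the masked
+ * vlseg<N>e<EEW> overloads above deinterleave N fields from memory into
+ * N destination registers; lanes where the mask bit is clear keep the
+ * corresponding maskedoff operand values. Hypothetical use (all variable
+ * names are placeholders) to split interleaved {re,im} int16 pairs:
+ *
+ *   size_t vl = vsetvl_e16m1(n);
+ *   vint16m1_t re, im;
+ *   vlseg2e16(&re, &im, mask, old_re, old_im, src, vl);
+ */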
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_u32mf2_m))) +void vlseg3e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_i32m1_m))) +void vlseg4e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_i32m2_m))) +void vlseg4e32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_i32mf2_m))) +void vlseg4e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_u32m1_m))) +void vlseg4e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_u32m2_m))) +void vlseg4e32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_u32mf2_m))) +void vlseg4e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32_v_i32m1_m))) +void vlseg5e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32_v_i32mf2_m))) +void vlseg5e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32_v_u32m1_m))) +void vlseg5e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32_v_u32mf2_m))) +void vlseg5e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32_v_i32m1_m))) +void vlseg6e32(vint32m1_t * op0, 
vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32_v_i32mf2_m))) +void vlseg6e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32_v_u32m1_m))) +void vlseg6e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32_v_u32mf2_m))) +void vlseg6e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32_v_i32m1_m))) +void vlseg7e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32_v_i32mf2_m))) +void vlseg7e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32_v_u32m1_m))) +void vlseg7e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32_v_u32mf2_m))) +void vlseg7e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32_v_i32m1_m))) +void vlseg8e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, size_t op18); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32_v_i32mf2_m)))
+void vlseg8e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32_v_u32m1_m)))
+void vlseg8e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32_v_u32mf2_m)))
+void vlseg8e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_i64m1_m)))
+void vlseg2e64(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_i64m2_m)))
+void vlseg2e64(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_i64m4_m)))
+void vlseg2e64(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_u64m1_m)))
+void vlseg2e64(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_u64m2_m)))
+void vlseg2e64(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_u64m4_m)))
+void vlseg2e64(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64_v_i64m1_m)))
+void vlseg3e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64_v_i64m2_m)))
+void vlseg3e64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64_v_u64m1_m)))
+void vlseg3e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64_v_u64m2_m)))
+void vlseg3e64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64_v_i64m1_m)))
+void vlseg4e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64_v_i64m2_m)))
+void vlseg4e64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64_v_u64m1_m)))
+void vlseg4e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64_v_u64m2_m)))
+void vlseg4e64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e64_v_i64m1_m)))
+void vlseg5e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, size_t op12);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e64_v_u64m1_m)))
+void vlseg5e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, size_t op12);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e64_v_i64m1_m)))
+void vlseg6e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, size_t op14);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e64_v_u64m1_m)))
+void vlseg6e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, size_t op14);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e64_v_i64m1_m)))
+void vlseg7e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, size_t op16);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e64_v_u64m1_m)))
+void vlseg7e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, size_t op16);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e64_v_i64m1_m)))
+void vlseg8e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e64_v_u64m1_m)))
+void vlseg8e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_i8m1_m)))
+void vlseg3e8ff(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_i8m2_m)))
+void vlseg3e8ff(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_i8mf2_m)))
+void vlseg3e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_i8mf4_m)))
+void vlseg3e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_i8mf8_m)))
+void vlseg3e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_u8m1_m)))
+void vlseg3e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_u8m2_m)))
+void vlseg3e8ff(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_u8mf2_m)))
+void vlseg3e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_u8mf4_m)))
+void vlseg3e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e8ff_v_u8mf8_m)))
+void vlseg3e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_i8m1_m)))
+void vlseg4e8ff(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_i8m2_m)))
+void vlseg4e8ff(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_i8mf2_m)))
+void vlseg4e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_i8mf4_m)))
+void vlseg4e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_i8mf8_m)))
+void vlseg4e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_u8m1_m)))
+void vlseg4e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_u8m2_m)))
+void vlseg4e8ff(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_u8mf2_m)))
+void vlseg4e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_u8mf4_m)))
+void vlseg4e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e8ff_v_u8mf8_m)))
+void vlseg4e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_i8m1_m)))
+void vlseg5e8ff(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_i8mf2_m)))
+void vlseg5e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_i8mf4_m)))
+void vlseg5e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_i8mf8_m)))
+void vlseg5e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_u8m1_m)))
+void vlseg5e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_u8mf2_m)))
+void vlseg5e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_u8mf4_m)))
+void vlseg5e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e8ff_v_u8mf8_m)))
+void vlseg5e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_i8m1_m)))
+void vlseg6e8ff(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_i8mf2_m)))
+void vlseg6e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_i8mf4_m)))
+void vlseg6e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_i8mf8_m)))
+void vlseg6e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_u8m1_m)))
+void vlseg6e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_u8mf2_m)))
+void vlseg6e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_u8mf4_m)))
+void vlseg6e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e8ff_v_u8mf8_m)))
+void vlseg6e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_i8m1_m)))
+void vlseg7e8ff(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_i8mf2_m)))
+void vlseg7e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_i8mf4_m)))
+void vlseg7e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_i8mf8_m)))
+void vlseg7e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_u8m1_m)))
+void vlseg7e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_u8mf2_m)))
+void vlseg7e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_u8mf4_m)))
+void vlseg7e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e8ff_v_u8mf8_m)))
+void vlseg7e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_i8m1_m)))
+void vlseg8e8ff(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_i8mf2_m)))
+void vlseg8e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_i8mf4_m)))
+void vlseg8e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_i8mf8_m)))
+void vlseg8e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_u8m1_m)))
+void vlseg8e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_u8mf2_m)))
+void vlseg8e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_u8mf4_m)))
+void vlseg8e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e8ff_v_u8mf8_m)))
+void vlseg8e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_i16m1_m)))
+void vlseg2e16ff(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_i16m2_m)))
+void vlseg2e16ff(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_i16m4_m)))
+void vlseg2e16ff(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_i16mf2_m)))
+void vlseg2e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_i16mf4_m)))
+void vlseg2e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_u16m1_m)))
+void vlseg2e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_u16m2_m)))
+void vlseg2e16ff(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_u16m4_m)))
+void vlseg2e16ff(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_u16mf2_m)))
+void vlseg2e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_u16mf4_m)))
+void vlseg2e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_i16m1_m)))
+void vlseg3e16ff(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_i16m2_m)))
+void vlseg3e16ff(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_i16mf2_m)))
+void vlseg3e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_i16mf4_m)))
+void vlseg3e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_u16m1_m)))
+void vlseg3e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_u16m2_m)))
+void vlseg3e16ff(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_u16mf2_m)))
+void vlseg3e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_u16mf4_m)))
+void vlseg3e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_i16m1_m)))
+void vlseg4e16ff(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_i16m2_m)))
+void vlseg4e16ff(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_i16mf2_m)))
+void vlseg4e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_i16mf4_m)))
+void vlseg4e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_u16m1_m)))
+void vlseg4e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_u16m2_m)))
+void vlseg4e16ff(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_u16mf2_m)))
+void vlseg4e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_u16mf4_m)))
+void vlseg4e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_i16m1_m)))
+void vlseg5e16ff(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_i16mf2_m)))
+void vlseg5e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_i16mf4_m)))
+void vlseg5e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_u16m1_m)))
+void vlseg5e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_u16mf2_m)))
+void vlseg5e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_u16mf4_m)))
+void vlseg5e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_i16m1_m)))
+void vlseg6e16ff(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_i16mf2_m)))
+void vlseg6e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_i16mf4_m)))
+void vlseg6e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_u16m1_m)))
+void vlseg6e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_u16mf2_m)))
+void vlseg6e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_u16mf4_m)))
+void vlseg6e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_i16m1_m)))
+void vlseg7e16ff(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_i16mf2_m)))
+void vlseg7e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_i16mf4_m)))
+void vlseg7e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_u16m1_m)))
+void vlseg7e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_u16mf2_m)))
+void vlseg7e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_u16mf4_m)))
+void vlseg7e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_i16m1_m)))
+void vlseg8e16ff(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_i16mf2_m)))
+void vlseg8e16ff(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_i16mf4_m)))
+void vlseg8e16ff(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_u16m1_m)))
+void vlseg8e16ff(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_u16mf2_m)))
+void vlseg8e16ff(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_u16mf4_m)))
+void vlseg8e16ff(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_i32m1_m)))
+void vlseg2e32ff(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_i32m2_m)))
+void vlseg2e32ff(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_i32m4_m)))
+void vlseg2e32ff(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_i32mf2_m)))
+void vlseg2e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_i8m1_m)))
+void vlseg2e8ff(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_i8m2_m)))
+void vlseg2e8ff(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_i8m4_m)))
+void vlseg2e8ff(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_i8mf2_m)))
+void vlseg2e8ff(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_i8mf4_m)))
+void vlseg2e8ff(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_i8mf8_m)))
+void vlseg2e8ff(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_u32m1_m)))
+void vlseg2e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_u32m2_m)))
+void vlseg2e32ff(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_u32m4_m)))
+void vlseg2e32ff(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_u32mf2_m)))
+void vlseg2e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_i32m1_m)))
+void vlseg3e32ff(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_i32m2_m)))
+void vlseg3e32ff(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_i32mf2_m)))
+void vlseg3e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_u32m1_m)))
+void vlseg3e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_u32m2_m)))
+void vlseg3e32ff(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_u32mf2_m)))
+void vlseg3e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_i32m1_m)))
+void vlseg4e32ff(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_i32m2_m)))
+void vlseg4e32ff(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_i32mf2_m)))
+void vlseg4e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_u32m1_m)))
+void vlseg4e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_u32m2_m)))
+void vlseg4e32ff(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_u32mf2_m)))
+void vlseg4e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32ff_v_i32m1_m)))
+void vlseg5e32ff(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32ff_v_i32mf2_m)))
+void vlseg5e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32ff_v_u32m1_m)))
+void vlseg5e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32ff_v_u32mf2_m)))
+void vlseg5e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32ff_v_i32m1_m)))
+void vlseg6e32ff(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32ff_v_i32mf2_m)))
+void vlseg6e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32ff_v_u32m1_m)))
+void vlseg6e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32ff_v_u32mf2_m)))
+void vlseg6e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32ff_v_i32m1_m)))
+void vlseg7e32ff(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32ff_v_i32mf2_m)))
+void vlseg7e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_u8m1_m)))
+void vlseg2e8ff(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_u8m2_m)))
+void vlseg2e8ff(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_u8m4_m)))
+void vlseg2e8ff(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_u8mf2_m)))
+void vlseg2e8ff(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_u8mf4_m)))
+void vlseg2e8ff(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e8ff_v_u8mf8_m)))
+void vlseg2e8ff(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32ff_v_u32m1_m)))
+void vlseg7e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32ff_v_u32mf2_m)))
+void vlseg7e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32ff_v_i32m1_m)))
+void vlseg8e32ff(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32ff_v_i32mf2_m)))
+void vlseg8e32ff(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32ff_v_u32m1_m)))
+void vlseg8e32ff(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32ff_v_u32mf2_m)))
+void vlseg8e32ff(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_i64m1_m)))
+void vlseg2e64ff(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_i64m2_m)))
+void vlseg2e64ff(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_i64m4_m)))
+void vlseg2e64ff(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_u64m1_m)))
+void vlseg2e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_u64m2_m)))
+void vlseg2e64ff(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_u64m4_m)))
+void vlseg2e64ff(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64ff_v_i64m1_m)))
+void vlseg3e64ff(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64ff_v_i64m2_m)))
+void vlseg3e64ff(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64ff_v_u64m1_m)))
+void vlseg3e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64ff_v_u64m2_m)))
+void vlseg3e64ff(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64ff_v_i64m1_m)))
+void vlseg4e64ff(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64ff_v_i64m2_m)))
+void vlseg4e64ff(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64ff_v_u64m1_m)))
+void vlseg4e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64ff_v_u64m2_m)))
+void vlseg4e64ff(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e64ff_v_i64m1_m)))
+void vlseg5e64ff(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_i8m1_m)))
+void vlsseg2e8(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_i8m2_m)))
+void vlsseg2e8(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_i8m4_m)))
+void vlsseg2e8(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_i8mf2_m)))
+void vlsseg2e8(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_i8mf4_m)))
+void vlsseg2e8(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_i8mf8_m)))
+void vlsseg2e8(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e64ff_v_u64m1_m)))
+void vlseg5e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e64ff_v_i64m1_m)))
+void vlseg6e64ff(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e64ff_v_u64m1_m)))
+void vlseg6e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e64ff_v_i64m1_m)))
+void vlseg7e64ff(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e64ff_v_u64m1_m)))
+void vlseg7e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e64ff_v_i64m1_m)))
+void vlseg8e64ff(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e64ff_v_u64m1_m)))
+void vlseg8e64ff(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_u8m1_m)))
+void vlsseg2e8(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_u8m2_m)))
+void vlsseg2e8(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_u8m4_m)))
+void vlsseg2e8(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_u8mf2_m)))
+void vlsseg2e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_u8mf4_m)))
+void vlsseg2e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e8_v_u8mf8_m)))
+void vlsseg2e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8m1)))
+void vluxseg2ei8(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8m1_m)))
+void vluxseg2ei8(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8m2)))
+void vluxseg2ei8(vint8m2_t * op0, vint8m2_t * op1, const int8_t * op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8m2_m)))
+void vluxseg2ei8(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, vuint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8m4)))
+void vluxseg2ei8(vint8m4_t * op0, vint8m4_t * op1, const int8_t * op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8m4_m)))
+void vluxseg2ei8(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, vuint8m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8mf2)))
+void vluxseg2ei8(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8mf2_m)))
+void vluxseg2ei8(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8mf4)))
+void vluxseg2ei8(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8mf4_m)))
+void vluxseg2ei8(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8mf8)))
+void vluxseg2ei8(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i8mf8_m)))
+void vluxseg2ei8(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_i8m1_m)))
+void vlsseg3e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_i8m2_m)))
+void vlsseg3e8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_i8mf2_m)))
+void vlsseg3e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_i8mf4_m)))
+void vlsseg3e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_i8mf8_m)))
+void vlsseg3e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8m1)))
+void vluxseg2ei8(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8m1_m)))
+void vluxseg2ei8(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8m2)))
+void vluxseg2ei8(vuint8m2_t * op0, vuint8m2_t * op1, const uint8_t * op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8m2_m)))
+void vluxseg2ei8(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, vuint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8m4)))
+void vluxseg2ei8(vuint8m4_t * op0, vuint8m4_t * op1, const uint8_t * op2, vuint8m4_t op3, size_t op4);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8m4_m))) +void vluxseg2ei8(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, vuint8m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8mf2))) +void vluxseg2ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8mf2_m))) +void vluxseg2ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8mf4))) +void vluxseg2ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8mf4_m))) +void vluxseg2ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8mf8))) +void vluxseg2ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u8mf8_m))) +void vluxseg2ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_u8m1_m))) +void vlsseg3e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_u8m2_m))) +void vlsseg3e8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_u8mf2_m))) +void vlsseg3e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_u8mf4_m))) +void vlsseg3e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e8_v_u8mf8_m))) +void vlsseg3e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_i8m1_m))) +void vlsseg4e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_i8m2_m))) +void vlsseg4e8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, ptrdiff_t op10, size_t op11); + 
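/*
 * Editorial sketch (not part of the patch): the unmasked indexed segment
 * load vluxseg2ei8 declared above gathers two-field byte records at
 * arbitrary byte offsets. vsetvl_e8m1() and vle8_v_u8m1() are assumed to be
 * declared earlier in this header.
 */
#include <riscv_vector.h>

void gather_byte_pairs(const int8_t *records, const uint8_t *offsets,
                       size_t n, vint8m1_t *field0, vint8m1_t *field1) {
  size_t vl = vsetvl_e8m1(n);
  vuint8m1_t idx = vle8_v_u8m1(offsets, vl); /* byte offset of each record */
  /* field0[i] <- records[idx[i]], field1[i] <- records[idx[i] + 1] */
  vluxseg2ei8(field0, field1, records, idx, vl);
}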
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_i8mf2_m))) +void vlsseg4e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_i8mf4_m))) +void vlsseg4e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_i8mf8_m))) +void vlsseg4e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_u8m1_m))) +void vlsseg4e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_u8m2_m))) +void vlsseg4e8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_u8mf2_m))) +void vlsseg4e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_u8mf4_m))) +void vlsseg4e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e8_v_u8mf8_m))) +void vlsseg4e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_i8m1_m))) +void vlsseg5e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_i8mf2_m))) +void vlsseg5e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_i8mf4_m))) +void vlsseg5e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_i8mf8_m))) +void vlsseg5e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_u8m1_m))) +void vlsseg5e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_u8mf2_m))) +void vlsseg5e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_u8mf4_m))) +void vlsseg5e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e8_v_u8mf8_m))) +void vlsseg5e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_i8m1_m))) +void vlsseg6e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_i8mf2_m))) +void vlsseg6e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_i8mf4_m))) +void vlsseg6e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_i8mf8_m))) +void vlsseg6e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_u8m1_m))) +void vlsseg6e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t 
op11, vuint8m1_t op12, const uint8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_u8mf2_m))) +void vlsseg6e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_u8mf4_m))) +void vlsseg6e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e8_v_u8mf8_m))) +void vlsseg6e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_i8m1_m))) +void vlsseg7e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_i8mf2_m))) +void vlsseg7e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_i8mf4_m))) +void vlsseg7e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_i8mf8_m))) +void vlsseg7e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_u8m1_m))) +void vlsseg7e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_u8mf2_m))) +void vlsseg7e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, 
vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_u8mf4_m))) +void vlsseg7e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e8_v_u8mf8_m))) +void vlsseg7e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_i8m1_m))) +void vlsseg8e8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_i8mf2_m))) +void vlsseg8e8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_i8mf4_m))) +void vlsseg8e8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_i8mf8_m))) +void vlsseg8e8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8m1))) +void vsseg2e8(int8_t * op0, vint8m1_t op1, vint8m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8m1_m))) +void vsseg2e8(vbool8_t op0, int8_t * op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8m2))) +void vsseg2e8(int8_t * op0, vint8m2_t op1, vint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8m2_m))) +void vsseg2e8(vbool4_t op0, int8_t * op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + 
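/*
 * Editorial sketch (not part of the patch): the unit-stride segment store
 * vsseg2e8 declared above interleaves its two vector operands into memory
 * as {a0, b0, a1, b1, ...}. vsetvl_e8m1() and vle8_v_i8m1() are assumed to
 * be declared earlier in this header.
 */
#include <riscv_vector.h>

void interleave_i8(int8_t *dst, const int8_t *a, const int8_t *b, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e8m1(n);
    vint8m1_t va = vle8_v_i8m1(a, vl);
    vint8m1_t vb = vle8_v_i8m1(b, vl);
    vsseg2e8(dst, va, vb, vl); /* writes vl {a, b} pairs */
    n -= vl; a += vl; b += vl; dst += 2 * vl;
  }
}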
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8m4))) +void vsseg2e8(int8_t * op0, vint8m4_t op1, vint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8m4_m))) +void vsseg2e8(vbool2_t op0, int8_t * op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8mf2))) +void vsseg2e8(int8_t * op0, vint8mf2_t op1, vint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8mf2_m))) +void vsseg2e8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8mf4))) +void vsseg2e8(int8_t * op0, vint8mf4_t op1, vint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8mf4_m))) +void vsseg2e8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8mf8))) +void vsseg2e8(int8_t * op0, vint8mf8_t op1, vint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_i8mf8_m))) +void vsseg2e8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_u8m1_m))) +void vlsseg8e8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_u8mf2_m))) +void vlsseg8e8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_u8mf4_m))) +void vlsseg8e8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e8_v_u8mf8_m))) +void vlsseg8e8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_i16m1_m))) +void vlsseg2e16(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_i16m2_m))) +void vlsseg2e16(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_i16m4_m))) +void vlsseg2e16(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_i16mf2_m))) +void vlsseg2e16(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_i16mf4_m))) +void vlsseg2e16(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_u16m1_m))) +void vlsseg2e16(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_u16m2_m))) +void vlsseg2e16(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_u16m4_m))) +void vlsseg2e16(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_u16mf2_m))) +void vlsseg2e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_u16mf4_m))) +void vlsseg2e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_i16m1_m))) +void vlsseg3e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_i16m2_m))) +void vlsseg3e16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_i16mf2_m))) +void vlsseg3e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_i16mf4_m))) +void vlsseg3e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_u16m1_m))) +void vlsseg3e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const 
uint16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_u16m2_m))) +void vlsseg3e16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_u16mf2_m))) +void vlsseg3e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_u16mf4_m))) +void vlsseg3e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_i16m1_m))) +void vlsseg4e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_i16m2_m))) +void vlsseg4e16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_i16mf2_m))) +void vlsseg4e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_i16mf4_m))) +void vlsseg4e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_u16m1_m))) +void vlsseg4e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_u16m2_m))) +void vlsseg4e16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_u16mf2_m))) +void vlsseg4e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_u16mf4_m))) +void vlsseg4e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_i16m1_m))) +void vlsseg5e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_i16mf2_m))) +void vlsseg5e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_i16mf4_m))) +void vlsseg5e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_u16m1_m))) +void vlsseg5e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_u16mf2_m))) +void vlsseg5e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_u16mf4_m))) +void vlsseg5e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_i16m1_m))) +void vlsseg6e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_i16mf2_m))) +void vlsseg6e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_i16mf4_m))) +void vlsseg6e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8m1))) +void vsseg2e8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, size_t op3); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8m1_m))) +void vsseg2e8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8m2))) +void vsseg2e8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8m2_m))) +void vsseg2e8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8m4))) +void vsseg2e8(uint8_t * op0, vuint8m4_t op1, vuint8m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8m4_m))) +void vsseg2e8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8mf2))) +void vsseg2e8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8mf2_m))) +void vsseg2e8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8mf4))) +void vsseg2e8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8mf4_m))) +void vsseg2e8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8mf8))) +void vsseg2e8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e8_v_u8mf8_m))) +void vsseg2e8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_u16m1_m))) +void vlsseg6e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_u16mf2_m))) +void vlsseg6e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_u16mf4_m))) +void vlsseg6e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_i16m1_m))) +void vlsseg7e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_i16mf2_m))) +void vlsseg7e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_i16mf4_m))) +void vlsseg7e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_u16m1_m))) +void vlsseg7e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_u16mf2_m))) +void vlsseg7e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_u16mf4_m))) +void vlsseg7e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_i16m1_m))) +void vlsseg8e16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_i16mf2_m))) +void vlsseg8e16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_i16mf4_m))) +void vlsseg8e16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * 
op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_u16m1_m))) +void vlsseg8e16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_u16mf2_m))) +void vlsseg8e16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_u16mf4_m))) +void vlsseg8e16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_i32m1_m))) +void vlsseg2e32(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_i32m2_m))) +void vlsseg2e32(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_i32m4_m))) +void vlsseg2e32(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_i32mf2_m))) +void vlsseg2e32(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_u32m1_m))) +void vlsseg2e32(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_u32m2_m))) +void vlsseg2e32(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_u32m4_m))) +void vlsseg2e32(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_u32mf2_m))) +void vlsseg2e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_i32m1_m))) +void 
vlsseg3e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_i32m2_m))) +void vlsseg3e32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_i32mf2_m))) +void vlsseg3e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_u32m1_m))) +void vlsseg3e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_u32m2_m))) +void vlsseg3e32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_u32mf2_m))) +void vlsseg3e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_i32m1_m))) +void vlsseg4e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_i32m2_m))) +void vlsseg4e32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_i32mf2_m))) +void vlsseg4e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_u32m1_m))) +void vlsseg4e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_u32m2_m))) +void vlsseg4e32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_u32mf2_m))) +void vlsseg4e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e32_v_i32m1_m))) +void vlsseg5e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e32_v_i32mf2_m))) +void vlsseg5e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e32_v_u32m1_m))) +void vlsseg5e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e32_v_u32mf2_m))) +void vlsseg5e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e32_v_i32m1_m))) +void vlsseg6e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e32_v_i32mf2_m))) +void vlsseg6e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e32_v_u32m1_m))) +void vlsseg6e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e32_v_u32mf2_m))) +void vlsseg6e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e32_v_i32m1_m))) +void vlsseg7e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e32_v_i32mf2_m))) +void vlsseg7e32(vint32mf2_t * op0, 
vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e32_v_u32m1_m))) +void vlsseg7e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e32_v_u32mf2_m))) +void vlsseg7e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e32_v_i32m1_m))) +void vlsseg8e32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e32_v_i32mf2_m))) +void vlsseg8e32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e32_v_u32m1_m))) +void vlsseg8e32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e32_v_u32mf2_m))) +void vlsseg8e32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_i64m1_m))) +void vlsseg2e64(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_i64m2_m))) +void vlsseg2e64(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, ptrdiff_t op6, 
size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_i64m4_m))) +void vlsseg2e64(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_u64m1_m))) +void vlsseg2e64(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_u64m2_m))) +void vlsseg2e64(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_u64m4_m))) +void vlsseg2e64(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e64_v_i64m1_m))) +void vlsseg3e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e64_v_i64m2_m))) +void vlsseg3e64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e64_v_u64m1_m))) +void vlsseg3e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e64_v_u64m2_m))) +void vlsseg3e64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e64_v_i64m1_m))) +void vlsseg4e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e64_v_i64m2_m))) +void vlsseg4e64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e64_v_u64m1_m))) +void vlsseg4e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e64_v_u64m2_m))) +void vlsseg4e64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e64_v_i64m1_m))) +void vlsseg5e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t 
* op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e64_v_u64m1_m))) +void vlsseg5e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e64_v_i64m1_m))) +void vlsseg6e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e64_v_u64m1_m))) +void vlsseg6e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e64_v_i64m1_m))) +void vlsseg7e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e64_v_u64m1_m))) +void vlsseg7e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e64_v_i64m1_m))) +void vlsseg8e64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e64_v_u64m1_m))) +void vlsseg8e64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8m1))) +void vluxseg3ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8m1_m))) +void vluxseg3ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, 
vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8m2))) +void vluxseg3ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, const int8_t * op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8m2_m))) +void vluxseg3ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, vuint8m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8mf2))) +void vluxseg3ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8mf2_m))) +void vluxseg3ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8mf4))) +void vluxseg3ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8mf4_m))) +void vluxseg3ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8mf8))) +void vluxseg3ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i8mf8_m))) +void vluxseg3ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8m1))) +void vluxseg3ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8m1_m))) +void vluxseg3ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8m2))) +void vluxseg3ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, const uint8_t * op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8m2_m))) +void vluxseg3ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, vuint8m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8mf2))) +void vluxseg3ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8mf2_m))) +void vluxseg3ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8mf4))) +void vluxseg3ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8mf4_m))) +void vluxseg3ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8mf8))) +void vluxseg3ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u8mf8_m))) +void vluxseg3ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8m1))) +void vluxseg4ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8m1_m))) +void vluxseg4ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8m2))) +void vluxseg4ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, const int8_t * op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8m2_m))) +void vluxseg4ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, vuint8m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8mf2))) +void vluxseg4ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8mf2_m))) +void vluxseg4ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8mf4))) +void vluxseg4ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8mf4_m))) +void vluxseg4ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8mf8))) +void vluxseg4ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i8mf8_m))) +void vluxseg4ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * 
op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8m1))) +void vluxseg4ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8m1_m))) +void vluxseg4ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8m2))) +void vluxseg4ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, const uint8_t * op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8m2_m))) +void vluxseg4ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, vuint8m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8mf2))) +void vluxseg4ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8mf2_m))) +void vluxseg4ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8mf4))) +void vluxseg4ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8mf4_m))) +void vluxseg4ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8mf8))) +void vluxseg4ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u8mf8_m))) +void vluxseg4ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8m1))) +void vluxseg5ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8m1_m))) +void vluxseg5ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint8m1_t op12, size_t op13); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8mf2))) +void vluxseg5ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8mf2_m))) +void vluxseg5ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8mf4))) +void vluxseg5ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8mf4_m))) +void vluxseg5ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8mf8))) +void vluxseg5ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i8mf8_m))) +void vluxseg5ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8m1))) +void vluxseg5ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8m1_m))) +void vluxseg5ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint8m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8mf2))) +void vluxseg5ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8mf2_m))) +void vluxseg5ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8mf4))) +void vluxseg5ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8mf4_m))) +void vluxseg5ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, 
vuint8mf4_t op10, const uint8_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8mf8))) +void vluxseg5ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u8mf8_m))) +void vluxseg5ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8m1))) +void vluxseg6ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8m1_m))) +void vluxseg6ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint8m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8mf2))) +void vluxseg6ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8mf2_m))) +void vluxseg6ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8mf4))) +void vluxseg6ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8mf4_m))) +void vluxseg6ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8mf8))) +void vluxseg6ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i8mf8_m))) +void vluxseg6ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8m1))) +void vluxseg6ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint8m1_t op7, size_t op8); 
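/* Editor's sketch (not part of the upstream header): how the masked
 * strided segment loads declared earlier in this hunk are meant to be
 * called. The masked vlsseg2e64 overload takes the destination pointers
 * first, then the mask, then one "maskedoff" source per field, then the
 * base pointer, the byte stride between segments, and vl. The function
 * and parameter names below are hypothetical. */
static inline void load_re_im_masked(const int64_t *aos, vbool64_t m,
                                     vint64m1_t old_re, vint64m1_t old_im,
                                     vint64m1_t *re, vint64m1_t *im,
                                     size_t n) {
  size_t vl = vsetvl_e64m1(n);          /* lanes processed this strip */
  /* each segment is one {re, im} pair: stride = 2 * sizeof(int64_t) bytes */
  vlsseg2e64(re, im, m, old_re, old_im, aos,
             (ptrdiff_t)(2 * sizeof(int64_t)), vl);
}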
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8m1_m))) +void vluxseg6ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint8m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8mf2))) +void vluxseg6ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8mf2_m))) +void vluxseg6ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8mf4))) +void vluxseg6ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8mf4_m))) +void vluxseg6ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8mf8))) +void vluxseg6ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u8mf8_m))) +void vluxseg6ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8m1))) +void vluxseg7ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8m1_m))) +void vluxseg7ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint8m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8mf2))) +void vluxseg7ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8mf2_m))) +void vluxseg7ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, 
vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8mf4))) +void vluxseg7ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8mf4_m))) +void vluxseg7ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8mf8))) +void vluxseg7ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i8mf8_m))) +void vluxseg7ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8m1))) +void vluxseg7ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8m1_m))) +void vluxseg7ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint8m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8mf2))) +void vluxseg7ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8mf2_m))) +void vluxseg7ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8mf4))) +void vluxseg7ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8mf4_m))) +void vluxseg7ei8(vuint8mf4_t * op0, vuint8mf4_t * 
op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8mf8))) +void vluxseg7ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u8mf8_m))) +void vluxseg7ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8m1))) +void vluxseg8ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8m1_m))) +void vluxseg8ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint8m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8mf2))) +void vluxseg8ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8mf2_m))) +void vluxseg8ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8mf4))) +void vluxseg8ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8mf4_m))) +void vluxseg8ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8mf8))) +void vluxseg8ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t 
* op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i8mf8_m))) +void vluxseg8ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8m1))) +void vluxseg8ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8m1_m))) +void vluxseg8ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint8m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8mf2))) +void vluxseg8ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8mf2_m))) +void vluxseg8ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8mf4))) +void vluxseg8ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8mf4_m))) +void vluxseg8ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8mf8))) +void vluxseg8ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u8mf8_m))) +void vluxseg8ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t 
op15, vuint8mf8_t op16, const uint8_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8m1))) +void vluxseg2ei16(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8m1_m))) +void vluxseg2ei16(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8m2))) +void vluxseg2ei16(vint8m2_t * op0, vint8m2_t * op1, const int8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8m2_m))) +void vluxseg2ei16(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8m4))) +void vluxseg2ei16(vint8m4_t * op0, vint8m4_t * op1, const int8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8m4_m))) +void vluxseg2ei16(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, vuint16m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8mf2))) +void vluxseg2ei16(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8mf2_m))) +void vluxseg2ei16(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8mf4))) +void vluxseg2ei16(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8mf4_m))) +void vluxseg2ei16(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8mf8))) +void vluxseg2ei16(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i8mf8_m))) +void vluxseg2ei16(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8m1))) +void vluxseg2ei16(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8m1_m))) +void vluxseg2ei16(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8m2))) +void vluxseg2ei16(vuint8m2_t * op0, vuint8m2_t * op1, const uint8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8m2_m))) +void vluxseg2ei16(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, 
vuint8m2_t op4, const uint8_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8m4))) +void vluxseg2ei16(vuint8m4_t * op0, vuint8m4_t * op1, const uint8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8m4_m))) +void vluxseg2ei16(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, vuint16m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8mf2))) +void vluxseg2ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8mf2_m))) +void vluxseg2ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8mf4))) +void vluxseg2ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8mf4_m))) +void vluxseg2ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8mf8))) +void vluxseg2ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u8mf8_m))) +void vluxseg2ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8m1))) +void vluxseg3ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8m1_m))) +void vluxseg3ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8m2))) +void vluxseg3ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, const int8_t * op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8m2_m))) +void vluxseg3ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, vuint16m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8mf2))) +void vluxseg3ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8mf2_m))) +void vluxseg3ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8mf4))) +void vluxseg3ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint16mf2_t op4, size_t 
op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8mf4_m))) +void vluxseg3ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8mf8))) +void vluxseg3ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i8mf8_m))) +void vluxseg3ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8m1))) +void vluxseg3ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8m1_m))) +void vluxseg3ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8m2))) +void vluxseg3ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, const uint8_t * op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8m2_m))) +void vluxseg3ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, vuint16m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8mf2))) +void vluxseg3ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8mf2_m))) +void vluxseg3ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8mf4))) +void vluxseg3ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8mf4_m))) +void vluxseg3ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8mf8))) +void vluxseg3ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u8mf8_m))) +void vluxseg3ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8m1))) +void vluxseg4ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint16m2_t op5, size_t op6); + 
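/* Editor's sketch (not part of the upstream header): the unmasked indexed
 * segment loads above gather whole segments from arbitrary byte offsets.
 * Here vluxseg3ei16 pulls apart the three fields of an interleaved i8
 * triple (e.g. RGB) whose segment starts arrive as 16-bit byte offsets.
 * Names are hypothetical. */
static inline void gather_rgb(const int8_t *img, vuint16m2_t byte_off,
                              vint8m1_t *r, vint8m1_t *g, vint8m1_t *b,
                              size_t n) {
  size_t vl = vsetvl_e8m1(n);
  /* index EEW is 16 while data EEW is 8, so the i8m1 overload takes a
   * vuint16m2_t offset vector; offsets are in bytes, not elements */
  vluxseg3ei16(r, g, b, img, byte_off, vl);
}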
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8m1_m))) +void vluxseg4ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8m2))) +void vluxseg4ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, const int8_t * op4, vuint16m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8m2_m))) +void vluxseg4ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, vuint16m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8mf2))) +void vluxseg4ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8mf2_m))) +void vluxseg4ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8mf4))) +void vluxseg4ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8mf4_m))) +void vluxseg4ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8mf8))) +void vluxseg4ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i8mf8_m))) +void vluxseg4ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8m1))) +void vluxseg4ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8m1_m))) +void vluxseg4ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8m2))) +void vluxseg4ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, const uint8_t * op4, vuint16m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8m2_m))) +void vluxseg4ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const 
uint8_t * op9, vuint16m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8mf2))) +void vluxseg4ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8mf2_m))) +void vluxseg4ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8mf4))) +void vluxseg4ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8mf4_m))) +void vluxseg4ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8mf8))) +void vluxseg4ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u8mf8_m))) +void vluxseg4ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8m1))) +void vluxseg5ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8m1_m))) +void vluxseg5ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint16m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8mf2))) +void vluxseg5ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8mf2_m))) +void vluxseg5ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8mf4))) +void vluxseg5ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8mf4_m))) +void vluxseg5ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint16mf2_t op12, size_t op13); + 
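/* Editor's sketch (not part of the upstream header): the _m overloads
 * above all share one operand convention: destination pointers, then the
 * mask, then one maskedoff source per field (whose value is kept in
 * inactive lanes), then base pointer, offset vector, and vl. Names are
 * hypothetical; `keep` is reused as the maskedoff value for all five
 * fields purely for brevity. */
static inline void gather5_masked(const int8_t *base, vuint16m2_t off,
                                  vbool8_t m, vint8m1_t keep,
                                  vint8m1_t *f0, vint8m1_t *f1,
                                  vint8m1_t *f2, vint8m1_t *f3,
                                  vint8m1_t *f4, size_t n) {
  size_t vl = vsetvl_e8m1(n);
  vluxseg5ei16(f0, f1, f2, f3, f4, m, keep, keep, keep, keep, keep,
               base, off, vl);
}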
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8mf8)))
+void vluxseg5ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i8mf8_m)))
+void vluxseg5ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint16mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8m1)))
+void vluxseg5ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint16m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8m1_m)))
+void vluxseg5ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint16m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8mf2)))
+void vluxseg5ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8mf2_m)))
+void vluxseg5ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint16m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8mf4)))
+void vluxseg5ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8mf4_m)))
+void vluxseg5ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint16mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8mf8)))
+void vluxseg5ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u8mf8_m)))
+void vluxseg5ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint16mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8m1)))
+void vluxseg6ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint16m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8m1_m)))
+void vluxseg6ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint16m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8mf2)))
+void vluxseg6ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint16m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8mf2_m)))
+void vluxseg6ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint16m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8mf4)))
+void vluxseg6ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint16mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8mf4_m)))
+void vluxseg6ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint16mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8mf8)))
+void vluxseg6ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint16mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i8mf8_m)))
+void vluxseg6ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint16mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8m1)))
+void vluxseg6ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint16m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8m1_m)))
+void vluxseg6ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint16m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8mf2)))
+void vluxseg6ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint16m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8mf2_m)))
+void vluxseg6ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint16m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8mf4)))
+void vluxseg6ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint16mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8mf4_m)))
+void vluxseg6ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint16mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8mf8)))
+void vluxseg6ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint16mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u8mf8_m)))
+void vluxseg6ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint16mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8m1)))
+void vluxseg7ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint16m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8m1_m)))
+void vluxseg7ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint16m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8mf2)))
+void vluxseg7ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint16m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8mf2_m)))
+void vluxseg7ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint16m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8mf4)))
+void vluxseg7ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8mf4_m)))
+void vluxseg7ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint16mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8mf8)))
+void vluxseg7ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i8mf8_m)))
+void vluxseg7ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint16mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8m1)))
+void vluxseg7ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint16m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8m1_m)))
+void vluxseg7ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint16m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8mf2)))
+void vluxseg7ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint16m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8mf2_m)))
+void vluxseg7ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint16m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8mf4)))
+void vluxseg7ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8mf4_m)))
+void vluxseg7ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint16mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8mf8)))
+void vluxseg7ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u8mf8_m)))
+void vluxseg7ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint16mf4_t op16, size_t op17);
+
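The _m overloads above insert the mask and one merge ("maskedoff") value per field ahead of the base pointer. A hedged sketch of the masked vluxseg5ei16 form for vint8m1_t, where gather5_masked and its parameters are illustrative names and vmsltu_vx_u16m2_b8 plus vmv_v_x_i8m1 are assumed from this header's compare/move intrinsics; lanes whose offset is out of range are never dereferenced and keep the zero merge value.

#include <riscv_vector.h>

/* Masked segment gather: only offsets below len are loaded; masked-off
   lanes of f0..f4 take the merge value (zero). */
void gather5_masked(const int8_t *base, const uint16_t *offsets,
                    size_t len, int8_t *out, size_t n) {
  size_t vl = vsetvl_e8m1(n);
  vuint16m2_t idx = vle16_v_u16m2(offsets, vl);
  vbool8_t ok = vmsltu_vx_u16m2_b8(idx, (uint16_t)len, vl); /* idx < len */
  vint8m1_t z = vmv_v_x_i8m1(0, vl);
  vint8m1_t f0, f1, f2, f3, f4;
  vluxseg5ei16(&f0, &f1, &f2, &f3, &f4, ok, z, z, z, z, z, base, idx, vl);
  vse8_v_i8m1(out, f0, vl);
}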
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8m1)))
+void vluxseg8ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint16m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8m1_m)))
+void vluxseg8ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint16m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8mf2)))
+void vluxseg8ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint16m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8mf2_m)))
+void vluxseg8ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint16m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8mf4)))
+void vluxseg8ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint16mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8mf4_m)))
+void vluxseg8ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint16mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8mf8)))
+void vluxseg8ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint16mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i8mf8_m)))
+void vluxseg8ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint16mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8m1)))
+void vluxseg8ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint16m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8m1_m)))
+void vluxseg8ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint16m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8mf2)))
+void vluxseg8ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint16m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8mf2_m)))
+void vluxseg8ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint16m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8mf4)))
+void vluxseg8ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint16mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8mf4_m)))
+void vluxseg8ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint16mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8mf8)))
+void vluxseg8ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint16mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u8mf8_m)))
+void vluxseg8ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint16mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8m1)))
+void vluxseg2ei32(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8m1_m)))
+void vluxseg2ei32(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8m2)))
+void vluxseg2ei32(vint8m2_t * op0, vint8m2_t * op1, const int8_t * op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8m2_m)))
+void vluxseg2ei32(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, vuint32m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8mf2)))
+void vluxseg2ei32(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8mf2_m)))
+void vluxseg2ei32(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8mf4)))
+void vluxseg2ei32(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8mf4_m)))
+void vluxseg2ei32(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8mf8)))
+void vluxseg2ei32(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i8mf8_m)))
+void vluxseg2ei32(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8m1)))
+void vluxseg2ei32(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8m1_m)))
+void vluxseg2ei32(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8m2)))
+void vluxseg2ei32(vuint8m2_t * op0, vuint8m2_t * op1, const uint8_t * op2, vuint32m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8m2_m)))
+void vluxseg2ei32(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, vuint32m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8mf2)))
+void vluxseg2ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8mf2_m)))
+void vluxseg2ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8mf4)))
+void vluxseg2ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8mf4_m)))
+void vluxseg2ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8mf8)))
+void vluxseg2ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u8mf8_m)))
+void vluxseg2ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8m1)))
+void vluxseg3ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint32m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8m1_m)))
+void vluxseg3ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint32m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8m2)))
+void vluxseg3ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, const int8_t * op3, vuint32m8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8m2_m)))
+void vluxseg3ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, vuint32m8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8mf2)))
+void vluxseg3ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8mf2_m)))
+void vluxseg3ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8mf4)))
+void vluxseg3ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8mf4_m)))
+void vluxseg3ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8mf8)))
+void vluxseg3ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i8mf8_m)))
+void vluxseg3ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8m1)))
+void vluxseg3ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint32m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8m1_m)))
+void vluxseg3ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint32m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8m2)))
+void vluxseg3ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, const uint8_t * op3, vuint32m8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8m2_m)))
+void vluxseg3ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, vuint32m8_t op8, size_t op9);
+
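Note how the index operand scales through these ei32 declarations: each 8-bit data element needs a 32-bit offset, so the index vector's LMUL is four times the data LMUL (vuint32m4_t for m1 data, vuint32m8_t for m2, down to vuint32mf2_t for mf8). That is why m2 data appears only in the few-field ei32 forms here, and the ei64 forms further below stop at m1. A sketch with m2 data, where gather3_wide and its parameters are illustrative names and vsetvl_e8m2, vle32_v_u32m8 and vse8_v_i8m2 are assumed from this header's non-overloaded intrinsics:

#include <riscv_vector.h>

/* 32-bit byte offsets with vint8m2_t data: the index vector occupies a
   full vuint32m8_t register group (LMUL 8); resolves to ..._v_i8m2. */
void gather3_wide(const int8_t *base, const uint32_t *offsets,
                  int8_t *out, size_t n) {
  size_t vl = vsetvl_e8m2(n);
  vuint32m8_t idx = vle32_v_u32m8(offsets, vl);
  vint8m2_t f0, f1, f2;
  vluxseg3ei32(&f0, &f1, &f2, base, idx, vl);
  vse8_v_i8m2(out, f0, vl);
}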
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8mf2)))
+void vluxseg3ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8mf2_m)))
+void vluxseg3ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8mf4)))
+void vluxseg3ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8mf4_m)))
+void vluxseg3ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8mf8)))
+void vluxseg3ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u8mf8_m)))
+void vluxseg3ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8m1)))
+void vluxseg4ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint32m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8m1_m)))
+void vluxseg4ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint32m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8m2)))
+void vluxseg4ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, const int8_t * op4, vuint32m8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8m2_m)))
+void vluxseg4ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, vuint32m8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8mf2)))
+void vluxseg4ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8mf2_m)))
+void vluxseg4ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint32m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8mf4)))
+void vluxseg4ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8mf4_m)))
+void vluxseg4ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8mf8)))
+void vluxseg4ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i8mf8_m)))
+void vluxseg4ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8m1)))
+void vluxseg4ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint32m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8m1_m)))
+void vluxseg4ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint32m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8m2)))
+void vluxseg4ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, const uint8_t * op4, vuint32m8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8m2_m)))
+void vluxseg4ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, vuint32m8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8mf2)))
+void vluxseg4ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8mf2_m)))
+void vluxseg4ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint32m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8mf4)))
+void vluxseg4ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8mf4_m)))
+void vluxseg4ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8mf8)))
+void vluxseg4ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u8mf8_m)))
+void vluxseg4ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8m1)))
+void vluxseg5ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8m1_m)))
+void vluxseg5ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint32m4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8mf2)))
+void vluxseg5ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8mf2_m)))
+void vluxseg5ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint32m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8mf4)))
+void vluxseg5ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8mf4_m)))
+void vluxseg5ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint32m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8mf8)))
+void vluxseg5ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i8mf8_m)))
+void vluxseg5ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint32mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8m1)))
+void vluxseg5ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8m1_m)))
+void vluxseg5ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint32m4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8mf2)))
+void vluxseg5ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8mf2_m)))
+void vluxseg5ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint32m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8mf4)))
+void vluxseg5ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8mf4_m)))
+void vluxseg5ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint32m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8mf8)))
+void vluxseg5ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u8mf8_m)))
+void vluxseg5ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint32mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8m1)))
+void vluxseg6ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint32m4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8m1_m)))
+void vluxseg6ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint32m4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8mf2)))
+void vluxseg6ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint32m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8mf2_m)))
+void vluxseg6ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint32m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8mf4)))
+void vluxseg6ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8mf4_m)))
+void vluxseg6ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint32m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8mf8)))
+void vluxseg6ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i8mf8_m)))
+void vluxseg6ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint32mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8m1)))
+void vluxseg6ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint32m4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8m1_m)))
+void vluxseg6ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint32m4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8mf2)))
+void vluxseg6ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint32m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8mf2_m)))
+void vluxseg6ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint32m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8mf4)))
+void vluxseg6ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8mf4_m)))
+void vluxseg6ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint32m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8mf8)))
+void vluxseg6ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u8mf8_m)))
+void vluxseg6ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint32mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8m1)))
+void vluxseg7ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint32m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8m1_m)))
+void vluxseg7ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint32m4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8mf2)))
+void vluxseg7ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8mf2_m)))
+void vluxseg7ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint32m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8mf4)))
+void vluxseg7ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8mf4_m)))
+void vluxseg7ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint32m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8mf8)))
+void vluxseg7ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i8mf8_m)))
+void vluxseg7ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint32mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8m1)))
+void vluxseg7ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint32m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8m1_m)))
+void vluxseg7ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint32m4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8mf2)))
+void vluxseg7ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8mf2_m)))
+void vluxseg7ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint32m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8mf4)))
+void vluxseg7ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8mf4_m)))
+void vluxseg7ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint32m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8mf8)))
+void vluxseg7ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u8mf8_m)))
+void vluxseg7ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint32mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8m1)))
+void vluxseg8ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint32m4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8m1_m)))
+void vluxseg8ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint32m4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8mf2)))
+void vluxseg8ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint32m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8mf2_m)))
+void vluxseg8ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint32m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8mf4)))
+void vluxseg8ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8mf4_m)))
+void vluxseg8ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint32m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8mf8)))
+void vluxseg8ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i8mf8_m)))
+void vluxseg8ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint32mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8m1)))
+void vluxseg8ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint32m4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8m1_m)))
+void vluxseg8ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint32m4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8mf2)))
+void vluxseg8ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint32m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8mf2_m)))
+void vluxseg8ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint32m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8mf4)))
+void vluxseg8ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8mf4_m)))
+void vluxseg8ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint32m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8mf8)))
+void vluxseg8ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u8mf8_m)))
+void vluxseg8ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint32mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8m1)))
+void vluxseg2ei64(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8m1_m)))
+void vluxseg2ei64(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint64m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8mf2)))
+void vluxseg2ei64(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8mf2_m)))
+void vluxseg2ei64(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8mf4)))
+void vluxseg2ei64(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8mf4_m)))
+void vluxseg2ei64(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8mf8)))
+void vluxseg2ei64(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i8mf8_m)))
+void vluxseg2ei64(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8m1)))
+void vluxseg2ei64(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8m1_m)))
+void vluxseg2ei64(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint64m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8mf2)))
+void vluxseg2ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8mf2_m)))
+void vluxseg2ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8mf4)))
+void vluxseg2ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8mf4_m)))
+void vluxseg2ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8mf8)))
+void vluxseg2ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u8mf8_m)))
+void vluxseg2ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8m1)))
+void vluxseg3ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint64m8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8m1_m)))
+void vluxseg3ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint64m8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8mf2)))
+void vluxseg3ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8mf2_m)))
+void vluxseg3ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8mf4)))
+void vluxseg3ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8mf4_m)))
+void vluxseg3ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8mf8)))
+void vluxseg3ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i8mf8_m)))
+void vluxseg3ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8m1)))
+void vluxseg3ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint64m8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8m1_m)))
+void vluxseg3ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint64m8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8mf2)))
+void vluxseg3ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8mf2_m)))
+void vluxseg3ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8mf4)))
+void vluxseg3ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8mf4_m)))
+void vluxseg3ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8mf8)))
+void vluxseg3ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u8mf8_m)))
+void vluxseg3ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8m1)))
+void vluxseg4ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint64m8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8m1_m)))
+void vluxseg4ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint64m8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8mf2)))
+void vluxseg4ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint64m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8mf2_m)))
+void vluxseg4ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint64m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8mf4)))
+void vluxseg4ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8mf4_m)))
+void vluxseg4ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint64m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8mf8)))
+void vluxseg4ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i8mf8_m)))
+void vluxseg4ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8m1)))
+void vluxseg4ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint64m8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8m1_m)))
+void vluxseg4ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint64m8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8mf2)))
+void vluxseg4ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint64m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8mf2_m)))
+void vluxseg4ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint64m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8mf4)))
+void vluxseg4ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8mf4_m)))
+void vluxseg4ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint64m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8mf8)))
+void vluxseg4ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u8mf8_m)))
+void vluxseg4ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8m1)))
+void vluxseg5ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint64m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8m1_m)))
+void vluxseg5ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint64m8_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8mf2)))
+void vluxseg5ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8mf2_m)))
+void vluxseg5ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint64m4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8mf4)))
+void vluxseg5ei64(vint8mf4_t * op0, 
vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8mf4_m))) +void vluxseg5ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8mf8))) +void vluxseg5ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i8mf8_m))) +void vluxseg5ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8m1))) +void vluxseg5ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8m1_m))) +void vluxseg5ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint64m8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8mf2))) +void vluxseg5ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8mf2_m))) +void vluxseg5ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8mf4))) +void vluxseg5ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8mf4_m))) +void vluxseg5ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8mf8))) +void vluxseg5ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u8mf8_m))) +void vluxseg5ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint64m1_t op12, size_t op13); + 
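+// ---------------------------------------------------------------------------
+// Usage sketch (editorial note, not part of the upstream header). The
+// overloaded vluxseg<N>ei<W> intrinsics perform an indexed-unordered load of
+// N-field segments: each of the N output pointers receives one vector
+// holding field i of every loaded segment, and the index vector supplies the
+// byte offset of each segment. A minimal, hypothetical example using the
+// unmasked 2-field, 64-bit-index overload declared above:
+//
+//   void demo(const int8_t *base, vuint64m8_t idx, size_t vl) {
+//     vint8m1_t f0, f1;                      // one register group per field
+//     vluxseg2ei64(&f0, &f1, base, idx, vl); // idx elements are byte offsets
+//   }
+// ---------------------------------------------------------------------------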
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8m1))) +void vluxseg6ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint64m8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8m1_m))) +void vluxseg6ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint64m8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8mf2))) +void vluxseg6ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8mf2_m))) +void vluxseg6ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8mf4))) +void vluxseg6ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8mf4_m))) +void vluxseg6ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8mf8))) +void vluxseg6ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i8mf8_m))) +void vluxseg6ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8m1))) +void vluxseg6ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint64m8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8m1_m))) +void vluxseg6ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint64m8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8mf2))) +void vluxseg6ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint64m4_t op7, size_t op8); + 
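+// ---------------------------------------------------------------------------
+// Masked-overload sketch (editorial note, not part of the upstream header).
+// Each "_m" alias above is reached through the same overloaded name: the
+// arguments are the N destination pointers, the mask, N maskedoff vectors
+// (supplying the values kept in masked-off elements), then the base pointer,
+// the index vector, and vl. A hypothetical 2-field example:
+//
+//   void demo_m(const int8_t *base, vuint64m8_t idx, vbool8_t m,
+//               vint8m1_t old0, vint8m1_t old1, size_t vl) {
+//     vint8m1_t f0, f1;
+//     vluxseg2ei64(&f0, &f1, m, old0, old1, base, idx, vl);
+//   }
+// ---------------------------------------------------------------------------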
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8mf2_m))) +void vluxseg6ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8mf4))) +void vluxseg6ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8mf4_m))) +void vluxseg6ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8mf8))) +void vluxseg6ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u8mf8_m))) +void vluxseg6ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8m1))) +void vluxseg7ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8m1_m))) +void vluxseg7ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint64m8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8mf2))) +void vluxseg7ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8mf2_m))) +void vluxseg7ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8mf4))) +void vluxseg7ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8mf4_m))) +void 
vluxseg7ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8mf8))) +void vluxseg7ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i8mf8_m))) +void vluxseg7ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8m1))) +void vluxseg7ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8m1_m))) +void vluxseg7ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint64m8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8mf2))) +void vluxseg7ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8mf2_m))) +void vluxseg7ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8mf4))) +void vluxseg7ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8mf4_m))) +void vluxseg7ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8mf8))) +void vluxseg7ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u8mf8_m))) +void vluxseg7ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8m1))) +void vluxseg8ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint64m8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8m1_m))) +void vluxseg8ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint64m8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8mf2))) +void vluxseg8ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8mf2_m))) +void vluxseg8ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8mf4))) +void vluxseg8ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8mf4_m))) +void vluxseg8ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8mf8))) +void vluxseg8ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i8mf8_m))) +void vluxseg8ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8m1))) +void vluxseg8ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint64m8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8m1_m))) +void vluxseg8ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint64m8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8mf2))) +void vluxseg8ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8mf2_m))) +void vluxseg8ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8mf4))) +void vluxseg8ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8mf4_m))) +void vluxseg8ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8mf8))) +void vluxseg8ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u8mf8_m))) +void vluxseg8ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16m1))) +void vluxseg2ei8(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16m1_m))) +void vluxseg2ei8(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint8mf2_t 
op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16m2))) +void vluxseg2ei8(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16m2_m))) +void vluxseg2ei8(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16m4))) +void vluxseg2ei8(vint16m4_t * op0, vint16m4_t * op1, const int16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16m4_m))) +void vluxseg2ei8(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16mf2))) +void vluxseg2ei8(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16mf2_m))) +void vluxseg2ei8(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16mf4))) +void vluxseg2ei8(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i16mf4_m))) +void vluxseg2ei8(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16m1))) +void vluxseg2ei8(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16m1_m))) +void vluxseg2ei8(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16m2))) +void vluxseg2ei8(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16m2_m))) +void vluxseg2ei8(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16m4))) +void vluxseg2ei8(vuint16m4_t * op0, vuint16m4_t * op1, const uint16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16m4_m))) +void vluxseg2ei8(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16mf2))) +void vluxseg2ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16mf2_m))) +void vluxseg2ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, 
const uint16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16mf4))) +void vluxseg2ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u16mf4_m))) +void vluxseg2ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16m1))) +void vluxseg3ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16m1_m))) +void vluxseg3ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16m2))) +void vluxseg3ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16m2_m))) +void vluxseg3ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16mf2))) +void vluxseg3ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16mf2_m))) +void vluxseg3ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16mf4))) +void vluxseg3ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i16mf4_m))) +void vluxseg3ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16m1))) +void vluxseg3ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16m1_m))) +void vluxseg3ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16m2))) +void vluxseg3ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16m2_m))) +void vluxseg3ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded 
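+// Editorial note (not part of the upstream header): the "ei8" overloads for
+// 16-bit elements pair each data type with an 8-bit index vector whose EMUL
+// keeps the element counts equal, e.g. vint16m1_t data takes a vuint8mf2_t
+// index (EMUL = 8 * 1 / 16 = 1/2). A hypothetical 3-field load using the
+// vluxseg3ei8 overload declared above:
+//
+//   void demo_ei8(const int16_t *base, vuint8mf2_t idx, size_t vl) {
+//     vint16m1_t a, b, c;
+//     vluxseg3ei8(&a, &b, &c, base, idx, vl);
+//   }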
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16mf2))) +void vluxseg3ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16mf2_m))) +void vluxseg3ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16mf4))) +void vluxseg3ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u16mf4_m))) +void vluxseg3ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16m1))) +void vluxseg4ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16m1_m))) +void vluxseg4ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16m2))) +void vluxseg4ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16m2_m))) +void vluxseg4ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16mf2))) +void vluxseg4ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16mf2_m))) +void vluxseg4ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16mf4))) +void vluxseg4ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i16mf4_m))) +void vluxseg4ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16m1))) +void vluxseg4ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16m1_m))) +void vluxseg4ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16m2))) +void vluxseg4ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16m2_m))) +void vluxseg4ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16mf2))) +void vluxseg4ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16mf2_m))) +void vluxseg4ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16mf4))) +void vluxseg4ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u16mf4_m))) +void vluxseg4ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i16m1))) +void vluxseg5ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i16m1_m))) +void vluxseg5ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i16mf2))) +void vluxseg5ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i16mf2_m))) +void vluxseg5ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i16mf4))) +void vluxseg5ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded 
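+// Editorial note (not part of the upstream header): the mask type selected
+// by each "_m" overload follows the SEW/LMUL ratio of the data type, e.g.
+// vint16m1_t pairs with vbool16_t (16/1) while vint16mf4_t pairs with
+// vbool64_t (16 divided by 1/4). A hypothetical masked fractional-LMUL load
+// using the vluxseg2ei8 overload declared earlier:
+//
+//   void demo_ratio(const int16_t *base, vuint8mf8_t idx, vbool64_t m,
+//                   vint16mf4_t o0, vint16mf4_t o1, size_t vl) {
+//     vint16mf4_t r0, r1;
+//     vluxseg2ei8(&r0, &r1, m, o0, o1, base, idx, vl);
+//   }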
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i16mf4_m))) +void vluxseg5ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u16m1))) +void vluxseg5ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u16m1_m))) +void vluxseg5ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u16mf2))) +void vluxseg5ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u16mf2_m))) +void vluxseg5ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u16mf4))) +void vluxseg5ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u16mf4_m))) +void vluxseg5ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i16m1))) +void vluxseg6ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i16m1_m))) +void vluxseg6ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i16mf2))) +void vluxseg6ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i16mf2_m))) +void vluxseg6ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint8mf4_t 
op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i16mf4))) +void vluxseg6ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i16mf4_m))) +void vluxseg6ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u16m1))) +void vluxseg6ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u16m1_m))) +void vluxseg6ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u16mf2))) +void vluxseg6ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u16mf2_m))) +void vluxseg6ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u16mf4))) +void vluxseg6ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u16mf4_m))) +void vluxseg6ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i16m1))) +void vluxseg7ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i16m1_m))) +void vluxseg7ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i16mf2))) +void vluxseg7ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i16mf2_m))) +void vluxseg7ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i16mf4))) +void vluxseg7ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i16mf4_m))) +void vluxseg7ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u16m1))) +void vluxseg7ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u16m1_m))) +void vluxseg7ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u16mf2))) +void vluxseg7ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u16mf2_m))) +void vluxseg7ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u16mf4))) +void vluxseg7ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u16mf4_m))) +void vluxseg7ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, 
vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i16m1))) +void vluxseg8ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i16m1_m))) +void vluxseg8ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i16mf2))) +void vluxseg8ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i16mf2_m))) +void vluxseg8ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i16mf4))) +void vluxseg8ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i16mf4_m))) +void vluxseg8ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u16m1))) +void vluxseg8ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u16m1_m))) +void vluxseg8ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u16mf2))) +void vluxseg8ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, 
vuint16mf2_t * op7, const uint16_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u16mf2_m))) +void vluxseg8ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u16mf4))) +void vluxseg8ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u16mf4_m))) +void vluxseg8ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16m1))) +void vluxseg2ei16(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16m1_m))) +void vluxseg2ei16(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16m2))) +void vluxseg2ei16(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16m2_m))) +void vluxseg2ei16(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16m4))) +void vluxseg2ei16(vint16m4_t * op0, vint16m4_t * op1, const int16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16m4_m))) +void vluxseg2ei16(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16mf2))) +void vluxseg2ei16(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16mf2_m))) +void vluxseg2ei16(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16mf4))) +void vluxseg2ei16(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i16mf4_m))) +void vluxseg2ei16(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, 
vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16m1))) +void vluxseg2ei16(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16m1_m))) +void vluxseg2ei16(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16m2))) +void vluxseg2ei16(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16m2_m))) +void vluxseg2ei16(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16m4))) +void vluxseg2ei16(vuint16m4_t * op0, vuint16m4_t * op1, const uint16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16m4_m))) +void vluxseg2ei16(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16mf2))) +void vluxseg2ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16mf2_m))) +void vluxseg2ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16mf4))) +void vluxseg2ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u16mf4_m))) +void vluxseg2ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16m1))) +void vluxseg3ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16m1_m))) +void vluxseg3ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16m2))) +void vluxseg3ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16m2_m))) +void vluxseg3ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16mf2))) +void vluxseg3ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const 
int16_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16mf2_m))) +void vluxseg3ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16mf4))) +void vluxseg3ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i16mf4_m))) +void vluxseg3ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16m1))) +void vluxseg3ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16m1_m))) +void vluxseg3ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16m2))) +void vluxseg3ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16m2_m))) +void vluxseg3ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16mf2))) +void vluxseg3ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16mf2_m))) +void vluxseg3ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16mf4))) +void vluxseg3ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u16mf4_m))) +void vluxseg3ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16m1))) +void vluxseg4ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16m1_m))) +void vluxseg4ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16m2))) +void vluxseg4ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16m2_m))) +void vluxseg4ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16mf2))) +void vluxseg4ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16mf2_m))) +void vluxseg4ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16mf4))) +void vluxseg4ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i16mf4_m))) +void vluxseg4ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16m1))) +void vluxseg4ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16m1_m))) +void vluxseg4ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16m2))) +void vluxseg4ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16m2_m))) +void vluxseg4ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16mf2))) +void vluxseg4ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16mf2_m))) +void vluxseg4ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16mf4))) +void vluxseg4ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * 
op2, vuint16mf4_t * op3, const uint16_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u16mf4_m))) +void vluxseg4ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i16m1))) +void vluxseg5ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i16m1_m))) +void vluxseg5ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i16mf2))) +void vluxseg5ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i16mf2_m))) +void vluxseg5ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i16mf4))) +void vluxseg5ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i16mf4_m))) +void vluxseg5ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u16m1))) +void vluxseg5ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u16m1_m))) +void vluxseg5ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u16mf2))) +void vluxseg5ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u16mf2_m))) +void vluxseg5ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint16mf2_t op12, size_t op13); + 
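A note on reading these generated overloads, since the pattern repeats for every vluxseg<NF>ei<EEW> family above and below: the first <NF> pointer arguments receive the per-field results, the const pointer is the segment base address, the vuint<EEW>*_t operand carries per-segment byte offsets, and the trailing size_t is the active vector length. A minimal usage sketch follows (our own example, not part of the generated header; deinterleave16 is a hypothetical name, and it assumes a clang 14 toolchain with the V segment load/store intrinsics enabled):

#include <riscv_vector.h>

/* Gather vl {re, im} int16 pairs whose byte offsets sit in offs. */
static void deinterleave16(const int16_t *base, vuint16m1_t offs,
                           int16_t *re, int16_t *im, size_t vl) {
  vint16m1_t f0, f1;
  vluxseg2ei16(&f0, &f1, base, offs, vl); /* f0 = field 0, f1 = field 1 */
  vse16_v_i16m1(re, f0, vl);              /* unit-stride store of each field */
  vse16_v_i16m1(im, f1, vl);
}

The offsets are byte offsets rather than element indices, which is why the index element width <EEW> can be chosen independently of the 16-bit data width.
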
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u16mf4))) +void vluxseg5ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u16mf4_m))) +void vluxseg5ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i16m1))) +void vluxseg6ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i16m1_m))) +void vluxseg6ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i16mf2))) +void vluxseg6ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i16mf2_m))) +void vluxseg6ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i16mf4))) +void vluxseg6ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i16mf4_m))) +void vluxseg6ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u16m1))) +void vluxseg6ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u16m1_m))) +void vluxseg6ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u16mf2))) +void vluxseg6ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t 
* op4, vuint16mf2_t * op5, const uint16_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u16mf2_m))) +void vluxseg6ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u16mf4))) +void vluxseg6ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u16mf4_m))) +void vluxseg6ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i16m1))) +void vluxseg7ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i16m1_m))) +void vluxseg7ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i16mf2))) +void vluxseg7ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i16mf2_m))) +void vluxseg7ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i16mf4))) +void vluxseg7ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i16mf4_m))) +void vluxseg7ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u16m1))) +void vluxseg7ei16(vuint16m1_t * op0, vuint16m1_t * op1, 
vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u16m1_m))) +void vluxseg7ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u16mf2))) +void vluxseg7ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u16mf2_m))) +void vluxseg7ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u16mf4))) +void vluxseg7ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u16mf4_m))) +void vluxseg7ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i16m1))) +void vluxseg8ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i16m1_m))) +void vluxseg8ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i16mf2))) +void vluxseg8ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i16mf2_m))) +void vluxseg8ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, 
vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i16mf4))) +void vluxseg8ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i16mf4_m))) +void vluxseg8ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u16m1))) +void vluxseg8ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u16m1_m))) +void vluxseg8ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u16mf2))) +void vluxseg8ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u16mf2_m))) +void vluxseg8ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u16mf4))) +void vluxseg8ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u16mf4_m))) +void vluxseg8ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16m1))) +void vluxseg2ei32(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint32m2_t op3, 
size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16m1_m))) +void vluxseg2ei32(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16m2))) +void vluxseg2ei32(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16m2_m))) +void vluxseg2ei32(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16m4))) +void vluxseg2ei32(vint16m4_t * op0, vint16m4_t * op1, const int16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16m4_m))) +void vluxseg2ei32(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16mf2))) +void vluxseg2ei32(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16mf2_m))) +void vluxseg2ei32(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16mf4))) +void vluxseg2ei32(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i16mf4_m))) +void vluxseg2ei32(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16m1))) +void vluxseg2ei32(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16m1_m))) +void vluxseg2ei32(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16m2))) +void vluxseg2ei32(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16m2_m))) +void vluxseg2ei32(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16m4))) +void vluxseg2ei32(vuint16m4_t * op0, vuint16m4_t * op1, const uint16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16m4_m))) +void vluxseg2ei32(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16mf2))) +void vluxseg2ei32(vuint16mf2_t 
* op0, vuint16mf2_t * op1, const uint16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16mf2_m))) +void vluxseg2ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16mf4))) +void vluxseg2ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u16mf4_m))) +void vluxseg2ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16m1))) +void vluxseg3ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16m1_m))) +void vluxseg3ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16m2))) +void vluxseg3ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16m2_m))) +void vluxseg3ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16mf2))) +void vluxseg3ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16mf2_m))) +void vluxseg3ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16mf4))) +void vluxseg3ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i16mf4_m))) +void vluxseg3ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16m1))) +void vluxseg3ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16m1_m))) +void vluxseg3ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16m2))) +void vluxseg3ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint32m4_t op4, size_t op5); + 
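The _m overloads interleaved here add a mask plus one masked-off source per field ahead of the base pointer; lanes whose mask bit is clear keep the corresponding masked-off value instead of being loaded. A hedged sketch against the two-field vluxseg2ei32 masked overload declared earlier (masked_gather16 is a hypothetical name):

#include <riscv_vector.h>

static void masked_gather16(const int16_t *base, vuint32m2_t offs,
                            vbool16_t m, vint16m1_t old0, vint16m1_t old1,
                            vint16m1_t *f0, vint16m1_t *f1, size_t vl) {
  /* Inactive lanes of *f0/*f1 are taken from old0/old1, not from memory. */
  vluxseg2ei32(f0, f1, m, old0, old1, base, offs, vl);
}
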
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16m2_m))) +void vluxseg3ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16mf2))) +void vluxseg3ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16mf2_m))) +void vluxseg3ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16mf4))) +void vluxseg3ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u16mf4_m))) +void vluxseg3ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16m1))) +void vluxseg4ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16m1_m))) +void vluxseg4ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16m2))) +void vluxseg4ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16m2_m))) +void vluxseg4ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16mf2))) +void vluxseg4ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16mf2_m))) +void vluxseg4ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16mf4))) +void vluxseg4ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i16mf4_m))) +void vluxseg4ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint32mf2_t 
op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16m1))) +void vluxseg4ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16m1_m))) +void vluxseg4ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16m2))) +void vluxseg4ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16m2_m))) +void vluxseg4ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16mf2))) +void vluxseg4ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16mf2_m))) +void vluxseg4ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16mf4))) +void vluxseg4ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u16mf4_m))) +void vluxseg4ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i16m1))) +void vluxseg5ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i16m1_m))) +void vluxseg5ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i16mf2))) +void vluxseg5ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i16mf2_m))) +void vluxseg5ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint32m1_t op12, size_t op13); + 
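Note how the index operand's register group scales with the data type in these ei32 overloads: 16-bit data at m1 pairs with vuint32m2_t, at mf2 with vuint32m1_t, and at mf4 with vuint32mf2_t, so the index vector always holds as many elements as the data vectors. A strip-mined usage sketch under the same assumptions as the earlier examples (process_field0 is a hypothetical name):

#include <riscv_vector.h>

/* Keep field 0 of each five-field segment addressed by byte offsets offs. */
static void process_field0(const int16_t *base, const uint32_t *offs,
                           int16_t *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e16m1(n - i);               /* lanes this iteration */
    vuint32m2_t idx = vle32_v_u32m2(offs + i, vl); /* per-segment byte offsets */
    vint16m1_t f0, f1, f2, f3, f4;
    vluxseg5ei32(&f0, &f1, &f2, &f3, &f4, base, idx, vl);
    vse16_v_i16m1(out + i, f0, vl);
    i += vl;
  }
}
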
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i16mf4))) +void vluxseg5ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i16mf4_m))) +void vluxseg5ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u16m1))) +void vluxseg5ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u16m1_m))) +void vluxseg5ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u16mf2))) +void vluxseg5ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u16mf2_m))) +void vluxseg5ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u16mf4))) +void vluxseg5ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u16mf4_m))) +void vluxseg5ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i16m1))) +void vluxseg6ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i16m1_m))) +void vluxseg6ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i16mf2))) +void vluxseg6ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i16mf2_m))) +void vluxseg6ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i16mf4))) +void vluxseg6ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i16mf4_m))) +void vluxseg6ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u16m1))) +void vluxseg6ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u16m1_m))) +void vluxseg6ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u16mf2))) +void vluxseg6ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u16mf2_m))) +void vluxseg6ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u16mf4))) +void vluxseg6ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u16mf4_m))) +void vluxseg6ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i16m1))) +void vluxseg7ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i16m1_m))) +void 
vluxseg7ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i16mf2))) +void vluxseg7ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i16mf2_m))) +void vluxseg7ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i16mf4))) +void vluxseg7ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i16mf4_m))) +void vluxseg7ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u16m1))) +void vluxseg7ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u16m1_m))) +void vluxseg7ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u16mf2))) +void vluxseg7ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u16mf2_m))) +void vluxseg7ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u16mf4))) +void vluxseg7ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, 
const uint16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u16mf4_m))) +void vluxseg7ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i16m1))) +void vluxseg8ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i16m1_m))) +void vluxseg8ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i16mf2))) +void vluxseg8ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i16mf2_m))) +void vluxseg8ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i16mf4))) +void vluxseg8ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i16mf4_m))) +void vluxseg8ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8m1))) +void vssseg2e8(int8_t * op0, ptrdiff_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8m1_m))) +void vssseg2e8(vbool8_t op0, int8_t * op1, ptrdiff_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8m2))) +void vssseg2e8(int8_t * op0, ptrdiff_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8m2_m))) +void vssseg2e8(vbool4_t op0, int8_t * 
op1, ptrdiff_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8m4))) +void vssseg2e8(int8_t * op0, ptrdiff_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8m4_m))) +void vssseg2e8(vbool2_t op0, int8_t * op1, ptrdiff_t op2, vint8m4_t op3, vint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8mf2))) +void vssseg2e8(int8_t * op0, ptrdiff_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8mf2_m))) +void vssseg2e8(vbool16_t op0, int8_t * op1, ptrdiff_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8mf4))) +void vssseg2e8(int8_t * op0, ptrdiff_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8mf4_m))) +void vssseg2e8(vbool32_t op0, int8_t * op1, ptrdiff_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8mf8))) +void vssseg2e8(int8_t * op0, ptrdiff_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_i8mf8_m))) +void vssseg2e8(vbool64_t op0, int8_t * op1, ptrdiff_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u16m1))) +void vluxseg8ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u16m1_m))) +void vluxseg8ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u16mf2))) +void vluxseg8ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u16mf2_m))) +void vluxseg8ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u16mf4))) +void vluxseg8ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u16mf4_m))) +void vluxseg8ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16m1))) +void vluxseg2ei64(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16m1_m))) +void vluxseg2ei64(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16m2))) +void vluxseg2ei64(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16m2_m))) +void vluxseg2ei64(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16mf2))) +void vluxseg2ei64(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16mf2_m))) +void vluxseg2ei64(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16mf4))) +void vluxseg2ei64(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i16mf4_m))) +void vluxseg2ei64(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16m1))) +void vluxseg2ei64(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16m1_m))) +void vluxseg2ei64(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16m2))) +void vluxseg2ei64(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16m2_m))) +void vluxseg2ei64(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16mf2))) +void vluxseg2ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16mf2_m))) +void vluxseg2ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, 
vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16mf4))) +void vluxseg2ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u16mf4_m))) +void vluxseg2ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16m1))) +void vluxseg3ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16m1_m))) +void vluxseg3ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16m2))) +void vluxseg3ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16m2_m))) +void vluxseg3ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16mf2))) +void vluxseg3ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16mf2_m))) +void vluxseg3ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16mf4))) +void vluxseg3ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i16mf4_m))) +void vluxseg3ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16m1))) +void vluxseg3ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16m1_m))) +void vluxseg3ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16m2))) +void vluxseg3ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16m2_m))) +void vluxseg3ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, 
const uint16_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16mf2))) +void vluxseg3ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16mf2_m))) +void vluxseg3ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16mf4))) +void vluxseg3ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u16mf4_m))) +void vluxseg3ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16m1))) +void vluxseg4ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16m1_m))) +void vluxseg4ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16m2))) +void vluxseg4ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16m2_m))) +void vluxseg4ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16mf2))) +void vluxseg4ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16mf2_m))) +void vluxseg4ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16mf4))) +void vluxseg4ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i16mf4_m))) +void vluxseg4ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16m1))) +void vluxseg4ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint64m4_t 
op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16m1_m))) +void vluxseg4ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16m2))) +void vluxseg4ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16m2_m))) +void vluxseg4ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16mf2))) +void vluxseg4ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16mf2_m))) +void vluxseg4ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16mf4))) +void vluxseg4ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u16mf4_m))) +void vluxseg4ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i16m1))) +void vluxseg5ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i16m1_m))) +void vluxseg5ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i16mf2))) +void vluxseg5ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i16mf2_m))) +void vluxseg5ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i16mf4))) +void vluxseg5ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint64m1_t op6, 
size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i16mf4_m))) +void vluxseg5ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u16m1))) +void vluxseg5ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u16m1_m))) +void vluxseg5ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u16mf2))) +void vluxseg5ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u16mf2_m))) +void vluxseg5ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u16mf4))) +void vluxseg5ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u16mf4_m))) +void vluxseg5ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i16m1))) +void vluxseg6ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i16m1_m))) +void vluxseg6ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i16mf2))) +void vluxseg6ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i16mf2_m))) +void vluxseg6ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, 
vint16mf2_t op12, const int16_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i16mf4))) +void vluxseg6ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i16mf4_m))) +void vluxseg6ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8m1))) +void vssseg2e8(uint8_t * op0, ptrdiff_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8m1_m))) +void vssseg2e8(vbool8_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8m2))) +void vssseg2e8(uint8_t * op0, ptrdiff_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8m2_m))) +void vssseg2e8(vbool4_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8m4))) +void vssseg2e8(uint8_t * op0, ptrdiff_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8m4_m))) +void vssseg2e8(vbool2_t op0, uint8_t * op1, ptrdiff_t op2, vuint8m4_t op3, vuint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8mf2))) +void vssseg2e8(uint8_t * op0, ptrdiff_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8mf2_m))) +void vssseg2e8(vbool16_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8mf4))) +void vssseg2e8(uint8_t * op0, ptrdiff_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8mf4_m))) +void vssseg2e8(vbool32_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8mf8))) +void vssseg2e8(uint8_t * op0, ptrdiff_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e8_v_u8mf8_m))) +void vssseg2e8(vbool64_t op0, uint8_t * op1, ptrdiff_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u16m1))) +void vluxseg6ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u16m1_m))) +void vluxseg6ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, 
vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u16mf2))) +void vluxseg6ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u16mf2_m))) +void vluxseg6ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u16mf4))) +void vluxseg6ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u16mf4_m))) +void vluxseg6ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i16m1))) +void vluxseg7ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i16m1_m))) +void vluxseg7ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i16mf2))) +void vluxseg7ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i16mf2_m))) +void vluxseg7ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i16mf4))) +void vluxseg7ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i16mf4_m))) +void vluxseg7ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * 
op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u16m1))) +void vluxseg7ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u16m1_m))) +void vluxseg7ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u16mf2))) +void vluxseg7ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u16mf2_m))) +void vluxseg7ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u16mf4))) +void vluxseg7ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u16mf4_m))) +void vluxseg7ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i16m1))) +void vluxseg8ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i16m1_m))) +void vluxseg8ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i16mf2))) +void vluxseg8ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint64m2_t op9, size_t op10); + 
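+// NOTE: Illustrative usage sketch only, not part of the generated header.
+// Each overloaded vluxseg<N>ei<EEW> call gathers N interleaved fields per
+// element via unsigned byte-offset indices and writes one result vector per
+// field; the masked (_m) forms insert a mask plus N merge operands before
+// the base pointer. The vssseg/vsuxseg declarations interleaved below are
+// the strided/indexed segment-store counterparts, taking the destination
+// pointer (after the mask, if any) first. A hedged example, assuming
+// hypothetical `base`, `idx`, and `vl` values:
+//
+//   vint16m1_t f0, f1;                     // one register per field
+//   vluxseg2ei64(&f0, &f1, base, idx, vl); // base: const int16_t *,
+//                                          // idx: vuint64m4_t byte offsets
+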
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i16mf2_m))) +void vluxseg8ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i16mf4))) +void vluxseg8ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i16mf4_m))) +void vluxseg8ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u16m1))) +void vluxseg8ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u16m1_m))) +void vluxseg8ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u16mf2))) +void vluxseg8ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u16mf2_m))) +void vluxseg8ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u16mf4))) +void vluxseg8ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u16mf4_m))) +void vluxseg8ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, 
vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32m1))) +void vluxseg2ei8(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32m1_m))) +void vluxseg2ei8(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32m2))) +void vluxseg2ei8(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32m2_m))) +void vluxseg2ei8(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32m4))) +void vluxseg2ei8(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32m4_m))) +void vluxseg2ei8(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32mf2))) +void vluxseg2ei8(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i32mf2_m))) +void vluxseg2ei8(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32m1))) +void vluxseg2ei8(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32m1_m))) +void vluxseg2ei8(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32m2))) +void vluxseg2ei8(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32m2_m))) +void vluxseg2ei8(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32m4))) +void vluxseg2ei8(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32m4_m))) +void vluxseg2ei8(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32mf2))) +void vluxseg2ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u32mf2_m))) +void vluxseg2ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i32m1))) +void vluxseg3ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i32m1_m))) +void vluxseg3ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i32m2))) +void vluxseg3ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i32m2_m))) +void vluxseg3ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i32mf2))) +void vluxseg3ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i32mf2_m))) +void vluxseg3ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u32m1))) +void vluxseg3ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u32m1_m))) +void vluxseg3ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u32m2))) +void vluxseg3ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u32m2_m))) +void vluxseg3ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u32mf2))) +void vluxseg3ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u32mf2_m))) +void vluxseg3ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i32m1))) +void vluxseg4ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i32m1_m))) +void vluxseg4ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i32m2))) +void vluxseg4ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i32m2_m))) +void vluxseg4ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i32mf2))) +void vluxseg4ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i32mf2_m))) +void vluxseg4ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8m1))) +void vsuxseg2ei8(int8_t * op0, vuint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8m1_m))) +void vsuxseg2ei8(vbool8_t op0, int8_t * op1, vuint8m1_t op2, vint8m1_t op3, vint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8m2))) +void vsuxseg2ei8(int8_t * op0, vuint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8m2_m))) +void vsuxseg2ei8(vbool4_t op0, int8_t * op1, vuint8m2_t op2, vint8m2_t op3, vint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8m4))) +void vsuxseg2ei8(int8_t * op0, vuint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8m4_m))) +void vsuxseg2ei8(vbool2_t op0, int8_t * op1, vuint8m4_t op2, vint8m4_t op3, vint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8mf2))) +void vsuxseg2ei8(int8_t * op0, vuint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8mf2_m))) +void vsuxseg2ei8(vbool16_t op0, int8_t * op1, vuint8mf2_t op2, vint8mf2_t op3, vint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8mf4))) +void vsuxseg2ei8(int8_t * op0, vuint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8mf4_m))) +void vsuxseg2ei8(vbool32_t op0, int8_t * op1, vuint8mf4_t op2, vint8mf4_t op3, vint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8mf8))) +void vsuxseg2ei8(int8_t * op0, vuint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_i8mf8_m))) +void vsuxseg2ei8(vbool64_t op0, int8_t * op1, vuint8mf8_t op2, vint8mf8_t op3, vint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u32m1))) +void vluxseg4ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u32m1_m))) +void vluxseg4ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u32m2))) +void vluxseg4ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u32m2_m))) +void vluxseg4ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u32mf2))) +void vluxseg4ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u32mf2_m))) +void vluxseg4ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i32m1))) +void vluxseg5ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i32m1_m))) +void vluxseg5ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i32mf2))) +void vluxseg5ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i32mf2_m))) +void vluxseg5ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u32m1))) +void vluxseg5ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u32m1_m))) +void vluxseg5ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, 
vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u32mf2))) +void vluxseg5ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u32mf2_m))) +void vluxseg5ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i32m1))) +void vluxseg6ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i32m1_m))) +void vluxseg6ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i32mf2))) +void vluxseg6ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i32mf2_m))) +void vluxseg6ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u32m1))) +void vluxseg6ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u32m1_m))) +void vluxseg6ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u32mf2))) +void vluxseg6ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u32mf2_m))) +void vluxseg6ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i32m1))) +void vluxseg7ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i32m1_m))) +void vluxseg7ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i32mf2))) +void vluxseg7ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i32mf2_m))) +void vluxseg7ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u32m1))) +void vluxseg7ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u32m1_m))) +void vluxseg7ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u32mf2))) +void vluxseg7ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u32mf2_m))) +void vluxseg7ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i32m1))) +void vluxseg8ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i32m1_m))) +void vluxseg8ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t 
op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i32mf2))) +void vluxseg8ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i32mf2_m))) +void vluxseg8ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u32m1))) +void vluxseg8ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u32m1_m))) +void vluxseg8ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u32mf2))) +void vluxseg8ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u32mf2_m))) +void vluxseg8ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32m1))) +void vluxseg2ei16(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32m1_m))) +void vluxseg2ei16(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32m2))) +void vluxseg2ei16(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32m2_m))) +void vluxseg2ei16(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32m4))) +void vluxseg2ei16(vint32m4_t * 
op0, vint32m4_t * op1, const int32_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32m4_m))) +void vluxseg2ei16(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32mf2))) +void vluxseg2ei16(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i32mf2_m))) +void vluxseg2ei16(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8m1))) +void vsuxseg2ei8(uint8_t * op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8m1_m))) +void vsuxseg2ei8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, vuint8m1_t op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8m2))) +void vsuxseg2ei8(uint8_t * op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8m2_m))) +void vsuxseg2ei8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, vuint8m2_t op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8m4))) +void vsuxseg2ei8(uint8_t * op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8m4_m))) +void vsuxseg2ei8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, vuint8m4_t op3, vuint8m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8mf2))) +void vsuxseg2ei8(uint8_t * op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8mf2_m))) +void vsuxseg2ei8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, vuint8mf2_t op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8mf4))) +void vsuxseg2ei8(uint8_t * op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8mf4_m))) +void vsuxseg2ei8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, vuint8mf4_t op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8mf8))) +void vsuxseg2ei8(uint8_t * op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_u8mf8_m))) +void vsuxseg2ei8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, vuint8mf8_t op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32m1))) +void vluxseg2ei16(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32m1_m))) +void vluxseg2ei16(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, 
vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32m2))) +void vluxseg2ei16(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32m2_m))) +void vluxseg2ei16(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32m4))) +void vluxseg2ei16(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32m4_m))) +void vluxseg2ei16(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32mf2))) +void vluxseg2ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u32mf2_m))) +void vluxseg2ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i32m1))) +void vluxseg3ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i32m1_m))) +void vluxseg3ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i32m2))) +void vluxseg3ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i32m2_m))) +void vluxseg3ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i32mf2))) +void vluxseg3ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i32mf2_m))) +void vluxseg3ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u32m1))) +void vluxseg3ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u32m1_m))) +void vluxseg3ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u32m2))) +void vluxseg3ei16(vuint32m2_t * 
op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u32m2_m))) +void vluxseg3ei16(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u32mf2))) +void vluxseg3ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u32mf2_m))) +void vluxseg3ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i32m1))) +void vluxseg4ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i32m1_m))) +void vluxseg4ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i32m2))) +void vluxseg4ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i32m2_m))) +void vluxseg4ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i32mf2))) +void vluxseg4ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i32mf2_m))) +void vluxseg4ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u32m1))) +void vluxseg4ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u32m1_m))) +void vluxseg4ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u32m2))) +void vluxseg4ei16(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u32m2_m))) +void vluxseg4ei16(vuint32m2_t * op0, vuint32m2_t * op1, 
vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u32mf2))) +void vluxseg4ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u32mf2_m))) +void vluxseg4ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i32m1))) +void vluxseg5ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i32m1_m))) +void vluxseg5ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i32mf2))) +void vluxseg5ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i32mf2_m))) +void vluxseg5ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u32m1))) +void vluxseg5ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u32m1_m))) +void vluxseg5ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u32mf2))) +void vluxseg5ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u32mf2_m))) +void vluxseg5ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i32m1))) +void vluxseg6ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i32m1_m))) +void vluxseg6ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i32mf2))) +void vluxseg6ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i32mf2_m))) +void vluxseg6ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u32m1))) +void vluxseg6ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u32m1_m))) +void vluxseg6ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u32mf2))) +void vluxseg6ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u32mf2_m))) +void vluxseg6ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i32m1))) +void vluxseg7ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i32m1_m))) +void vluxseg7ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i32mf2))) +void vluxseg7ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i32mf2_m))) 
+void vluxseg7ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u32m1))) +void vluxseg7ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u32m1_m))) +void vluxseg7ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u32mf2))) +void vluxseg7ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u32mf2_m))) +void vluxseg7ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i32m1))) +void vluxseg8ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i32m1_m))) +void vluxseg8ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i32mf2))) +void vluxseg8ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i32mf2_m))) +void vluxseg8ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u32m1))) +void vluxseg8ei16(vuint32m1_t * op0, 
vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u32m1_m))) +void vluxseg8ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u32mf2))) +void vluxseg8ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u32mf2_m))) +void vluxseg8ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32m1))) +void vluxseg2ei32(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32m1_m))) +void vluxseg2ei32(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32m2))) +void vluxseg2ei32(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32m2_m))) +void vluxseg2ei32(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32m4))) +void vluxseg2ei32(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32m4_m))) +void vluxseg2ei32(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32mf2))) +void vluxseg2ei32(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i32mf2_m))) +void vluxseg2ei32(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32m1))) +void vluxseg2ei32(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32m1_m))) +void vluxseg2ei32(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32m2))) +void vluxseg2ei32(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32m2_m))) +void vluxseg2ei32(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32m4))) +void vluxseg2ei32(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32m4_m))) +void vluxseg2ei32(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32mf2))) +void vluxseg2ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u32mf2_m))) +void vluxseg2ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i32m1))) +void vluxseg3ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i32m1_m))) +void vluxseg3ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i32m2))) +void vluxseg3ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i32m2_m))) +void vluxseg3ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i32mf2))) +void vluxseg3ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i32mf2_m))) +void vluxseg3ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u32m1))) +void vluxseg3ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u32m1_m))) +void vluxseg3ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t 
op5, vuint32m1_t op6, const uint32_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u32m2))) +void vluxseg3ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u32m2_m))) +void vluxseg3ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u32mf2))) +void vluxseg3ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u32mf2_m))) +void vluxseg3ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i32m1))) +void vluxseg4ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i32m1_m))) +void vluxseg4ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i32m2))) +void vluxseg4ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i32m2_m))) +void vluxseg4ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i32mf2))) +void vluxseg4ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i32mf2_m))) +void vluxseg4ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u32m1))) +void vluxseg4ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u32m1_m))) +void vluxseg4ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u32m2))) +void vluxseg4ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * 
op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u32m2_m))) +void vluxseg4ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u32mf2))) +void vluxseg4ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u32mf2_m))) +void vluxseg4ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i32m1))) +void vluxseg5ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i32m1_m))) +void vluxseg5ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i32mf2))) +void vluxseg5ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i32mf2_m))) +void vluxseg5ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u32m1))) +void vluxseg5ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u32m1_m))) +void vluxseg5ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u32mf2))) +void vluxseg5ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u32mf2_m))) +void vluxseg5ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i32m1))) +void 
vluxseg6ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i32m1_m))) +void vluxseg6ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i32mf2))) +void vluxseg6ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i32mf2_m))) +void vluxseg6ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u32m1))) +void vluxseg6ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u32m1_m))) +void vluxseg6ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u32mf2))) +void vluxseg6ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u32mf2_m))) +void vluxseg6ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i32m1))) +void vluxseg7ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i32m1_m))) +void vluxseg7ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i32mf2))) +void vluxseg7ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, 
vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i32mf2_m))) +void vluxseg7ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u32m1))) +void vluxseg7ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u32m1_m))) +void vluxseg7ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u32mf2))) +void vluxseg7ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u32mf2_m))) +void vluxseg7ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i32m1))) +void vluxseg8ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i32m1_m))) +void vluxseg8ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i32mf2))) +void vluxseg8ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i32mf2_m))) +void vluxseg8ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const 
int32_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u32m1))) +void vluxseg8ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u32m1_m))) +void vluxseg8ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u32mf2))) +void vluxseg8ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u32mf2_m))) +void vluxseg8ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32m1))) +void vluxseg2ei64(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32m1_m))) +void vluxseg2ei64(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32m2))) +void vluxseg2ei64(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32m2_m))) +void vluxseg2ei64(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32m4))) +void vluxseg2ei64(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32m4_m))) +void vluxseg2ei64(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32mf2))) +void vluxseg2ei64(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i32mf2_m))) +void vluxseg2ei64(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32m1))) +void vluxseg2ei64(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32m1_m))) +void vluxseg2ei64(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32m2))) +void vluxseg2ei64(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32m2_m))) +void vluxseg2ei64(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32m4))) +void vluxseg2ei64(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32m4_m))) +void vluxseg2ei64(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32mf2))) +void vluxseg2ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u32mf2_m))) +void vluxseg2ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i32m1))) +void vluxseg3ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i32m1_m))) +void vluxseg3ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i32m2))) +void vluxseg3ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i32m2_m))) +void vluxseg3ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i32mf2))) +void vluxseg3ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i32mf2_m))) +void vluxseg3ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u32m1))) +void vluxseg3ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint64m2_t op4, size_t op5); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u32m1_m))) +void vluxseg3ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u32m2))) +void vluxseg3ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u32m2_m))) +void vluxseg3ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u32mf2))) +void vluxseg3ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u32mf2_m))) +void vluxseg3ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i32m1))) +void vluxseg4ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i32m1_m))) +void vluxseg4ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i32m2))) +void vluxseg4ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i32m2_m))) +void vluxseg4ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i32mf2))) +void vluxseg4ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i32mf2_m))) +void vluxseg4ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u32m1))) +void vluxseg4ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u32m1_m))) +void vluxseg4ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint64m2_t op10, size_t 
op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u32m2))) +void vluxseg4ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u32m2_m))) +void vluxseg4ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u32mf2))) +void vluxseg4ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u32mf2_m))) +void vluxseg4ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i32m1))) +void vluxseg5ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i32m1_m))) +void vluxseg5ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i32mf2))) +void vluxseg5ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i32mf2_m))) +void vluxseg5ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u32m1))) +void vluxseg5ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u32m1_m))) +void vluxseg5ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u32mf2))) +void vluxseg5ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u32mf2_m))) +void vluxseg5ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t 
op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i32m1))) +void vluxseg6ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i32m1_m))) +void vluxseg6ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i32mf2))) +void vluxseg6ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i32mf2_m))) +void vluxseg6ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u32m1))) +void vluxseg6ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u32m1_m))) +void vluxseg6ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u32mf2))) +void vluxseg6ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u32mf2_m))) +void vluxseg6ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i32m1))) +void vluxseg7ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i32m1_m))) +void vluxseg7ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint64m2_t op16, size_t op17); + 
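+/* Editorial sketch (not produced by the intrinsics generator): the
+ * __rvv_overloaded declarations above resolve purely on argument types, so
+ * one spelling covers every element width, LMUL, and segment count.
+ * Assuming a translation unit that includes this header and targets the V
+ * extension (e.g. -march=rv64gcv), a hypothetical gather of {x, y} int32
+ * pairs through 64-bit byte offsets could read:
+ *
+ *   void gather_xy(vint32m1_t *x, vint32m1_t *y, const int32_t *base,
+ *                  vuint64m2_t byte_offsets, size_t vl) {
+ *     // Unordered indexed segment load: for each element i, x[i] and y[i]
+ *     // receive the two consecutive int32 values found at
+ *     // (const char *)base + byte_offsets[i].
+ *     vluxseg2ei64(x, y, base, byte_offsets, vl);
+ *   }
+ *
+ * Note the index type vuint64m2_t: a 64-bit index vector must hold as many
+ * elements as the 32-bit data vector, hence twice its LMUL (64/32 = 2).
+ */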
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i32mf2))) +void vluxseg7ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i32mf2_m))) +void vluxseg7ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u32m1))) +void vluxseg7ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u32m1_m))) +void vluxseg7ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u32mf2))) +void vluxseg7ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u32mf2_m))) +void vluxseg7ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i32m1))) +void vluxseg8ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i32m1_m))) +void vluxseg8ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i32mf2))) +void vluxseg8ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i32mf2_m))) +void vluxseg8ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, 
vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u32m1))) +void vluxseg8ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u32m1_m))) +void vluxseg8ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u32mf2))) +void vluxseg8ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u32mf2_m))) +void vluxseg8ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i64m1))) +void vluxseg2ei8(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i64m1_m))) +void vluxseg2ei8(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i64m2))) +void vluxseg2ei8(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i64m2_m))) +void vluxseg2ei8(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i64m4))) +void vluxseg2ei8(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_i64m4_m))) +void vluxseg2ei8(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u64m1))) +void vluxseg2ei8(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u64m1_m))) +void vluxseg2ei8(vuint64m1_t * op0, vuint64m1_t * op1, 
vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u64m2))) +void vluxseg2ei8(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u64m2_m))) +void vluxseg2ei8(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u64m4))) +void vluxseg2ei8(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_u64m4_m))) +void vluxseg2ei8(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i64m1))) +void vluxseg3ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i64m1_m))) +void vluxseg3ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i64m2))) +void vluxseg3ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_i64m2_m))) +void vluxseg3ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u64m1))) +void vluxseg3ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u64m1_m))) +void vluxseg3ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u64m2))) +void vluxseg3ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_u64m2_m))) +void vluxseg3ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i64m1))) +void vluxseg4ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i64m1_m))) +void vluxseg4ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint8mf8_t op10, size_t op11); 
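+// Sketch of the masked (_m) overload shape (editorial illustration only):
+// after the destination pointers comes the mask, then one maskedoff value
+// per field supplying the result for inactive elements. With hypothetical
+// locals, the four-field masked form declared above would be called as:
+//
+//   vluxseg4ei8(&f0, &f1, &f2, &f3,   /* vint64m1_t destinations */
+//               mask,                 /* vbool64_t */
+//               m0, m1, m2, m3,       /* vint64m1_t maskedoff values */
+//               base, idx, vl);       /* const int64_t *, vuint8mf8_t, size_t */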
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i64m2))) +void vluxseg4ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_i64m2_m))) +void vluxseg4ei8(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u64m1))) +void vluxseg4ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u64m1_m))) +void vluxseg4ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u64m2))) +void vluxseg4ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_u64m2_m))) +void vluxseg4ei8(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i64m1))) +void vluxseg5ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_i64m1_m))) +void vluxseg5ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u64m1))) +void vluxseg5ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_u64m1_m))) +void vluxseg5ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i64m1))) +void vluxseg6ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_i64m1_m))) +void vluxseg6ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint8mf8_t op14, size_t op15); 
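+// Why the index type is vuint8mf8_t here (editorial note): for indexed
+// segment loads the index register group is sized as
+// EMUL = (index EEW / SEW) * LMUL, so 8-bit indices against 64-bit
+// elements at LMUL=1 give EMUL = 8/64 = 1/8, i.e. an mf8 index vector.
+// The same rule yields vuint16mf4_t for the ei16 variants further below.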
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u64m1))) +void vluxseg6ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_u64m1_m))) +void vluxseg6ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i64m1))) +void vluxseg7ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_i64m1_m))) +void vluxseg7ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u64m1))) +void vluxseg7ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_u64m1_m))) +void vluxseg7ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i64m1))) +void vluxseg8ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_i64m1_m))) +void vluxseg8ei8(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u64m1))) +void vluxseg8ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_u64m1_m))) +void vluxseg8ei8(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, 
vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i64m1))) +void vluxseg2ei16(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i64m1_m))) +void vluxseg2ei16(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i64m2))) +void vluxseg2ei16(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i64m2_m))) +void vluxseg2ei16(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i64m4))) +void vluxseg2ei16(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_i64m4_m))) +void vluxseg2ei16(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u64m1))) +void vluxseg2ei16(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u64m1_m))) +void vluxseg2ei16(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u64m2))) +void vluxseg2ei16(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u64m2_m))) +void vluxseg2ei16(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u64m4))) +void vluxseg2ei16(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_u64m4_m))) +void vluxseg2ei16(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i64m1))) +void vluxseg3ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i64m1_m))) +void vluxseg3ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i64m2))) +void vluxseg3ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint16mf2_t op4, size_t 
op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_i64m2_m))) +void vluxseg3ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u64m1))) +void vluxseg3ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u64m1_m))) +void vluxseg3ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u64m2))) +void vluxseg3ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_u64m2_m))) +void vluxseg3ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i64m1))) +void vluxseg4ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i64m1_m))) +void vluxseg4ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i64m2))) +void vluxseg4ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_i64m2_m))) +void vluxseg4ei16(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u64m1))) +void vluxseg4ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u64m1_m))) +void vluxseg4ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u64m2))) +void vluxseg4ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_u64m2_m))) +void vluxseg4ei16(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint16mf2_t op10, size_t 
op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i64m1))) +void vluxseg5ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_i64m1_m))) +void vluxseg5ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u64m1))) +void vluxseg5ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_u64m1_m))) +void vluxseg5ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i64m1))) +void vluxseg6ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_i64m1_m))) +void vluxseg6ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u64m1))) +void vluxseg6ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_u64m1_m))) +void vluxseg6ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i64m1))) +void vluxseg7ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_i64m1_m))) +void vluxseg7ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u64m1))) +void vluxseg7ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, 
const uint64_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_u64m1_m))) +void vluxseg7ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i64m1))) +void vluxseg8ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_i64m1_m))) +void vluxseg8ei16(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u64m1))) +void vluxseg8ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_u64m1_m))) +void vluxseg8ei16(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i64m1))) +void vluxseg2ei32(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i64m1_m))) +void vluxseg2ei32(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i64m2))) +void vluxseg2ei32(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i64m2_m))) +void vluxseg2ei32(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i64m4))) +void vluxseg2ei32(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_i64m4_m))) +void vluxseg2ei32(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u64m1))) +void vluxseg2ei32(vuint64m1_t * op0, vuint64m1_t * 
op1, const uint64_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u64m1_m))) +void vluxseg2ei32(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u64m2))) +void vluxseg2ei32(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u64m2_m))) +void vluxseg2ei32(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u64m4))) +void vluxseg2ei32(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_u64m4_m))) +void vluxseg2ei32(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i64m1))) +void vluxseg3ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i64m1_m))) +void vluxseg3ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i64m2))) +void vluxseg3ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_i64m2_m))) +void vluxseg3ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u64m1))) +void vluxseg3ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u64m1_m))) +void vluxseg3ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u64m2))) +void vluxseg3ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_u64m2_m))) +void vluxseg3ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i64m1))) +void vluxseg4ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i64m1_m))) +void vluxseg4ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i64m2))) +void vluxseg4ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_i64m2_m))) +void vluxseg4ei32(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u64m1))) +void vluxseg4ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u64m1_m))) +void vluxseg4ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u64m2))) +void vluxseg4ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, const uint64_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_u64m2_m))) +void vluxseg4ei32(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i64m1))) +void vluxseg5ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_i64m1_m))) +void vluxseg5ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u64m1))) +void vluxseg5ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_u64m1_m))) +void vluxseg5ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i64m1))) +void vluxseg6ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_i64m1_m))) +void vluxseg6ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u64m1))) +void vluxseg6ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_u64m1_m))) +void vluxseg6ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i64m1))) +void vluxseg7ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_i64m1_m))) +void vluxseg7ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u64m1))) +void vluxseg7ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_u64m1_m))) +void vluxseg7ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i64m1))) +void vluxseg8ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_i64m1_m))) +void vluxseg8ei32(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u64m1))) +void vluxseg8ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const 
uint64_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_u64m1_m))) +void vluxseg8ei32(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i64m1))) +void vluxseg2ei64(vint64m1_t * op0, vint64m1_t * op1, const int64_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i64m1_m))) +void vluxseg2ei64(vint64m1_t * op0, vint64m1_t * op1, vbool64_t op2, vint64m1_t op3, vint64m1_t op4, const int64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i64m2))) +void vluxseg2ei64(vint64m2_t * op0, vint64m2_t * op1, const int64_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i64m2_m))) +void vluxseg2ei64(vint64m2_t * op0, vint64m2_t * op1, vbool32_t op2, vint64m2_t op3, vint64m2_t op4, const int64_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i64m4))) +void vluxseg2ei64(vint64m4_t * op0, vint64m4_t * op1, const int64_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_i64m4_m))) +void vluxseg2ei64(vint64m4_t * op0, vint64m4_t * op1, vbool16_t op2, vint64m4_t op3, vint64m4_t op4, const int64_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u64m1))) +void vluxseg2ei64(vuint64m1_t * op0, vuint64m1_t * op1, const uint64_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u64m1_m))) +void vluxseg2ei64(vuint64m1_t * op0, vuint64m1_t * op1, vbool64_t op2, vuint64m1_t op3, vuint64m1_t op4, const uint64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u64m2))) +void vluxseg2ei64(vuint64m2_t * op0, vuint64m2_t * op1, const uint64_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u64m2_m))) +void vluxseg2ei64(vuint64m2_t * op0, vuint64m2_t * op1, vbool32_t op2, vuint64m2_t op3, vuint64m2_t op4, const uint64_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u64m4))) +void vluxseg2ei64(vuint64m4_t * op0, vuint64m4_t * op1, const uint64_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_u64m4_m))) +void vluxseg2ei64(vuint64m4_t * op0, vuint64m4_t * op1, vbool16_t op2, vuint64m4_t op3, vuint64m4_t op4, const uint64_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i64m1))) +void vluxseg3ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, const int64_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i64m1_m))) +void 
vluxseg3ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vbool64_t op3, vint64m1_t op4, vint64m1_t op5, vint64m1_t op6, const int64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i64m2))) +void vluxseg3ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, const int64_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_i64m2_m))) +void vluxseg3ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vbool32_t op3, vint64m2_t op4, vint64m2_t op5, vint64m2_t op6, const int64_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u64m1))) +void vluxseg3ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, const uint64_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u64m1_m))) +void vluxseg3ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vbool64_t op3, vuint64m1_t op4, vuint64m1_t op5, vuint64m1_t op6, const uint64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u64m2))) +void vluxseg3ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, const uint64_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_u64m2_m))) +void vluxseg3ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vbool32_t op3, vuint64m2_t op4, vuint64m2_t op5, vuint64m2_t op6, const uint64_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i64m1))) +void vluxseg4ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, const int64_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i64m1_m))) +void vluxseg4ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vbool64_t op4, vint64m1_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, const int64_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i64m2))) +void vluxseg4ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, const int64_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_i64m2_m))) +void vluxseg4ei64(vint64m2_t * op0, vint64m2_t * op1, vint64m2_t * op2, vint64m2_t * op3, vbool32_t op4, vint64m2_t op5, vint64m2_t op6, vint64m2_t op7, vint64m2_t op8, const int64_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u64m1))) +void vluxseg4ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, const uint64_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u64m1_m))) +void vluxseg4ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vbool64_t op4, vuint64m1_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, const uint64_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u64m2))) +void vluxseg4ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t 
* op3, const uint64_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_u64m2_m))) +void vluxseg4ei64(vuint64m2_t * op0, vuint64m2_t * op1, vuint64m2_t * op2, vuint64m2_t * op3, vbool32_t op4, vuint64m2_t op5, vuint64m2_t op6, vuint64m2_t op7, vuint64m2_t op8, const uint64_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i64m1))) +void vluxseg5ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, const int64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_i64m1_m))) +void vluxseg5ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vbool64_t op5, vint64m1_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, const int64_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u64m1))) +void vluxseg5ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, const uint64_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_u64m1_m))) +void vluxseg5ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vbool64_t op5, vuint64m1_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, const uint64_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i64m1))) +void vluxseg6ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, const int64_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_i64m1_m))) +void vluxseg6ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vbool64_t op6, vint64m1_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, const int64_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u64m1))) +void vluxseg6ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, const uint64_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_u64m1_m))) +void vluxseg6ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vbool64_t op6, vuint64m1_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, const uint64_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i64m1))) +void vluxseg7ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, const int64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_i64m1_m))) +void vluxseg7ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vbool64_t op7, vint64m1_t op8, vint64m1_t op9, vint64m1_t op10, 
vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, const int64_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u64m1))) +void vluxseg7ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, const uint64_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_u64m1_m))) +void vluxseg7ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vbool64_t op7, vuint64m1_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, const uint64_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i64m1))) +void vluxseg8ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, const int64_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_i64m1_m))) +void vluxseg8ei64(vint64m1_t * op0, vint64m1_t * op1, vint64m1_t * op2, vint64m1_t * op3, vint64m1_t * op4, vint64m1_t * op5, vint64m1_t * op6, vint64m1_t * op7, vbool64_t op8, vint64m1_t op9, vint64m1_t op10, vint64m1_t op11, vint64m1_t op12, vint64m1_t op13, vint64m1_t op14, vint64m1_t op15, vint64m1_t op16, const int64_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u64m1))) +void vluxseg8ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, const uint64_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_u64m1_m))) +void vluxseg8ei64(vuint64m1_t * op0, vuint64m1_t * op1, vuint64m1_t * op2, vuint64m1_t * op3, vuint64m1_t * op4, vuint64m1_t * op5, vuint64m1_t * op6, vuint64m1_t * op7, vbool64_t op8, vuint64m1_t op9, vuint64m1_t op10, vuint64m1_t op11, vuint64m1_t op12, vuint64m1_t op13, vuint64m1_t op14, vuint64m1_t op15, vuint64m1_t op16, const uint64_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8m1))) +void vloxseg2ei8(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8m1_m))) +void vloxseg2ei8(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8m2))) +void vloxseg2ei8(vint8m2_t * op0, vint8m2_t * op1, const int8_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8m2_m))) +void vloxseg2ei8(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8m4))) +void vloxseg2ei8(vint8m4_t * op0, vint8m4_t * op1, const int8_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8m4_m))) +void vloxseg2ei8(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, vuint8m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8mf2))) +void vloxseg2ei8(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8mf2_m))) +void vloxseg2ei8(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8mf4))) +void vloxseg2ei8(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8mf4_m))) +void vloxseg2ei8(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8mf8))) +void vloxseg2ei8(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i8mf8_m))) +void vloxseg2ei8(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8m1))) +void vloxseg2ei8(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8m1_m))) +void vloxseg2ei8(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8m2))) +void vloxseg2ei8(vuint8m2_t * op0, vuint8m2_t * op1, const uint8_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8m2_m))) +void vloxseg2ei8(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8m4))) +void vloxseg2ei8(vuint8m4_t * op0, vuint8m4_t * op1, const uint8_t * op2, vuint8m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8m4_m))) +void vloxseg2ei8(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, vuint8m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8mf2))) +void vloxseg2ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8mf2_m))) +void vloxseg2ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8mf4))) +void vloxseg2ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8mf4_m))) +void vloxseg2ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8mf8))) +void vloxseg2ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u8mf8_m))) +void vloxseg2ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8m1))) +void vloxseg3ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8m1_m))) +void vloxseg3ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8m2))) +void vloxseg3ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, const int8_t * op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8m2_m))) +void vloxseg3ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, vuint8m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8mf2))) +void vloxseg3ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8mf2_m))) +void vloxseg3ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8mf4))) +void vloxseg3ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8mf4_m))) +void vloxseg3ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8mf8))) +void vloxseg3ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i8mf8_m))) +void vloxseg3ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8m1))) +void vloxseg3ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8m1_m))) +void vloxseg3ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t 
op6, const uint8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8m2))) +void vloxseg3ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, const uint8_t * op3, vuint8m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8m2_m))) +void vloxseg3ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, vuint8m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8mf2))) +void vloxseg3ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8mf2_m))) +void vloxseg3ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8mf4))) +void vloxseg3ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8mf4_m))) +void vloxseg3ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8mf8))) +void vloxseg3ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u8mf8_m))) +void vloxseg3ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8m1))) +void vloxseg4ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8m1_m))) +void vloxseg4ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8m2))) +void vloxseg4ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, const int8_t * op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8m2_m))) +void vloxseg4ei8(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, vuint8m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8mf2))) +void vloxseg4ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8mf2_m))) +void vloxseg4ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, 
vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8mf4))) +void vloxseg4ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8mf4_m))) +void vloxseg4ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8mf8))) +void vloxseg4ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i8mf8_m))) +void vloxseg4ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8m1))) +void vloxseg4ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8m1_m))) +void vloxseg4ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8m2))) +void vloxseg4ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, const uint8_t * op4, vuint8m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8m2_m))) +void vloxseg4ei8(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, vuint8m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8mf2))) +void vloxseg4ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8mf2_m))) +void vloxseg4ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8mf4))) +void vloxseg4ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8mf4_m))) +void vloxseg4ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8mf8))) +void vloxseg4ei8(vuint8mf8_t * op0, 
vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u8mf8_m))) +void vloxseg4ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8m1))) +void vloxseg5ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8m1_m))) +void vloxseg5ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint8m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8mf2))) +void vloxseg5ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8mf2_m))) +void vloxseg5ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8mf4))) +void vloxseg5ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8mf4_m))) +void vloxseg5ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8mf8))) +void vloxseg5ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i8mf8_m))) +void vloxseg5ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8m1))) +void vloxseg5ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8m1_m))) +void vloxseg5ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint8m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8mf2))) +void vloxseg5ei8(vuint8mf2_t * op0, vuint8mf2_t 
* op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8mf2_m))) +void vloxseg5ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8mf4))) +void vloxseg5ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8mf4_m))) +void vloxseg5ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8mf8))) +void vloxseg5ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u8mf8_m))) +void vloxseg5ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8m1))) +void vloxseg6ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8m1_m))) +void vloxseg6ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint8m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8mf2))) +void vloxseg6ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8mf2_m))) +void vloxseg6ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8mf4))) +void vloxseg6ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8mf4_m))) +void vloxseg6ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, 
vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8mf8))) +void vloxseg6ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i8mf8_m))) +void vloxseg6ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8m1))) +void vloxseg6ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint8m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8m1_m))) +void vloxseg6ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint8m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8mf2))) +void vloxseg6ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8mf2_m))) +void vloxseg6ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8mf4))) +void vloxseg6ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8mf4_m))) +void vloxseg6ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8mf8))) +void vloxseg6ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u8mf8_m))) +void vloxseg6ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8m1))) +void vloxseg7ei8(vint8m1_t 
* op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8m1_m))) +void vloxseg7ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint8m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8mf2))) +void vloxseg7ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8mf2_m))) +void vloxseg7ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8mf4))) +void vloxseg7ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8mf4_m))) +void vloxseg7ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8mf8))) +void vloxseg7ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i8mf8_m))) +void vloxseg7ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8m1))) +void vloxseg7ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8m1_m))) +void vloxseg7ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint8m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8mf2))) +void vloxseg7ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, 
vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8mf2_m))) +void vloxseg7ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8mf4))) +void vloxseg7ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8mf4_m))) +void vloxseg7ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8mf8))) +void vloxseg7ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u8mf8_m))) +void vloxseg7ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8m1))) +void vloxseg8ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8m1_m))) +void vloxseg8ei8(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint8m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8mf2))) +void vloxseg8ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8mf2_m))) +void vloxseg8ei8(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint8mf2_t op18, size_t op19); + 
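As a brief aside from the editor (not part of the patch itself): the
overloaded spellings above are easiest to read from a call site. Below is
a minimal sketch, assuming clang 14 with the vector extension (including
segment load/store support) enabled, e.g. -march=rv64gcv; it uses only the
vloxseg2ei8 u8m1 overload declared earlier, and the helper name
gather_pairs is hypothetical:

#include <riscv_vector.h>

/* Editor's sketch: gather vl two-byte {lo, hi} segments. Field 0 of
 * segment i is read from base[offsets[i]] and field 1 from
 * base[offsets[i] + 1]. */
static void gather_pairs(const uint8_t *base, vuint8m1_t offsets,
                         vuint8m1_t *lo, vuint8m1_t *hi, size_t vl) {
  vloxseg2ei8(lo, hi, base, offsets, vl);
}

The masked _m overloads keep the same trailing argument shape after a
vbool8_t mask and two merge vectors, so the same call pattern extends to
conditional gathers.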
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8mf4))) +void vloxseg8ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8mf4_m))) +void vloxseg8ei8(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8mf8))) +void vloxseg8ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i8mf8_m))) +void vloxseg8ei8(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8m1))) +void vloxseg8ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint8m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8m1_m))) +void vloxseg8ei8(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint8m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8mf2))) +void vloxseg8ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8mf2_m))) +void vloxseg8ei8(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8mf4))) +void vloxseg8ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8mf4_m))) +void vloxseg8ei8(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * 
op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8mf8))) +void vloxseg8ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u8mf8_m))) +void vloxseg8ei8(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8m1))) +void vloxseg2ei16(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8m1_m))) +void vloxseg2ei16(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8m2))) +void vloxseg2ei16(vint8m2_t * op0, vint8m2_t * op1, const int8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8m2_m))) +void vloxseg2ei16(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8m4))) +void vloxseg2ei16(vint8m4_t * op0, vint8m4_t * op1, const int8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8m4_m))) +void vloxseg2ei16(vint8m4_t * op0, vint8m4_t * op1, vbool2_t op2, vint8m4_t op3, vint8m4_t op4, const int8_t * op5, vuint16m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8mf2))) +void vloxseg2ei16(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8mf2_m))) +void vloxseg2ei16(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8mf4))) +void vloxseg2ei16(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8mf4_m))) +void vloxseg2ei16(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8mf8))) +void vloxseg2ei16(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded 
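/* Editorial note (not part of the upstream header): from vloxseg2ei16
 * onward the index operand is a vector of 16-bit elements, so its
 * register group is twice that of the 8-bit data, per
 * EMUL = (index EEW / data SEW) * LMUL: vint8m1_t data pairs with
 * vuint16m2_t indices, vint8mf2_t with vuint16m1_t, down to vint8mf8_t
 * with vuint16mf4_t, as the surrounding declarations show. */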
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i8mf8_m))) +void vloxseg2ei16(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8m1))) +void vloxseg2ei16(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8m1_m))) +void vloxseg2ei16(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8m2))) +void vloxseg2ei16(vuint8m2_t * op0, vuint8m2_t * op1, const uint8_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8m2_m))) +void vloxseg2ei16(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8m4))) +void vloxseg2ei16(vuint8m4_t * op0, vuint8m4_t * op1, const uint8_t * op2, vuint16m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8m4_m))) +void vloxseg2ei16(vuint8m4_t * op0, vuint8m4_t * op1, vbool2_t op2, vuint8m4_t op3, vuint8m4_t op4, const uint8_t * op5, vuint16m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8mf2))) +void vloxseg2ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8mf2_m))) +void vloxseg2ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8mf4))) +void vloxseg2ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8mf4_m))) +void vloxseg2ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8mf8))) +void vloxseg2ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u8mf8_m))) +void vloxseg2ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8m1))) +void vloxseg3ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8m1_m))) +void vloxseg3ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8m2))) +void vloxseg3ei16(vint8m2_t * op0, vint8m2_t * op1, 
vint8m2_t * op2, const int8_t * op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8m2_m))) +void vloxseg3ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, vuint16m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8mf2))) +void vloxseg3ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8mf2_m))) +void vloxseg3ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8mf4))) +void vloxseg3ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8mf4_m))) +void vloxseg3ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8mf8))) +void vloxseg3ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i8mf8_m))) +void vloxseg3ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8m1))) +void vloxseg3ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8m1_m))) +void vloxseg3ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8m2))) +void vloxseg3ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, const uint8_t * op3, vuint16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8m2_m))) +void vloxseg3ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, vuint16m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8mf2))) +void vloxseg3ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8mf2_m))) +void vloxseg3ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8mf4))) +void vloxseg3ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint16mf2_t op4, size_t 
op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8mf4_m))) +void vloxseg3ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8mf8))) +void vloxseg3ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u8mf8_m))) +void vloxseg3ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8m1))) +void vloxseg4ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8m1_m))) +void vloxseg4ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8m2))) +void vloxseg4ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, const int8_t * op4, vuint16m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8m2_m))) +void vloxseg4ei16(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, vuint16m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8mf2))) +void vloxseg4ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8mf2_m))) +void vloxseg4ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8mf4))) +void vloxseg4ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8mf4_m))) +void vloxseg4ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8mf8))) +void vloxseg4ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i8mf8_m))) +void vloxseg4ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8m1))) +void vloxseg4ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8m1_m))) +void vloxseg4ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8m2))) +void vloxseg4ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, const uint8_t * op4, vuint16m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8m2_m))) +void vloxseg4ei16(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, vuint16m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8mf2))) +void vloxseg4ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8mf2_m))) +void vloxseg4ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8mf4))) +void vloxseg4ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8mf4_m))) +void vloxseg4ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8mf8))) +void vloxseg4ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u8mf8_m))) +void vloxseg4ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8m1))) +void vloxseg5ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8m1_m))) +void vloxseg5ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint16m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8mf2))) +void vloxseg5ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, 
vint8mf2_t * op4, const int8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8mf2_m))) +void vloxseg5ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8mf4))) +void vloxseg5ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8mf4_m))) +void vloxseg5ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8mf8))) +void vloxseg5ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i8mf8_m))) +void vloxseg5ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8m1))) +void vloxseg5ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8m1_m))) +void vloxseg5ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint16m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8mf2))) +void vloxseg5ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8mf2_m))) +void vloxseg5ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8mf4))) +void vloxseg5ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8mf4_m))) +void vloxseg5ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8mf8))) +void vloxseg5ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u8mf8_m))) +void vloxseg5ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8m1))) +void vloxseg6ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint16m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8m1_m))) +void vloxseg6ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint16m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8mf2))) +void vloxseg6ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8mf2_m))) +void vloxseg6ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8mf4))) +void vloxseg6ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8mf4_m))) +void vloxseg6ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8mf8))) +void vloxseg6ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i8mf8_m))) +void vloxseg6ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8m1))) +void vloxseg6ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint16m2_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8m1_m))) +void vloxseg6ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint16m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8mf2))) +void vloxseg6ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8mf2_m))) +void vloxseg6ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8mf4))) +void vloxseg6ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8mf4_m))) +void vloxseg6ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8mf8))) +void vloxseg6ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u8mf8_m))) +void vloxseg6ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8m1))) +void vloxseg7ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8m1_m))) +void vloxseg7ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint16m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8mf2))) +void vloxseg7ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8mf2_m))) +void vloxseg7ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, 
vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8mf4))) +void vloxseg7ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8mf4_m))) +void vloxseg7ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8mf8))) +void vloxseg7ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i8mf8_m))) +void vloxseg7ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8m1))) +void vloxseg7ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8m1_m))) +void vloxseg7ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint16m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8mf2))) +void vloxseg7ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8mf2_m))) +void vloxseg7ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8mf4))) +void vloxseg7ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8mf4_m))) +void 
vloxseg7ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8mf8))) +void vloxseg7ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u8mf8_m))) +void vloxseg7ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8m1))) +void vloxseg8ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint16m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8m1_m))) +void vloxseg8ei16(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint16m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8mf2))) +void vloxseg8ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8mf2_m))) +void vloxseg8ei16(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8mf4))) +void vloxseg8ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8mf4_m))) +void vloxseg8ei16(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8mf8))) +void vloxseg8ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * 
op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i8mf8_m))) +void vloxseg8ei16(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8m1))) +void vloxseg8ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint16m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8m1_m))) +void vloxseg8ei16(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint16m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8mf2))) +void vloxseg8ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8mf2_m))) +void vloxseg8ei16(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8mf4))) +void vloxseg8ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8mf4_m))) +void vloxseg8ei16(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8mf8))) +void vloxseg8ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u8mf8_m))) +void vloxseg8ei16(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, 
vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8m1))) +void vloxseg2ei32(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8m1_m))) +void vloxseg2ei32(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8m2))) +void vloxseg2ei32(vint8m2_t * op0, vint8m2_t * op1, const int8_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8m2_m))) +void vloxseg2ei32(vint8m2_t * op0, vint8m2_t * op1, vbool4_t op2, vint8m2_t op3, vint8m2_t op4, const int8_t * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8mf2))) +void vloxseg2ei32(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8mf2_m))) +void vloxseg2ei32(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8mf4))) +void vloxseg2ei32(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8mf4_m))) +void vloxseg2ei32(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8mf8))) +void vloxseg2ei32(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i8mf8_m))) +void vloxseg2ei32(vint8mf8_t * op0, vint8mf8_t * op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8m1))) +void vloxseg2ei32(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8m1_m))) +void vloxseg2ei32(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8m2))) +void vloxseg2ei32(vuint8m2_t * op0, vuint8m2_t * op1, const uint8_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8m2_m))) +void vloxseg2ei32(vuint8m2_t * op0, vuint8m2_t * op1, vbool4_t op2, vuint8m2_t op3, vuint8m2_t op4, const uint8_t * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8mf2))) +void vloxseg2ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded 
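+/*
+ * Masked-overload sketch (editorial comment; same hedges as above): the _m
+ * variants insert a mask operand and one merge ("maskedoff") operand per
+ * field ahead of the base pointer, matching the vloxseg2ei32 pair declared
+ * just above:
+ *
+ *   vbool8_t m;                     // one mask bit per segment
+ *   vint8m1_t f0, f1;               // merge values for inactive elements
+ *   vloxseg2ei32(&f0, &f1, m, f0, f1, base, offsets, vl);
+ *
+ * Inactive elements of each output are taken from the corresponding merge
+ * operand, per the v0.10 masked-intrinsic convention.
+ */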
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8mf2_m))) +void vloxseg2ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8mf4))) +void vloxseg2ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8mf4_m))) +void vloxseg2ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8mf8))) +void vloxseg2ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u8mf8_m))) +void vloxseg2ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8m1))) +void vloxseg3ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8m1_m))) +void vloxseg3ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8m2))) +void vloxseg3ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, const int8_t * op3, vuint32m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8m2_m))) +void vloxseg3ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vbool4_t op3, vint8m2_t op4, vint8m2_t op5, vint8m2_t op6, const int8_t * op7, vuint32m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8mf2))) +void vloxseg3ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8mf2_m))) +void vloxseg3ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8mf4))) +void vloxseg3ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8mf4_m))) +void vloxseg3ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8mf8))) +void vloxseg3ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, const int8_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i8mf8_m))) +void vloxseg3ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, 
const int8_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8m1))) +void vloxseg3ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8m1_m))) +void vloxseg3ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8m2))) +void vloxseg3ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, const uint8_t * op3, vuint32m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8m2_m))) +void vloxseg3ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vbool4_t op3, vuint8m2_t op4, vuint8m2_t op5, vuint8m2_t op6, const uint8_t * op7, vuint32m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8mf2))) +void vloxseg3ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8mf2_m))) +void vloxseg3ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8mf4))) +void vloxseg3ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8mf4_m))) +void vloxseg3ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8mf8))) +void vloxseg3ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u8mf8_m))) +void vloxseg3ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8m1))) +void vloxseg4ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8m1_m))) +void vloxseg4ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8m2))) +void vloxseg4ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, const int8_t * op4, vuint32m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8m2_m))) +void vloxseg4ei32(vint8m2_t * op0, vint8m2_t * op1, vint8m2_t * op2, vint8m2_t * op3, vbool4_t op4, vint8m2_t op5, vint8m2_t 
op6, vint8m2_t op7, vint8m2_t op8, const int8_t * op9, vuint32m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8mf2))) +void vloxseg4ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8mf2_m))) +void vloxseg4ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8mf4))) +void vloxseg4ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8mf4_m))) +void vloxseg4ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8mf8))) +void vloxseg4ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i8mf8_m))) +void vloxseg4ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8m1))) +void vloxseg4ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8m1_m))) +void vloxseg4ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8m2))) +void vloxseg4ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, const uint8_t * op4, vuint32m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8m2_m))) +void vloxseg4ei32(vuint8m2_t * op0, vuint8m2_t * op1, vuint8m2_t * op2, vuint8m2_t * op3, vbool4_t op4, vuint8m2_t op5, vuint8m2_t op6, vuint8m2_t op7, vuint8m2_t op8, const uint8_t * op9, vuint32m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8mf2))) +void vloxseg4ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8mf2_m))) +void vloxseg4ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8mf4))) +void vloxseg4ei32(vuint8mf4_t * op0, vuint8mf4_t * 
op0, vuint8mf4_t *
op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8mf4_m))) +void vloxseg4ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8mf8))) +void vloxseg4ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u8mf8_m))) +void vloxseg4ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8m1))) +void vloxseg5ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8m1_m))) +void vloxseg5ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint32m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8mf2))) +void vloxseg5ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8mf2_m))) +void vloxseg5ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8mf4))) +void vloxseg5ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8mf4_m))) +void vloxseg5ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8mf8))) +void vloxseg5ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i8mf8_m))) +void vloxseg5ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8m1))) +void vloxseg5ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * 
op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8m1_m))) +void vloxseg5ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint32m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8mf2))) +void vloxseg5ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8mf2_m))) +void vloxseg5ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8mf4))) +void vloxseg5ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, const uint8_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8mf4_m))) +void vloxseg5ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8mf8))) +void vloxseg5ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u8mf8_m))) +void vloxseg5ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8m1))) +void vloxseg6ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint32m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8m1_m))) +void vloxseg6ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint32m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8mf2))) +void vloxseg6ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8mf2_m))) +void vloxseg6ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t 
op12, const int8_t * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8mf4))) +void vloxseg6ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8mf4_m))) +void vloxseg6ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8mf8))) +void vloxseg6ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i8mf8_m))) +void vloxseg6ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8m1))) +void vloxseg6ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint32m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8m1_m))) +void vloxseg6ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint32m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8mf2))) +void vloxseg6ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8mf2_m))) +void vloxseg6ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8mf4))) +void vloxseg6ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8mf4_m))) +void vloxseg6ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8mf8))) +void vloxseg6ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, 
vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u8mf8_m))) +void vloxseg6ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8m1))) +void vloxseg7ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8m1_m))) +void vloxseg7ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint32m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8mf2))) +void vloxseg7ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8mf2_m))) +void vloxseg7ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8mf4))) +void vloxseg7ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8mf4_m))) +void vloxseg7ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8mf8))) +void vloxseg7ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i8mf8_m))) +void vloxseg7ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8m1))) +void vloxseg7ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t 
* op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8m1_m))) +void vloxseg7ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint32m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8mf2))) +void vloxseg7ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8mf2_m))) +void vloxseg7ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8mf4))) +void vloxseg7ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8mf4_m))) +void vloxseg7ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8mf8))) +void vloxseg7ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u8mf8_m))) +void vloxseg7ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8m1))) +void vloxseg8ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint32m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8m1_m))) +void vloxseg8ei32(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint32m4_t op18, size_t op19); + +__rvv_overloaded 
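+/*
+ * Coverage note (editorial comment): the set of overloads is bounded by the
+ * v0.10 segment-load limits. NFIELDS times the data LMUL may not exceed 8,
+ * which is why vloxseg5ei32 and above stop at m1 data while vloxseg2ei32
+ * still offers an m2 form; and the index operand's register group scales
+ * with the EEW ratio, so int8 data at m2 indexed by 32-bit offsets already
+ * requires a vuint32m8_t index, the widest group available.
+ */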
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8mf2))) +void vloxseg8ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8mf2_m))) +void vloxseg8ei32(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8mf4))) +void vloxseg8ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8mf4_m))) +void vloxseg8ei32(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8mf8))) +void vloxseg8ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i8mf8_m))) +void vloxseg8ei32(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8m1))) +void vloxseg8ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint32m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8m1_m))) +void vloxseg8ei32(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint32m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8mf2))) +void vloxseg8ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8mf2_m))) +void vloxseg8ei32(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * 
op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8mf4))) +void vloxseg8ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8mf4_m))) +void vloxseg8ei32(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8mf8))) +void vloxseg8ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u8mf8_m))) +void vloxseg8ei32(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8m1))) +void vloxseg2ei64(vint8m1_t * op0, vint8m1_t * op1, const int8_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8m1_m))) +void vloxseg2ei64(vint8m1_t * op0, vint8m1_t * op1, vbool8_t op2, vint8m1_t op3, vint8m1_t op4, const int8_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8mf2))) +void vloxseg2ei64(vint8mf2_t * op0, vint8mf2_t * op1, const int8_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8mf2_m))) +void vloxseg2ei64(vint8mf2_t * op0, vint8mf2_t * op1, vbool16_t op2, vint8mf2_t op3, vint8mf2_t op4, const int8_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8mf4))) +void vloxseg2ei64(vint8mf4_t * op0, vint8mf4_t * op1, const int8_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8mf4_m))) +void vloxseg2ei64(vint8mf4_t * op0, vint8mf4_t * op1, vbool32_t op2, vint8mf4_t op3, vint8mf4_t op4, const int8_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8mf8))) +void vloxseg2ei64(vint8mf8_t * op0, vint8mf8_t * op1, const int8_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i8mf8_m))) +void vloxseg2ei64(vint8mf8_t * op0, vint8mf8_t * 
op0, vint8mf8_t *
op1, vbool64_t op2, vint8mf8_t op3, vint8mf8_t op4, const int8_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8m1))) +void vloxseg2ei64(vuint8m1_t * op0, vuint8m1_t * op1, const uint8_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8m1_m))) +void vloxseg2ei64(vuint8m1_t * op0, vuint8m1_t * op1, vbool8_t op2, vuint8m1_t op3, vuint8m1_t op4, const uint8_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8mf2))) +void vloxseg2ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, const uint8_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8mf2_m))) +void vloxseg2ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vbool16_t op2, vuint8mf2_t op3, vuint8mf2_t op4, const uint8_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8mf4))) +void vloxseg2ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, const uint8_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8mf4_m))) +void vloxseg2ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vbool32_t op2, vuint8mf4_t op3, vuint8mf4_t op4, const uint8_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8mf8))) +void vloxseg2ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, const uint8_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u8mf8_m))) +void vloxseg2ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vbool64_t op2, vuint8mf8_t op3, vuint8mf8_t op4, const uint8_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8m1))) +void vloxseg3ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, const int8_t * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8m1_m))) +void vloxseg3ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vbool8_t op3, vint8m1_t op4, vint8m1_t op5, vint8m1_t op6, const int8_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8mf2))) +void vloxseg3ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, const int8_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8mf2_m))) +void vloxseg3ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vbool16_t op3, vint8mf2_t op4, vint8mf2_t op5, vint8mf2_t op6, const int8_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8mf4))) +void vloxseg3ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, const int8_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8mf4_m))) +void vloxseg3ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vbool32_t op3, vint8mf4_t op4, vint8mf4_t op5, vint8mf4_t op6, const int8_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8mf8))) +void vloxseg3ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, 
const int8_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i8mf8_m))) +void vloxseg3ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vbool64_t op3, vint8mf8_t op4, vint8mf8_t op5, vint8mf8_t op6, const int8_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8m1))) +void vloxseg3ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, const uint8_t * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8m1_m))) +void vloxseg3ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vbool8_t op3, vuint8m1_t op4, vuint8m1_t op5, vuint8m1_t op6, const uint8_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8mf2))) +void vloxseg3ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, const uint8_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8mf2_m))) +void vloxseg3ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vbool16_t op3, vuint8mf2_t op4, vuint8mf2_t op5, vuint8mf2_t op6, const uint8_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8mf4))) +void vloxseg3ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, const uint8_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8mf4_m))) +void vloxseg3ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vbool32_t op3, vuint8mf4_t op4, vuint8mf4_t op5, vuint8mf4_t op6, const uint8_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8mf8))) +void vloxseg3ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, const uint8_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u8mf8_m))) +void vloxseg3ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vbool64_t op3, vuint8mf8_t op4, vuint8mf8_t op5, vuint8mf8_t op6, const uint8_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8m1))) +void vloxseg4ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, const int8_t * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8m1_m))) +void vloxseg4ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vbool8_t op4, vint8m1_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, const int8_t * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8mf2))) +void vloxseg4ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, const int8_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8mf2_m))) +void vloxseg4ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vbool16_t op4, vint8mf2_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, const int8_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8mf4))) +void 
vloxseg4ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, const int8_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8mf4_m))) +void vloxseg4ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vbool32_t op4, vint8mf4_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, const int8_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8mf8))) +void vloxseg4ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, const int8_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i8mf8_m))) +void vloxseg4ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vbool64_t op4, vint8mf8_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, const int8_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8m1))) +void vloxseg4ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, const uint8_t * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8m1_m))) +void vloxseg4ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vbool8_t op4, vuint8m1_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, const uint8_t * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8mf2))) +void vloxseg4ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, const uint8_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8mf2_m))) +void vloxseg4ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vbool16_t op4, vuint8mf2_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, const uint8_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8mf4))) +void vloxseg4ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, const uint8_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8mf4_m))) +void vloxseg4ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vbool32_t op4, vuint8mf4_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, const uint8_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8mf8))) +void vloxseg4ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, const uint8_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u8mf8_m))) +void vloxseg4ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vbool64_t op4, vuint8mf8_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, const uint8_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8m1))) +void vloxseg5ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, const int8_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded 
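+/*
+ * Width note (editorial comment): with 64-bit offsets over 8-bit data the
+ * index operand is eight times wider than the data, so even the m1
+ * overloads above consume a full vuint64m8_t register group; that is why
+ * the ei64 family declares no m2 data forms for int8/uint8 elements.
+ */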
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8m1_m))) +void vloxseg5ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vbool8_t op5, vint8m1_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, const int8_t * op11, vuint64m8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8mf2))) +void vloxseg5ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, const int8_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8mf2_m))) +void vloxseg5ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vbool16_t op5, vint8mf2_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, const int8_t * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8mf4))) +void vloxseg5ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, const int8_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8mf4_m))) +void vloxseg5ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vbool32_t op5, vint8mf4_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, const int8_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8mf8))) +void vloxseg5ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, const int8_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i8mf8_m))) +void vloxseg5ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vbool64_t op5, vint8mf8_t op6, vint8mf8_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, const int8_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8m1))) +void vloxseg5ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, const uint8_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8m1_m))) +void vloxseg5ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vbool8_t op5, vuint8m1_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, const uint8_t * op11, vuint64m8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8mf2))) +void vloxseg5ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, const uint8_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8mf2_m))) +void vloxseg5ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vbool16_t op5, vuint8mf2_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, const uint8_t * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8mf4))) +void vloxseg5ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, 
vuint8mf4_t * op4, const uint8_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8mf4_m))) +void vloxseg5ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vbool32_t op5, vuint8mf4_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, const uint8_t * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8mf8))) +void vloxseg5ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, const uint8_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u8mf8_m))) +void vloxseg5ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vbool64_t op5, vuint8mf8_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, const uint8_t * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8m1))) +void vloxseg6ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, const int8_t * op6, vuint64m8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8m1_m))) +void vloxseg6ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vbool8_t op6, vint8m1_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, const int8_t * op13, vuint64m8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8mf2))) +void vloxseg6ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, const int8_t * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8mf2_m))) +void vloxseg6ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vbool16_t op6, vint8mf2_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, const int8_t * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8mf4))) +void vloxseg6ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, const int8_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8mf4_m))) +void vloxseg6ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vbool32_t op6, vint8mf4_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, const int8_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8mf8))) +void vloxseg6ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, const int8_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i8mf8_m))) +void vloxseg6ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vbool64_t op6, vint8mf8_t op7, vint8mf8_t op8, 
vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, const int8_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8m1))) +void vloxseg6ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, const uint8_t * op6, vuint64m8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8m1_m))) +void vloxseg6ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vbool8_t op6, vuint8m1_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, const uint8_t * op13, vuint64m8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8mf2))) +void vloxseg6ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, const uint8_t * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8mf2_m))) +void vloxseg6ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vbool16_t op6, vuint8mf2_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, const uint8_t * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8mf4))) +void vloxseg6ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, const uint8_t * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8mf4_m))) +void vloxseg6ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vbool32_t op6, vuint8mf4_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, const uint8_t * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8mf8))) +void vloxseg6ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, const uint8_t * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u8mf8_m))) +void vloxseg6ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vbool64_t op6, vuint8mf8_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, const uint8_t * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8m1))) +void vloxseg7ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, const int8_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8m1_m))) +void vloxseg7ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vbool8_t op7, vint8m1_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, const int8_t * op15, vuint64m8_t op16, size_t op17); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8mf2))) +void vloxseg7ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, const int8_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8mf2_m))) +void vloxseg7ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vbool16_t op7, vint8mf2_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, const int8_t * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8mf4))) +void vloxseg7ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, const int8_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8mf4_m))) +void vloxseg7ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vbool32_t op7, vint8mf4_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, const int8_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8mf8))) +void vloxseg7ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, const int8_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i8mf8_m))) +void vloxseg7ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vbool64_t op7, vint8mf8_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, const int8_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8m1))) +void vloxseg7ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, const uint8_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8m1_m))) +void vloxseg7ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vbool8_t op7, vuint8m1_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, const uint8_t * op15, vuint64m8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8mf2))) +void vloxseg7ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, const uint8_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8mf2_m))) +void vloxseg7ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vbool16_t op7, vuint8mf2_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, const uint8_t * op15, vuint64m4_t op16, 
size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8mf4))) +void vloxseg7ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, const uint8_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8mf4_m))) +void vloxseg7ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vbool32_t op7, vuint8mf4_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, const uint8_t * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8mf8))) +void vloxseg7ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, const uint8_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u8mf8_m))) +void vloxseg7ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vbool64_t op7, vuint8mf8_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, const uint8_t * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8m1))) +void vloxseg8ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, const int8_t * op8, vuint64m8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8m1_m))) +void vloxseg8ei64(vint8m1_t * op0, vint8m1_t * op1, vint8m1_t * op2, vint8m1_t * op3, vint8m1_t * op4, vint8m1_t * op5, vint8m1_t * op6, vint8m1_t * op7, vbool8_t op8, vint8m1_t op9, vint8m1_t op10, vint8m1_t op11, vint8m1_t op12, vint8m1_t op13, vint8m1_t op14, vint8m1_t op15, vint8m1_t op16, const int8_t * op17, vuint64m8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8mf2))) +void vloxseg8ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, const int8_t * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8mf2_m))) +void vloxseg8ei64(vint8mf2_t * op0, vint8mf2_t * op1, vint8mf2_t * op2, vint8mf2_t * op3, vint8mf2_t * op4, vint8mf2_t * op5, vint8mf2_t * op6, vint8mf2_t * op7, vbool16_t op8, vint8mf2_t op9, vint8mf2_t op10, vint8mf2_t op11, vint8mf2_t op12, vint8mf2_t op13, vint8mf2_t op14, vint8mf2_t op15, vint8mf2_t op16, const int8_t * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8mf4))) +void vloxseg8ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * op7, const int8_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8mf4_m))) +void vloxseg8ei64(vint8mf4_t * op0, vint8mf4_t * op1, vint8mf4_t * op2, vint8mf4_t * op3, vint8mf4_t * op4, vint8mf4_t * op5, vint8mf4_t * op6, vint8mf4_t * 
op7, vbool32_t op8, vint8mf4_t op9, vint8mf4_t op10, vint8mf4_t op11, vint8mf4_t op12, vint8mf4_t op13, vint8mf4_t op14, vint8mf4_t op15, vint8mf4_t op16, const int8_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8mf8))) +void vloxseg8ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, const int8_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i8mf8_m))) +void vloxseg8ei64(vint8mf8_t * op0, vint8mf8_t * op1, vint8mf8_t * op2, vint8mf8_t * op3, vint8mf8_t * op4, vint8mf8_t * op5, vint8mf8_t * op6, vint8mf8_t * op7, vbool64_t op8, vint8mf8_t op9, vint8mf8_t op10, vint8mf8_t op11, vint8mf8_t op12, vint8mf8_t op13, vint8mf8_t op14, vint8mf8_t op15, vint8mf8_t op16, const int8_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8m1))) +void vloxseg8ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, const uint8_t * op8, vuint64m8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8m1_m))) +void vloxseg8ei64(vuint8m1_t * op0, vuint8m1_t * op1, vuint8m1_t * op2, vuint8m1_t * op3, vuint8m1_t * op4, vuint8m1_t * op5, vuint8m1_t * op6, vuint8m1_t * op7, vbool8_t op8, vuint8m1_t op9, vuint8m1_t op10, vuint8m1_t op11, vuint8m1_t op12, vuint8m1_t op13, vuint8m1_t op14, vuint8m1_t op15, vuint8m1_t op16, const uint8_t * op17, vuint64m8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8mf2))) +void vloxseg8ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, const uint8_t * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8mf2_m))) +void vloxseg8ei64(vuint8mf2_t * op0, vuint8mf2_t * op1, vuint8mf2_t * op2, vuint8mf2_t * op3, vuint8mf2_t * op4, vuint8mf2_t * op5, vuint8mf2_t * op6, vuint8mf2_t * op7, vbool16_t op8, vuint8mf2_t op9, vuint8mf2_t op10, vuint8mf2_t op11, vuint8mf2_t op12, vuint8mf2_t op13, vuint8mf2_t op14, vuint8mf2_t op15, vuint8mf2_t op16, const uint8_t * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8mf4))) +void vloxseg8ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, const uint8_t * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8mf4_m))) +void vloxseg8ei64(vuint8mf4_t * op0, vuint8mf4_t * op1, vuint8mf4_t * op2, vuint8mf4_t * op3, vuint8mf4_t * op4, vuint8mf4_t * op5, vuint8mf4_t * op6, vuint8mf4_t * op7, vbool32_t op8, vuint8mf4_t op9, vuint8mf4_t op10, vuint8mf4_t op11, vuint8mf4_t op12, vuint8mf4_t op13, vuint8mf4_t op14, vuint8mf4_t op15, vuint8mf4_t op16, const uint8_t * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8mf8))) +void vloxseg8ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * 
op5, vuint8mf8_t * op6, vuint8mf8_t * op7, const uint8_t * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u8mf8_m))) +void vloxseg8ei64(vuint8mf8_t * op0, vuint8mf8_t * op1, vuint8mf8_t * op2, vuint8mf8_t * op3, vuint8mf8_t * op4, vuint8mf8_t * op5, vuint8mf8_t * op6, vuint8mf8_t * op7, vbool64_t op8, vuint8mf8_t op9, vuint8mf8_t op10, vuint8mf8_t op11, vuint8mf8_t op12, vuint8mf8_t op13, vuint8mf8_t op14, vuint8mf8_t op15, vuint8mf8_t op16, const uint8_t * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16m1))) +void vloxseg2ei8(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16m1_m))) +void vloxseg2ei8(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16m2))) +void vloxseg2ei8(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16m2_m))) +void vloxseg2ei8(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16m4))) +void vloxseg2ei8(vint16m4_t * op0, vint16m4_t * op1, const int16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16m4_m))) +void vloxseg2ei8(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16mf2))) +void vloxseg2ei8(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16mf2_m))) +void vloxseg2ei8(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16mf4))) +void vloxseg2ei8(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i16mf4_m))) +void vloxseg2ei8(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16m1))) +void vloxseg2ei8(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16m1_m))) +void vloxseg2ei8(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16m2))) +void vloxseg2ei8(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16m2_m))) +void 
vloxseg2ei8(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16m4))) +void vloxseg2ei8(vuint16m4_t * op0, vuint16m4_t * op1, const uint16_t * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16m4_m))) +void vloxseg2ei8(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16mf2))) +void vloxseg2ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16mf2_m))) +void vloxseg2ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16mf4))) +void vloxseg2ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u16mf4_m))) +void vloxseg2ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16m1))) +void vloxseg3ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16m1_m))) +void vloxseg3ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16m2))) +void vloxseg3ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16m2_m))) +void vloxseg3ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16mf2))) +void vloxseg3ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16mf2_m))) +void vloxseg3ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16mf4))) +void vloxseg3ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i16mf4_m))) +void vloxseg3ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16m1))) +void vloxseg3ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16m1_m))) +void vloxseg3ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16m2))) +void vloxseg3ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16m2_m))) +void vloxseg3ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16mf2))) +void vloxseg3ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16mf2_m))) +void vloxseg3ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16mf4))) +void vloxseg3ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u16mf4_m))) +void vloxseg3ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16m1))) +void vloxseg4ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16m1_m))) +void vloxseg4ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16m2))) +void vloxseg4ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16m2_m))) +void vloxseg4ei8(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16mf2))) +void vloxseg4ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16mf2_m))) +void vloxseg4ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t 
op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16mf4))) +void vloxseg4ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i16mf4_m))) +void vloxseg4ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16m1))) +void vloxseg4ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16m1_m))) +void vloxseg4ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16m2))) +void vloxseg4ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16m2_m))) +void vloxseg4ei8(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16mf2))) +void vloxseg4ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16mf2_m))) +void vloxseg4ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16mf4))) +void vloxseg4ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u16mf4_m))) +void vloxseg4ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i16m1))) +void vloxseg5ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i16m1_m))) +void vloxseg5ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint8mf2_t op12, 
size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i16mf2))) +void vloxseg5ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i16mf2_m))) +void vloxseg5ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i16mf4))) +void vloxseg5ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i16mf4_m))) +void vloxseg5ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u16m1))) +void vloxseg5ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u16m1_m))) +void vloxseg5ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u16mf2))) +void vloxseg5ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u16mf2_m))) +void vloxseg5ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u16mf4))) +void vloxseg5ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u16mf4_m))) +void vloxseg5ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i16m1))) +void vloxseg6ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i16m1_m))) +void vloxseg6ei8(vint16m1_t * op0, 
vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i16mf2))) +void vloxseg6ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i16mf2_m))) +void vloxseg6ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i16mf4))) +void vloxseg6ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i16mf4_m))) +void vloxseg6ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u16m1))) +void vloxseg6ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u16m1_m))) +void vloxseg6ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u16mf2))) +void vloxseg6ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u16mf2_m))) +void vloxseg6ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u16mf4))) +void vloxseg6ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u16mf4_m))) +void vloxseg6ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, 
vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i16m1))) +void vloxseg7ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i16m1_m))) +void vloxseg7ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i16mf2))) +void vloxseg7ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i16mf2_m))) +void vloxseg7ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i16mf4))) +void vloxseg7ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i16mf4_m))) +void vloxseg7ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u16m1))) +void vloxseg7ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u16m1_m))) +void vloxseg7ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u16mf2))) +void vloxseg7ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u16mf2_m))) +void vloxseg7ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * 
op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u16mf4))) +void vloxseg7ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u16mf4_m))) +void vloxseg7ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i16m1))) +void vloxseg8ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i16m1_m))) +void vloxseg8ei8(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i16mf2))) +void vloxseg8ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i16mf2_m))) +void vloxseg8ei8(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i16mf4))) +void vloxseg8ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i16mf4_m))) +void vloxseg8ei8(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u16m1))) +void vloxseg8ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * 
op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u16m1_m))) +void vloxseg8ei8(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u16mf2))) +void vloxseg8ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u16mf2_m))) +void vloxseg8ei8(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u16mf4))) +void vloxseg8ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u16mf4_m))) +void vloxseg8ei8(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16m1))) +void vloxseg2ei16(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16m1_m))) +void vloxseg2ei16(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16m2))) +void vloxseg2ei16(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16m2_m))) +void vloxseg2ei16(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16m4))) +void vloxseg2ei16(vint16m4_t * op0, vint16m4_t * op1, const int16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16m4_m))) +void vloxseg2ei16(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, vuint16m4_t op6, 
size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16mf2))) +void vloxseg2ei16(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16mf2_m))) +void vloxseg2ei16(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16mf4))) +void vloxseg2ei16(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i16mf4_m))) +void vloxseg2ei16(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16m1))) +void vloxseg2ei16(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16m1_m))) +void vloxseg2ei16(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16m2))) +void vloxseg2ei16(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16m2_m))) +void vloxseg2ei16(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16m4))) +void vloxseg2ei16(vuint16m4_t * op0, vuint16m4_t * op1, const uint16_t * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16m4_m))) +void vloxseg2ei16(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16mf2))) +void vloxseg2ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16mf2_m))) +void vloxseg2ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16mf4))) +void vloxseg2ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u16mf4_m))) +void vloxseg2ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16m1))) +void vloxseg3ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16m1_m))) +void 
vloxseg3ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16m2))) +void vloxseg3ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16m2_m))) +void vloxseg3ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16mf2))) +void vloxseg3ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16mf2_m))) +void vloxseg3ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16mf4))) +void vloxseg3ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i16mf4_m))) +void vloxseg3ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16m1))) +void vloxseg3ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16m1_m))) +void vloxseg3ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16m2))) +void vloxseg3ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16m2_m))) +void vloxseg3ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16mf2))) +void vloxseg3ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16mf2_m))) +void vloxseg3ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16mf4))) +void vloxseg3ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u16mf4_m))) +void vloxseg3ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16m1))) +void vloxseg4ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16m1_m))) +void vloxseg4ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16m2))) +void vloxseg4ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16m2_m))) +void vloxseg4ei16(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16mf2))) +void vloxseg4ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16mf2_m))) +void vloxseg4ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16mf4))) +void vloxseg4ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i16mf4_m))) +void vloxseg4ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16m1))) +void vloxseg4ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16m1_m))) +void vloxseg4ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16m2))) +void vloxseg4ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16m2_m))) +void vloxseg4ei16(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, 
vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16mf2))) +void vloxseg4ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16mf2_m))) +void vloxseg4ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16mf4))) +void vloxseg4ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u16mf4_m))) +void vloxseg4ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i16m1))) +void vloxseg5ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i16m1_m))) +void vloxseg5ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i16mf2))) +void vloxseg5ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i16mf2_m))) +void vloxseg5ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i16mf4))) +void vloxseg5ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i16mf4_m))) +void vloxseg5ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u16m1))) +void vloxseg5ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u16m1_m))) +void vloxseg5ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * 
op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u16mf2))) +void vloxseg5ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u16mf2_m))) +void vloxseg5ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u16mf4))) +void vloxseg5ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u16mf4_m))) +void vloxseg5ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i16m1))) +void vloxseg6ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i16m1_m))) +void vloxseg6ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i16mf2))) +void vloxseg6ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i16mf2_m))) +void vloxseg6ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i16mf4))) +void vloxseg6ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i16mf4_m))) +void vloxseg6ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u16m1))) +void vloxseg6ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u16m1_m))) +void vloxseg6ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u16mf2))) +void vloxseg6ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u16mf2_m))) +void vloxseg6ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u16mf4))) +void vloxseg6ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u16mf4_m))) +void vloxseg6ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i16m1))) +void vloxseg7ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i16m1_m))) +void vloxseg7ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i16mf2))) +void vloxseg7ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i16mf2_m))) +void vloxseg7ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i16mf4))) +void vloxseg7ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i16mf4_m))) +void vloxseg7ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u16m1))) +void vloxseg7ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u16m1_m))) +void vloxseg7ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u16mf2))) +void vloxseg7ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u16mf2_m))) +void vloxseg7ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u16mf4))) +void vloxseg7ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u16mf4_m))) +void vloxseg7ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i16m1))) +void vloxseg8ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i16m1_m))) +void vloxseg8ei16(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, 
vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i16mf2))) +void vloxseg8ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i16mf2_m))) +void vloxseg8ei16(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i16mf4))) +void vloxseg8ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i16mf4_m))) +void vloxseg8ei16(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u16m1))) +void vloxseg8ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u16m1_m))) +void vloxseg8ei16(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u16mf2))) +void vloxseg8ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u16mf2_m))) +void vloxseg8ei16(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u16mf4))) +void vloxseg8ei16(vuint16mf4_t * op0, 
vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u16mf4_m))) +void vloxseg8ei16(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16m1))) +void vloxseg2ei32(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16m1_m))) +void vloxseg2ei32(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16m2))) +void vloxseg2ei32(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16m2_m))) +void vloxseg2ei32(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16m4))) +void vloxseg2ei32(vint16m4_t * op0, vint16m4_t * op1, const int16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16m4_m))) +void vloxseg2ei32(vint16m4_t * op0, vint16m4_t * op1, vbool4_t op2, vint16m4_t op3, vint16m4_t op4, const int16_t * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16mf2))) +void vloxseg2ei32(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16mf2_m))) +void vloxseg2ei32(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16mf4))) +void vloxseg2ei32(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i16mf4_m))) +void vloxseg2ei32(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16m1))) +void vloxseg2ei32(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16m1_m))) +void vloxseg2ei32(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16m2))) +void vloxseg2ei32(vuint16m2_t * op0, vuint16m2_t * op1, const 
uint16_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16m2_m))) +void vloxseg2ei32(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16m4))) +void vloxseg2ei32(vuint16m4_t * op0, vuint16m4_t * op1, const uint16_t * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16m4_m))) +void vloxseg2ei32(vuint16m4_t * op0, vuint16m4_t * op1, vbool4_t op2, vuint16m4_t op3, vuint16m4_t op4, const uint16_t * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16mf2))) +void vloxseg2ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16mf2_m))) +void vloxseg2ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16mf4))) +void vloxseg2ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u16mf4_m))) +void vloxseg2ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16m1))) +void vloxseg3ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16m1_m))) +void vloxseg3ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16m2))) +void vloxseg3ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16m2_m))) +void vloxseg3ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16mf2))) +void vloxseg3ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16mf2_m))) +void vloxseg3ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16mf4))) +void vloxseg3ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i16mf4_m))) +void vloxseg3ei32(vint16mf4_t * op0, vint16mf4_t 
* op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16m1))) +void vloxseg3ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16m1_m))) +void vloxseg3ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16m2))) +void vloxseg3ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16m2_m))) +void vloxseg3ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16mf2))) +void vloxseg3ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16mf2_m))) +void vloxseg3ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16mf4))) +void vloxseg3ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u16mf4_m))) +void vloxseg3ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16m1))) +void vloxseg4ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16m1_m))) +void vloxseg4ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16m2))) +void vloxseg4ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16m2_m))) +void vloxseg4ei32(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16mf2))) +void vloxseg4ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint32m1_t op5, size_t op6); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16mf2_m))) +void vloxseg4ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16mf4))) +void vloxseg4ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i16mf4_m))) +void vloxseg4ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16m1))) +void vloxseg4ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16m1_m))) +void vloxseg4ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16m2))) +void vloxseg4ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16m2_m))) +void vloxseg4ei32(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16mf2))) +void vloxseg4ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16mf2_m))) +void vloxseg4ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16mf4))) +void vloxseg4ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u16mf4_m))) +void vloxseg4ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i16m1))) +void vloxseg5ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i16m1_m))) +void 
vloxseg5ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i16mf2))) +void vloxseg5ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i16mf2_m))) +void vloxseg5ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i16mf4))) +void vloxseg5ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i16mf4_m))) +void vloxseg5ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u16m1))) +void vloxseg5ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u16m1_m))) +void vloxseg5ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u16mf2))) +void vloxseg5ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u16mf2_m))) +void vloxseg5ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u16mf4))) +void vloxseg5ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u16mf4_m))) +void vloxseg5ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i16m1))) +void vloxseg6ei32(vint16m1_t * op0, vint16m1_t * 
op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i16m1_m))) +void vloxseg6ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i16mf2))) +void vloxseg6ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i16mf2_m))) +void vloxseg6ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i16mf4))) +void vloxseg6ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i16mf4_m))) +void vloxseg6ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u16m1))) +void vloxseg6ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u16m1_m))) +void vloxseg6ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u16mf2))) +void vloxseg6ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u16mf2_m))) +void vloxseg6ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u16mf4))) +void vloxseg6ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint32mf2_t op7, size_t op8); + 
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u16mf4_m))) +void vloxseg6ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i16m1))) +void vloxseg7ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i16m1_m))) +void vloxseg7ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i16mf2))) +void vloxseg7ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i16mf2_m))) +void vloxseg7ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i16mf4))) +void vloxseg7ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i16mf4_m))) +void vloxseg7ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u16m1))) +void vloxseg7ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u16m1_m))) +void vloxseg7ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u16mf2))) +void vloxseg7ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * 
op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u16mf2_m))) +void vloxseg7ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u16mf4))) +void vloxseg7ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u16mf4_m))) +void vloxseg7ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i16m1))) +void vloxseg8ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i16m1_m))) +void vloxseg8ei32(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i16mf2))) +void vloxseg8ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i16mf2_m))) +void vloxseg8ei32(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i16mf4))) +void vloxseg8ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i16mf4_m))) +void vloxseg8ei32(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, 
vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u16m1))) +void vloxseg8ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u16m1_m))) +void vloxseg8ei32(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u16mf2))) +void vloxseg8ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u16mf2_m))) +void vloxseg8ei32(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u16mf4))) +void vloxseg8ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u16mf4_m))) +void vloxseg8ei32(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16m1))) +void vloxseg2ei64(vint16m1_t * op0, vint16m1_t * op1, const int16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16m1_m))) +void vloxseg2ei64(vint16m1_t * op0, vint16m1_t * op1, vbool16_t op2, vint16m1_t op3, vint16m1_t op4, const int16_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16m2))) +void vloxseg2ei64(vint16m2_t * op0, vint16m2_t * op1, const int16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16m2_m))) +void vloxseg2ei64(vint16m2_t * op0, vint16m2_t * op1, vbool8_t op2, vint16m2_t op3, vint16m2_t op4, const int16_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16mf2))) +void vloxseg2ei64(vint16mf2_t * op0, vint16mf2_t * op1, const int16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16mf2_m))) +void vloxseg2ei64(vint16mf2_t * op0, vint16mf2_t * op1, vbool32_t op2, vint16mf2_t op3, vint16mf2_t op4, const int16_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16mf4))) +void vloxseg2ei64(vint16mf4_t * op0, vint16mf4_t * op1, const int16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i16mf4_m))) +void vloxseg2ei64(vint16mf4_t * op0, vint16mf4_t * op1, vbool64_t op2, vint16mf4_t op3, vint16mf4_t op4, const int16_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16m1))) +void vloxseg2ei64(vuint16m1_t * op0, vuint16m1_t * op1, const uint16_t * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16m1_m))) +void vloxseg2ei64(vuint16m1_t * op0, vuint16m1_t * op1, vbool16_t op2, vuint16m1_t op3, vuint16m1_t op4, const uint16_t * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16m2))) +void vloxseg2ei64(vuint16m2_t * op0, vuint16m2_t * op1, const uint16_t * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16m2_m))) +void vloxseg2ei64(vuint16m2_t * op0, vuint16m2_t * op1, vbool8_t op2, vuint16m2_t op3, vuint16m2_t op4, const uint16_t * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16mf2))) +void vloxseg2ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, const uint16_t * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16mf2_m))) +void vloxseg2ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vbool32_t op2, vuint16mf2_t op3, vuint16mf2_t op4, const uint16_t * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16mf4))) +void vloxseg2ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, const uint16_t * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u16mf4_m))) +void vloxseg2ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vbool64_t op2, vuint16mf4_t op3, vuint16mf4_t op4, const uint16_t * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16m1))) +void vloxseg3ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, const int16_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16m1_m))) +void vloxseg3ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vbool16_t op3, vint16m1_t op4, vint16m1_t op5, vint16m1_t op6, const int16_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16m2))) +void vloxseg3ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, const int16_t * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16m2_m))) +void 
vloxseg3ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vbool8_t op3, vint16m2_t op4, vint16m2_t op5, vint16m2_t op6, const int16_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16mf2))) +void vloxseg3ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, const int16_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16mf2_m))) +void vloxseg3ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vbool32_t op3, vint16mf2_t op4, vint16mf2_t op5, vint16mf2_t op6, const int16_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16mf4))) +void vloxseg3ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, const int16_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i16mf4_m))) +void vloxseg3ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vbool64_t op3, vint16mf4_t op4, vint16mf4_t op5, vint16mf4_t op6, const int16_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16m1))) +void vloxseg3ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, const uint16_t * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16m1_m))) +void vloxseg3ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vbool16_t op3, vuint16m1_t op4, vuint16m1_t op5, vuint16m1_t op6, const uint16_t * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16m2))) +void vloxseg3ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, const uint16_t * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16m2_m))) +void vloxseg3ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vbool8_t op3, vuint16m2_t op4, vuint16m2_t op5, vuint16m2_t op6, const uint16_t * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16mf2))) +void vloxseg3ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, const uint16_t * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16mf2_m))) +void vloxseg3ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vbool32_t op3, vuint16mf2_t op4, vuint16mf2_t op5, vuint16mf2_t op6, const uint16_t * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16mf4))) +void vloxseg3ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, const uint16_t * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u16mf4_m))) +void vloxseg3ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vbool64_t op3, vuint16mf4_t op4, vuint16mf4_t op5, vuint16mf4_t op6, const uint16_t * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16m1))) +void vloxseg4ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, const int16_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16m1_m))) +void vloxseg4ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vbool16_t op4, vint16m1_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, const int16_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16m2))) +void vloxseg4ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, const int16_t * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16m2_m))) +void vloxseg4ei64(vint16m2_t * op0, vint16m2_t * op1, vint16m2_t * op2, vint16m2_t * op3, vbool8_t op4, vint16m2_t op5, vint16m2_t op6, vint16m2_t op7, vint16m2_t op8, const int16_t * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16mf2))) +void vloxseg4ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, const int16_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16mf2_m))) +void vloxseg4ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vbool32_t op4, vint16mf2_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, const int16_t * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16mf4))) +void vloxseg4ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, const int16_t * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i16mf4_m))) +void vloxseg4ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vbool64_t op4, vint16mf4_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, const int16_t * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16m1))) +void vloxseg4ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, const uint16_t * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16m1_m))) +void vloxseg4ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vbool16_t op4, vuint16m1_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, const uint16_t * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16m2))) +void vloxseg4ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, const uint16_t * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16m2_m))) +void vloxseg4ei64(vuint16m2_t * op0, vuint16m2_t * op1, vuint16m2_t * op2, vuint16m2_t * op3, vbool8_t op4, vuint16m2_t op5, vuint16m2_t op6, vuint16m2_t op7, vuint16m2_t op8, const uint16_t * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16mf2))) +void vloxseg4ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, const uint16_t * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16mf2_m))) +void vloxseg4ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * 
+void vloxseg4ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vbool32_t op4, vuint16mf2_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, const uint16_t * op9, vuint64m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16mf4)))
+void vloxseg4ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, const uint16_t * op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u16mf4_m)))
+void vloxseg4ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vbool64_t op4, vuint16mf4_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, const uint16_t * op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i16m1)))
+void vloxseg5ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, const int16_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i16m1_m)))
+void vloxseg5ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vbool16_t op5, vint16m1_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, const int16_t * op11, vuint64m4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i16mf2)))
+void vloxseg5ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, const int16_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i16mf2_m)))
+void vloxseg5ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vbool32_t op5, vint16mf2_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, const int16_t * op11, vuint64m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i16mf4)))
+void vloxseg5ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, const int16_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i16mf4_m)))
+void vloxseg5ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vbool64_t op5, vint16mf4_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, const int16_t * op11, vuint64m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u16m1)))
+void vloxseg5ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, const uint16_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u16m1_m)))
+void vloxseg5ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vbool16_t op5, vuint16m1_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, const uint16_t * op11, vuint64m4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u16mf2)))
+void vloxseg5ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, const uint16_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u16mf2_m)))
+void vloxseg5ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vbool32_t op5, vuint16mf2_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, const uint16_t * op11, vuint64m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u16mf4)))
+void vloxseg5ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, const uint16_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u16mf4_m)))
+void vloxseg5ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vbool64_t op5, vuint16mf4_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, const uint16_t * op11, vuint64m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i16m1)))
+void vloxseg6ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, const int16_t * op6, vuint64m4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i16m1_m)))
+void vloxseg6ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vbool16_t op6, vint16m1_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, const int16_t * op13, vuint64m4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i16mf2)))
+void vloxseg6ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, const int16_t * op6, vuint64m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i16mf2_m)))
+void vloxseg6ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vbool32_t op6, vint16mf2_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, const int16_t * op13, vuint64m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i16mf4)))
+void vloxseg6ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, const int16_t * op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i16mf4_m)))
+void vloxseg6ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vbool64_t op6, vint16mf4_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, const int16_t * op13, vuint64m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u16m1)))
+void vloxseg6ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, const uint16_t * op6, vuint64m4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u16m1_m)))
+void vloxseg6ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vbool16_t op6, vuint16m1_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, const uint16_t * op13, vuint64m4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u16mf2)))
+void vloxseg6ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, const uint16_t * op6, vuint64m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u16mf2_m)))
+void vloxseg6ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vbool32_t op6, vuint16mf2_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, const uint16_t * op13, vuint64m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u16mf4)))
+void vloxseg6ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, const uint16_t * op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u16mf4_m)))
+void vloxseg6ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vbool64_t op6, vuint16mf4_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, const uint16_t * op13, vuint64m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i16m1)))
+void vloxseg7ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, const int16_t * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i16m1_m)))
+void vloxseg7ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vbool16_t op7, vint16m1_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, const int16_t * op15, vuint64m4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i16mf2)))
+void vloxseg7ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, const int16_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i16mf2_m)))
+void vloxseg7ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vbool32_t op7, vint16mf2_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, const int16_t * op15, vuint64m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i16mf4)))
+void vloxseg7ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, const int16_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i16mf4_m)))
+void vloxseg7ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vbool64_t op7, vint16mf4_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, const int16_t * op15, vuint64m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u16m1)))
+void vloxseg7ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, const uint16_t * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u16m1_m)))
+void vloxseg7ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vbool16_t op7, vuint16m1_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, const uint16_t * op15, vuint64m4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u16mf2)))
+void vloxseg7ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, const uint16_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u16mf2_m)))
+void vloxseg7ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vbool32_t op7, vuint16mf2_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, const uint16_t * op15, vuint64m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u16mf4)))
+void vloxseg7ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, const uint16_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u16mf4_m)))
+void vloxseg7ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vbool64_t op7, vuint16mf4_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, const uint16_t * op15, vuint64m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i16m1)))
+void vloxseg8ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, const int16_t * op8, vuint64m4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i16m1_m)))
+void vloxseg8ei64(vint16m1_t * op0, vint16m1_t * op1, vint16m1_t * op2, vint16m1_t * op3, vint16m1_t * op4, vint16m1_t * op5, vint16m1_t * op6, vint16m1_t * op7, vbool16_t op8, vint16m1_t op9, vint16m1_t op10, vint16m1_t op11, vint16m1_t op12, vint16m1_t op13, vint16m1_t op14, vint16m1_t op15, vint16m1_t op16, const int16_t * op17, vuint64m4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i16mf2)))
+void vloxseg8ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, const int16_t * op8, vuint64m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i16mf2_m)))
+void vloxseg8ei64(vint16mf2_t * op0, vint16mf2_t * op1, vint16mf2_t * op2, vint16mf2_t * op3, vint16mf2_t * op4, vint16mf2_t * op5, vint16mf2_t * op6, vint16mf2_t * op7, vbool32_t op8, vint16mf2_t op9, vint16mf2_t op10, vint16mf2_t op11, vint16mf2_t op12, vint16mf2_t op13, vint16mf2_t op14, vint16mf2_t op15, vint16mf2_t op16, const int16_t * op17, vuint64m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i16mf4)))
+void vloxseg8ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, const int16_t * op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i16mf4_m)))
+void vloxseg8ei64(vint16mf4_t * op0, vint16mf4_t * op1, vint16mf4_t * op2, vint16mf4_t * op3, vint16mf4_t * op4, vint16mf4_t * op5, vint16mf4_t * op6, vint16mf4_t * op7, vbool64_t op8, vint16mf4_t op9, vint16mf4_t op10, vint16mf4_t op11, vint16mf4_t op12, vint16mf4_t op13, vint16mf4_t op14, vint16mf4_t op15, vint16mf4_t op16, const int16_t * op17, vuint64m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u16m1)))
+void vloxseg8ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, const uint16_t * op8, vuint64m4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u16m1_m)))
+void vloxseg8ei64(vuint16m1_t * op0, vuint16m1_t * op1, vuint16m1_t * op2, vuint16m1_t * op3, vuint16m1_t * op4, vuint16m1_t * op5, vuint16m1_t * op6, vuint16m1_t * op7, vbool16_t op8, vuint16m1_t op9, vuint16m1_t op10, vuint16m1_t op11, vuint16m1_t op12, vuint16m1_t op13, vuint16m1_t op14, vuint16m1_t op15, vuint16m1_t op16, const uint16_t * op17, vuint64m4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u16mf2)))
+void vloxseg8ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, const uint16_t * op8, vuint64m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u16mf2_m)))
+void vloxseg8ei64(vuint16mf2_t * op0, vuint16mf2_t * op1, vuint16mf2_t * op2, vuint16mf2_t * op3, vuint16mf2_t * op4, vuint16mf2_t * op5, vuint16mf2_t * op6, vuint16mf2_t * op7, vbool32_t op8, vuint16mf2_t op9, vuint16mf2_t op10, vuint16mf2_t op11, vuint16mf2_t op12, vuint16mf2_t op13, vuint16mf2_t op14, vuint16mf2_t op15, vuint16mf2_t op16, const uint16_t * op17, vuint64m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u16mf4)))
+void vloxseg8ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, const uint16_t * op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u16mf4_m)))
+void vloxseg8ei64(vuint16mf4_t * op0, vuint16mf4_t * op1, vuint16mf4_t * op2, vuint16mf4_t * op3, vuint16mf4_t * op4, vuint16mf4_t * op5, vuint16mf4_t * op6, vuint16mf4_t * op7, vbool64_t op8, vuint16mf4_t op9, vuint16mf4_t op10, vuint16mf4_t op11, vuint16mf4_t op12, vuint16mf4_t op13, vuint16mf4_t op14, vuint16mf4_t op15, vuint16mf4_t op16, const uint16_t * op17, vuint64m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32m1)))
+void vloxseg2ei8(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32m1_m)))
+void vloxseg2ei8(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32m2)))
+void vloxseg2ei8(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32m2_m)))
+void vloxseg2ei8(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32m4)))
+void vloxseg2ei8(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32m4_m)))
+void vloxseg2ei8(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32mf2)))
+void vloxseg2ei8(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_i32mf2_m)))
+void vloxseg2ei8(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32m1)))
+void vloxseg2ei8(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32m1_m)))
+void vloxseg2ei8(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32m2)))
+void vloxseg2ei8(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32m2_m)))
+void vloxseg2ei8(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32m4)))
+void vloxseg2ei8(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32m4_m)))
+void vloxseg2ei8(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32mf2)))
+void vloxseg2ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_u32mf2_m)))
+void vloxseg2ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i32m1)))
+void vloxseg3ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i32m1_m)))
+void vloxseg3ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i32m2)))
+void vloxseg3ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i32m2_m)))
+void vloxseg3ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i32mf2)))
+void vloxseg3ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_i32mf2_m)))
+void vloxseg3ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u32m1)))
+void vloxseg3ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u32m1_m)))
+void vloxseg3ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u32m2)))
+void vloxseg3ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u32m2_m)))
+void vloxseg3ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u32mf2)))
+void vloxseg3ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_u32mf2_m)))
+void vloxseg3ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i32m1)))
+void vloxseg4ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i32m1_m)))
+void vloxseg4ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i32m2)))
+void vloxseg4ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i32m2_m)))
+void vloxseg4ei8(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint8mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i32mf2)))
+void vloxseg4ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_i32mf2_m)))
+void vloxseg4ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u32m1)))
+void vloxseg4ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u32m1_m)))
+void vloxseg4ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u32m2)))
+void vloxseg4ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u32m2_m)))
+void vloxseg4ei8(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint8mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u32mf2)))
+void vloxseg4ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_u32mf2_m)))
+void vloxseg4ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i32m1)))
+void vloxseg5ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i32m1_m)))
+void vloxseg5ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint8mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i32mf2)))
+void vloxseg5ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_i32mf2_m)))
+void vloxseg5ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint8mf8_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u32m1)))
+void vloxseg5ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u32m1_m)))
+void vloxseg5ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint8mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u32mf2)))
+void vloxseg5ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_u32mf2_m)))
+void vloxseg5ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint8mf8_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i32m1)))
+void vloxseg6ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i32m1_m)))
+void vloxseg6ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint8mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i32mf2)))
+void vloxseg6ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_i32mf2_m)))
+void vloxseg6ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint8mf8_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u32m1)))
+void vloxseg6ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u32m1_m)))
+void vloxseg6ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint8mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u32mf2)))
+void vloxseg6ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_u32mf2_m)))
+void vloxseg6ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint8mf8_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i32m1)))
+void vloxseg7ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i32m1_m)))
+void vloxseg7ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint8mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i32mf2)))
+void vloxseg7ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_i32mf2_m)))
+void vloxseg7ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint8mf8_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u32m1)))
+void vloxseg7ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u32m1_m)))
+void vloxseg7ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint8mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u32mf2)))
+void vloxseg7ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_u32mf2_m)))
+void vloxseg7ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint8mf8_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i32m1)))
+void vloxseg8ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i32m1_m)))
+void vloxseg8ei8(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint8mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i32mf2)))
+void vloxseg8ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_i32mf2_m)))
+void vloxseg8ei8(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint8mf8_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u32m1)))
+void vloxseg8ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u32m1_m)))
+void vloxseg8ei8(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint8mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u32mf2)))
+void vloxseg8ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_u32mf2_m)))
+void vloxseg8ei8(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint8mf8_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32m1)))
+void vloxseg2ei16(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32m1_m)))
+void vloxseg2ei16(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32m2)))
+void vloxseg2ei16(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32m2_m)))
+void vloxseg2ei16(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32m4)))
+void vloxseg2ei16(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32m4_m)))
+void vloxseg2ei16(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint16m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32mf2)))
+void vloxseg2ei16(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_i32mf2_m)))
+void vloxseg2ei16(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32m1)))
+void vloxseg2ei16(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32m1_m)))
+void vloxseg2ei16(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32m2)))
+void vloxseg2ei16(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32m2_m)))
+void vloxseg2ei16(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32m4)))
+void vloxseg2ei16(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32m4_m)))
+void vloxseg2ei16(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint16m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32mf2)))
+void vloxseg2ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_u32mf2_m)))
+void vloxseg2ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i32m1)))
+void vloxseg3ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i32m1_m)))
+void vloxseg3ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i32m2)))
+void vloxseg3ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint16m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i32m2_m)))
+void vloxseg3ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint16m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i32mf2)))
+void vloxseg3ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint16mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_i32mf2_m)))
+void vloxseg3ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u32m1)))
+void vloxseg3ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u32m1_m)))
+void vloxseg3ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u32m2)))
+void vloxseg3ei16(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint16m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u32m2_m)))
+void vloxseg3ei16(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint16m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u32mf2)))
+void vloxseg3ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint16mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_u32mf2_m)))
+void vloxseg3ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i32m1)))
+void vloxseg4ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i32m1_m)))
+void vloxseg4ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint16mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i32m2)))
+void vloxseg4ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint16m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i32m2_m)))
+void vloxseg4ei16(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint16m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i32mf2)))
+void vloxseg4ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_i32mf2_m)))
+void vloxseg4ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint16mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u32m1)))
+void vloxseg4ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u32m1_m)))
+void vloxseg4ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint16mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u32m2)))
+void vloxseg4ei16(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint16m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u32m2_m)))
+void vloxseg4ei16(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint16m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u32mf2)))
+void vloxseg4ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_u32mf2_m)))
+void vloxseg4ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint16mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i32m1)))
+void vloxseg5ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i32m1_m)))
+void vloxseg5ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint16mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i32mf2)))
+void vloxseg5ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_i32mf2_m)))
+void vloxseg5ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint16mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u32m1)))
+void vloxseg5ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u32m1_m)))
+void vloxseg5ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint16mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u32mf2)))
+void vloxseg5ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_u32mf2_m)))
+void vloxseg5ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint16mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i32m1)))
+void vloxseg6ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint16mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i32m1_m)))
+void vloxseg6ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint16mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i32mf2)))
+void vloxseg6ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint16mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_i32mf2_m)))
+void vloxseg6ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint16mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u32m1)))
+void vloxseg6ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint16mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u32m1_m)))
+void vloxseg6ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint16mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u32mf2)))
+void vloxseg6ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint16mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_u32mf2_m)))
+void vloxseg6ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint16mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i32m1)))
+void vloxseg7ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i32m1_m)))
+void vloxseg7ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint16mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i32mf2)))
+void vloxseg7ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_i32mf2_m)))
+void vloxseg7ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint16mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u32m1)))
+void vloxseg7ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u32m1_m)))
+void vloxseg7ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint16mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u32mf2)))
+void vloxseg7ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_u32mf2_m)))
+void vloxseg7ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint16mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i32m1)))
+void vloxseg8ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint16mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i32m1_m)))
+void vloxseg8ei16(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint16mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i32mf2)))
+void vloxseg8ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint16mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_i32mf2_m)))
+void vloxseg8ei16(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint16mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u32m1)))
+void vloxseg8ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint16mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u32m1_m)))
+void vloxseg8ei16(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint16mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u32mf2)))
+void vloxseg8ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint16mf4_t op9, size_t op10);
+
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_u32mf2_m))) +void vloxseg8ei16(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m1))) +void vloxseg2ei32(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m1_m))) +void vloxseg2ei32(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m2))) +void vloxseg2ei32(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m2_m))) +void vloxseg2ei32(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m4))) +void vloxseg2ei32(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m4_m))) +void vloxseg2ei32(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32mf2))) +void vloxseg2ei32(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32mf2_m))) +void vloxseg2ei32(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m1))) +void vloxseg2ei32(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m1_m))) +void vloxseg2ei32(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m2))) +void vloxseg2ei32(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m2_m))) +void vloxseg2ei32(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m4))) +void vloxseg2ei32(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m4_m))) +void vloxseg2ei32(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, 
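+/* The overload set continues below with 32-bit and 64-bit index vectors
+ * (ei32/ei64).  For every unmasked form there is a masked (_m) companion
+ * that takes a vbool mask plus one merge operand per destination field,
+ * from which masked-off elements are taken.  A minimal masked sketch
+ * (src, idx, m, m0, m1 and vl are hypothetical names, not part of this
+ * generated header):
+ *
+ *   vint32m1_t s0, s1;
+ *   vloxseg2ei32(&s0, &s1, m, m0, m1, src, idx, vl);
+ */
+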
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m1)))
+void vloxseg2ei32(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m1_m)))
+void vloxseg2ei32(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m2)))
+void vloxseg2ei32(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m2_m)))
+void vloxseg2ei32(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m4)))
+void vloxseg2ei32(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32m4_m)))
+void vloxseg2ei32(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32mf2)))
+void vloxseg2ei32(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_i32mf2_m)))
+void vloxseg2ei32(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m1)))
+void vloxseg2ei32(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m1_m)))
+void vloxseg2ei32(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m2)))
+void vloxseg2ei32(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m2_m)))
+void vloxseg2ei32(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m4)))
+void vloxseg2ei32(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32m4_m)))
+void vloxseg2ei32(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32mf2)))
+void vloxseg2ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_u32mf2_m)))
+void vloxseg2ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i32m1)))
+void vloxseg3ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i32m1_m)))
+void vloxseg3ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i32m2)))
+void vloxseg3ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i32m2_m)))
+void vloxseg3ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i32mf2)))
+void vloxseg3ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_i32mf2_m)))
+void vloxseg3ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u32m1)))
+void vloxseg3ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u32m1_m)))
+void vloxseg3ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u32m2)))
+void vloxseg3ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u32m2_m)))
+void vloxseg3ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u32mf2)))
+void vloxseg3ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_u32mf2_m)))
+void vloxseg3ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i32m1)))
+void vloxseg4ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i32m1_m)))
+void vloxseg4ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i32m2)))
+void vloxseg4ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i32m2_m)))
+void vloxseg4ei32(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint32m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i32mf2)))
+void vloxseg4ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_i32mf2_m)))
+void vloxseg4ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u32m1)))
+void vloxseg4ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u32m1_m)))
+void vloxseg4ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u32m2)))
+void vloxseg4ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u32m2_m)))
+void vloxseg4ei32(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint32m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u32mf2)))
+void vloxseg4ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_u32mf2_m)))
+void vloxseg4ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i32m1)))
+void vloxseg5ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i32m1_m)))
+void vloxseg5ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint32m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i32mf2)))
+void vloxseg5ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_i32mf2_m)))
+void vloxseg5ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint32mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u32m1)))
+void vloxseg5ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u32m1_m)))
+void vloxseg5ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint32m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u32mf2)))
+void vloxseg5ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_u32mf2_m)))
+void vloxseg5ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint32mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i32m1)))
+void vloxseg6ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i32m1_m)))
+void vloxseg6ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint32m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i32mf2)))
+void vloxseg6ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_i32mf2_m)))
+void vloxseg6ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint32mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u32m1)))
+void vloxseg6ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u32m1_m)))
+void vloxseg6ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint32m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u32mf2)))
+void vloxseg6ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_u32mf2_m)))
+void vloxseg6ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint32mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i32m1)))
+void vloxseg7ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i32m1_m)))
+void vloxseg7ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint32m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i32mf2)))
+void vloxseg7ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_i32mf2_m)))
+void vloxseg7ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint32mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u32m1)))
+void vloxseg7ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u32m1_m)))
+void vloxseg7ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint32m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u32mf2)))
+void vloxseg7ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_u32mf2_m)))
+void vloxseg7ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint32mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i32m1)))
+void vloxseg8ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i32m1_m)))
+void vloxseg8ei32(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint32m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i32mf2)))
+void vloxseg8ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_i32mf2_m)))
+void vloxseg8ei32(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint32mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u32m1)))
+void vloxseg8ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u32m1_m)))
+void vloxseg8ei32(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint32m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u32mf2)))
+void vloxseg8ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_u32mf2_m)))
+void vloxseg8ei32(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint32mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32m1)))
+void vloxseg2ei64(vint32m1_t * op0, vint32m1_t * op1, const int32_t * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32m1_m)))
+void vloxseg2ei64(vint32m1_t * op0, vint32m1_t * op1, vbool32_t op2, vint32m1_t op3, vint32m1_t op4, const int32_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32m2)))
+void vloxseg2ei64(vint32m2_t * op0, vint32m2_t * op1, const int32_t * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32m2_m)))
+void vloxseg2ei64(vint32m2_t * op0, vint32m2_t * op1, vbool16_t op2, vint32m2_t op3, vint32m2_t op4, const int32_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32m4)))
+void vloxseg2ei64(vint32m4_t * op0, vint32m4_t * op1, const int32_t * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32m4_m)))
+void vloxseg2ei64(vint32m4_t * op0, vint32m4_t * op1, vbool8_t op2, vint32m4_t op3, vint32m4_t op4, const int32_t * op5, vuint64m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32mf2)))
+void vloxseg2ei64(vint32mf2_t * op0, vint32mf2_t * op1, const int32_t * op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_i32mf2_m)))
+void vloxseg2ei64(vint32mf2_t * op0, vint32mf2_t * op1, vbool64_t op2, vint32mf2_t op3, vint32mf2_t op4, const int32_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32m1)))
+void vloxseg2ei64(vuint32m1_t * op0, vuint32m1_t * op1, const uint32_t * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32m1_m)))
+void vloxseg2ei64(vuint32m1_t * op0, vuint32m1_t * op1, vbool32_t op2, vuint32m1_t op3, vuint32m1_t op4, const uint32_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32m2)))
+void vloxseg2ei64(vuint32m2_t * op0, vuint32m2_t * op1, const uint32_t * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32m2_m)))
+void vloxseg2ei64(vuint32m2_t * op0, vuint32m2_t * op1, vbool16_t op2, vuint32m2_t op3, vuint32m2_t op4, const uint32_t * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32m4)))
+void vloxseg2ei64(vuint32m4_t * op0, vuint32m4_t * op1, const uint32_t * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32m4_m)))
+void vloxseg2ei64(vuint32m4_t * op0, vuint32m4_t * op1, vbool8_t op2, vuint32m4_t op3, vuint32m4_t op4, const uint32_t * op5, vuint64m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32mf2)))
+void vloxseg2ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, const uint32_t * op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_u32mf2_m)))
+void vloxseg2ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vbool64_t op2, vuint32mf2_t op3, vuint32mf2_t op4, const uint32_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i32m1)))
+void vloxseg3ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, const int32_t * op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i32m1_m)))
+void vloxseg3ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vbool32_t op3, vint32m1_t op4, vint32m1_t op5, vint32m1_t op6, const int32_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i32m2)))
+void vloxseg3ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, const int32_t * op3, vuint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i32m2_m)))
+void vloxseg3ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vbool16_t op3, vint32m2_t op4, vint32m2_t op5, vint32m2_t op6, const int32_t * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i32mf2)))
+void vloxseg3ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, const int32_t * op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_i32mf2_m)))
+void vloxseg3ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vbool64_t op3, vint32mf2_t op4, vint32mf2_t op5, vint32mf2_t op6, const int32_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u32m1)))
+void vloxseg3ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, const uint32_t * op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u32m1_m)))
+void vloxseg3ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vbool32_t op3, vuint32m1_t op4, vuint32m1_t op5, vuint32m1_t op6, const uint32_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u32m2)))
+void vloxseg3ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, const uint32_t * op3, vuint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u32m2_m)))
+void vloxseg3ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vbool16_t op3, vuint32m2_t op4, vuint32m2_t op5, vuint32m2_t op6, const uint32_t * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u32mf2)))
+void vloxseg3ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, const uint32_t * op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_u32mf2_m)))
+void vloxseg3ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vbool64_t op3, vuint32mf2_t op4, vuint32mf2_t op5, vuint32mf2_t op6, const uint32_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i32m1)))
+void vloxseg4ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, const int32_t * op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i32m1_m)))
+void vloxseg4ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vbool32_t op4, vint32m1_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, const int32_t * op9, vuint64m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i32m2)))
+void vloxseg4ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, const int32_t * op4, vuint64m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i32m2_m)))
+void vloxseg4ei64(vint32m2_t * op0, vint32m2_t * op1, vint32m2_t * op2, vint32m2_t * op3, vbool16_t op4, vint32m2_t op5, vint32m2_t op6, vint32m2_t op7, vint32m2_t op8, const int32_t * op9, vuint64m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i32mf2)))
+void vloxseg4ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, const int32_t * op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_i32mf2_m)))
+void vloxseg4ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vbool64_t op4, vint32mf2_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, const int32_t * op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u32m1)))
+void vloxseg4ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, const uint32_t * op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u32m1_m)))
+void vloxseg4ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vbool32_t op4, vuint32m1_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, const uint32_t * op9, vuint64m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u32m2)))
+void vloxseg4ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, const uint32_t * op4, vuint64m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u32m2_m)))
+void vloxseg4ei64(vuint32m2_t * op0, vuint32m2_t * op1, vuint32m2_t * op2, vuint32m2_t * op3, vbool16_t op4, vuint32m2_t op5, vuint32m2_t op6, vuint32m2_t op7, vuint32m2_t op8, const uint32_t * op9, vuint64m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u32mf2)))
+void vloxseg4ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, const uint32_t * op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_u32mf2_m)))
+void vloxseg4ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vbool64_t op4, vuint32mf2_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, const uint32_t * op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i32m1)))
+void vloxseg5ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, const int32_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i32m1_m)))
+void vloxseg5ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vbool32_t op5, vint32m1_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, const int32_t * op11, vuint64m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i32mf2)))
+void vloxseg5ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, const int32_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_i32mf2_m)))
+void vloxseg5ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vbool64_t op5, vint32mf2_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, const int32_t * op11, vuint64m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u32m1)))
+void vloxseg5ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, const uint32_t * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u32m1_m)))
+void vloxseg5ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vbool32_t op5, vuint32m1_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, const uint32_t * op11, vuint64m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u32mf2)))
+void vloxseg5ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, const uint32_t * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_u32mf2_m)))
+void vloxseg5ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vbool64_t op5, vuint32mf2_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, const uint32_t * op11, vuint64m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i32m1)))
+void vloxseg6ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, const int32_t * op6, vuint64m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i32m1_m)))
+void vloxseg6ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vbool32_t op6, vint32m1_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, const int32_t * op13, vuint64m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i32mf2)))
+void vloxseg6ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, const int32_t * op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_i32mf2_m)))
+void vloxseg6ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vbool64_t op6, vint32mf2_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, const int32_t * op13, vuint64m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u32m1)))
+void vloxseg6ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, const uint32_t * op6, vuint64m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u32m1_m)))
+void vloxseg6ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vbool32_t op6, vuint32m1_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, const uint32_t * op13, vuint64m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u32mf2)))
+void vloxseg6ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, const uint32_t * op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_u32mf2_m)))
+void vloxseg6ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vbool64_t op6, vuint32mf2_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, const uint32_t * op13, vuint64m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i32m1)))
+void vloxseg7ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, const int32_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i32m1_m)))
+void vloxseg7ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vbool32_t op7, vint32m1_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, const int32_t * op15, vuint64m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i32mf2)))
+void vloxseg7ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, const int32_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_i32mf2_m)))
+void vloxseg7ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vbool64_t op7, vint32mf2_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, const int32_t * op15, vuint64m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u32m1)))
+void vloxseg7ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, const uint32_t * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u32m1_m)))
+void vloxseg7ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vbool32_t op7, vuint32m1_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, const uint32_t * op15, vuint64m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u32mf2)))
+void vloxseg7ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, const uint32_t * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_u32mf2_m)))
+void vloxseg7ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vbool64_t op7, vuint32mf2_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, const uint32_t * op15, vuint64m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i32m1)))
+void vloxseg8ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, const int32_t * op8, vuint64m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i32m1_m)))
+void vloxseg8ei64(vint32m1_t * op0, vint32m1_t * op1, vint32m1_t * op2, vint32m1_t * op3, vint32m1_t * op4, vint32m1_t * op5, vint32m1_t * op6, vint32m1_t * op7, vbool32_t op8, vint32m1_t op9, vint32m1_t op10, vint32m1_t op11, vint32m1_t op12, vint32m1_t op13, vint32m1_t op14, vint32m1_t op15, vint32m1_t op16, const int32_t * op17, vuint64m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i32mf2)))
+void vloxseg8ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, const int32_t * op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_i32mf2_m)))
+void vloxseg8ei64(vint32mf2_t * op0, vint32mf2_t * op1, vint32mf2_t * op2, vint32mf2_t * op3, vint32mf2_t * op4, vint32mf2_t * op5, vint32mf2_t * op6, vint32mf2_t * op7, vbool64_t op8, vint32mf2_t op9, vint32mf2_t op10, vint32mf2_t op11, vint32mf2_t op12, vint32mf2_t op13, vint32mf2_t op14, vint32mf2_t op15, vint32mf2_t op16, const int32_t * op17, vuint64m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u32m1)))
+void vloxseg8ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, const uint32_t * op8, vuint64m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u32m1_m)))
+void vloxseg8ei64(vuint32m1_t * op0, vuint32m1_t * op1, vuint32m1_t * op2, vuint32m1_t * op3, vuint32m1_t * op4, vuint32m1_t * op5, vuint32m1_t * op6, vuint32m1_t * op7, vbool32_t op8, vuint32m1_t op9, vuint32m1_t op10, vuint32m1_t op11, vuint32m1_t op12, vuint32m1_t op13, vuint32m1_t op14, vuint32m1_t op15, vuint32m1_t op16, const uint32_t * op17, vuint64m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u32mf2)))
+void vloxseg8ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, const uint32_t * op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_u32mf2_m)))
+void vloxseg8ei64(vuint32mf2_t * op0, vuint32mf2_t * op1, vuint32mf2_t * op2, vuint32mf2_t * op3, vuint32mf2_t * op4, vuint32mf2_t * op5, vuint32mf2_t * op6, vuint32mf2_t * op7, vbool64_t op8, vuint32mf2_t op9, vuint32mf2_t op10, vuint32mf2_t op11, vuint32mf2_t op12, vuint32mf2_t op13, vuint32mf2_t op14, vuint32mf2_t op15, vuint32mf2_t op16, const uint32_t * op17, vuint64m1_t op18, size_t op19);
+
+#endif
+
+#if defined(__riscv_f) && defined(__riscv_zvlsseg)
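+/* Same vloxseg<nf>ei<ew> pattern as above, for float32 element types under
+ * the F extension.  Each unmasked call gathers vl segments of <nf>
+ * consecutive fields through a vector of byte offsets and writes one field
+ * per destination pointer.  A minimal sketch (buf, idx and vl are
+ * hypothetical names, not part of this generated header):
+ *
+ *   vfloat32m1_t f0, f1;
+ *   vloxseg2ei32(&f0, &f1, buf, idx, vl);  // buf: const float *, idx: vuint32m1_t
+ */
+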
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32m1)))
+void vloxseg2ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32m1_m)))
+void vloxseg2ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32m2)))
+void vloxseg2ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32m2_m)))
+void vloxseg2ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32m4)))
+void vloxseg2ei8(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32m4_m)))
+void vloxseg2ei8(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32mf2)))
+void vloxseg2ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f32mf2_m)))
+void vloxseg2ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f32m1)))
+void vloxseg3ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f32m1_m)))
+void vloxseg3ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f32m2)))
+void vloxseg3ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f32m2_m)))
+void vloxseg3ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f32mf2)))
+void vloxseg3ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f32mf2_m)))
+void vloxseg3ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f32m1)))
+void vloxseg4ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f32m1_m)))
+void vloxseg4ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f32m2)))
+void vloxseg4ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f32m2_m)))
+void vloxseg4ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint8mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f32mf2)))
+void vloxseg4ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f32mf2_m)))
+void vloxseg4ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f32m1)))
+void vloxseg5ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f32m1_m)))
+void vloxseg5ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint8mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f32mf2)))
+void vloxseg5ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f32mf2_m)))
+void vloxseg5ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint8mf8_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f32m1)))
+void vloxseg6ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f32m1_m)))
+void vloxseg6ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint8mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f32mf2)))
+void vloxseg6ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f32mf2_m)))
+void vloxseg6ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint8mf8_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f32m1)))
+void vloxseg7ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f32m1_m)))
+void vloxseg7ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint8mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f32mf2)))
+void vloxseg7ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f32mf2_m)))
+void vloxseg7ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint8mf8_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f32m1)))
+void vloxseg8ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f32m1_m)))
+void vloxseg8ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint8mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f32mf2)))
+void vloxseg8ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f32mf2_m)))
+void vloxseg8ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint8mf8_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32m1)))
+void vloxseg2ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32m1_m)))
+void vloxseg2ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32m2)))
+void vloxseg2ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32m2_m)))
+void vloxseg2ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32m4)))
+void vloxseg2ei16(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32m4_m)))
+void vloxseg2ei16(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint16m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32mf2)))
+void vloxseg2ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f32mf2_m)))
+void vloxseg2ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f32m1)))
+void vloxseg3ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f32m1_m)))
+void vloxseg3ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f32m2)))
+void vloxseg3ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint16m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f32m2_m)))
+void vloxseg3ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint16m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f32mf2)))
+void vloxseg3ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint16mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f32mf2_m)))
+void vloxseg3ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f32m1)))
+void vloxseg4ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f32m1_m)))
+void vloxseg4ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint16mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f32m2)))
+void vloxseg4ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint16m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f32m2_m)))
+void vloxseg4ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint16m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f32mf2)))
+void vloxseg4ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f32mf2_m)))
+void vloxseg4ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint16mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f32m1)))
+void vloxseg5ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f32m1_m)))
+void vloxseg5ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint16mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f32mf2)))
+void vloxseg5ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f32mf2_m)))
+void vloxseg5ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint16mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f32m1)))
+void vloxseg6ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint16mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f32m1_m)))
+void vloxseg6ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint16mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f32mf2)))
+void vloxseg6ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint16mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f32mf2_m)))
+void vloxseg6ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint16mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f32m1)))
+void vloxseg7ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f32m1_m)))
+void vloxseg7ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint16mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f32mf2)))
+void vloxseg7ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f32mf2_m)))
+void vloxseg7ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint16mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f32m1)))
+void vloxseg8ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint16mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f32m1_m)))
+void vloxseg8ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint16mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f32mf2)))
+void vloxseg8ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint16mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f32mf2_m)))
+void vloxseg8ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint16mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32m1)))
+void vloxseg2ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32m1_m)))
+void vloxseg2ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32m2)))
+void vloxseg2ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32m2_m)))
+void vloxseg2ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32m4)))
+void vloxseg2ei32(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32m4_m)))
+void vloxseg2ei32(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32mf2)))
+void vloxseg2ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f32mf2_m)))
+void vloxseg2ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f32m1)))
+void vloxseg3ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t *
op2, const float * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f32m1_m))) +void vloxseg3ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f32m2))) +void vloxseg3ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f32m2_m))) +void vloxseg3ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f32mf2))) +void vloxseg3ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f32mf2_m))) +void vloxseg3ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f32m1))) +void vloxseg4ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f32m1_m))) +void vloxseg4ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f32m2))) +void vloxseg4ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f32m2_m))) +void vloxseg4ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f32mf2))) +void vloxseg4ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f32mf2_m))) +void vloxseg4ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f32m1))) +void vloxseg5ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f32m1_m))) +void vloxseg5ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, 
vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f32mf2))) +void vloxseg5ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f32mf2_m))) +void vloxseg5ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f32m1))) +void vloxseg6ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f32m1_m))) +void vloxseg6ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f32mf2))) +void vloxseg6ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f32mf2_m))) +void vloxseg6ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f32m1))) +void vloxseg7ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f32m1_m))) +void vloxseg7ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f32mf2))) +void vloxseg7ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f32mf2_m))) +void vloxseg7ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t 
* op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f32m1))) +void vloxseg8ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f32m1_m))) +void vloxseg8ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f32mf2))) +void vloxseg8ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f32mf2_m))) +void vloxseg8ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32m1))) +void vloxseg2ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32m1_m))) +void vloxseg2ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32m2))) +void vloxseg2ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32m2_m))) +void vloxseg2ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32m4))) +void vloxseg2ei64(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32m4_m))) +void vloxseg2ei64(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32mf2))) +void vloxseg2ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f32mf2_m))) 
+void vloxseg2ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f32m1))) +void vloxseg3ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f32m1_m))) +void vloxseg3ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f32m2))) +void vloxseg3ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f32m2_m))) +void vloxseg3ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f32mf2))) +void vloxseg3ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f32mf2_m))) +void vloxseg3ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f32m1))) +void vloxseg4ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f32m1_m))) +void vloxseg4ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f32m2))) +void vloxseg4ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f32m2_m))) +void vloxseg4ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f32mf2))) +void vloxseg4ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f32mf2_m))) +void vloxseg4ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f32m1))) +void 
vloxseg5ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f32m1_m))) +void vloxseg5ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f32mf2))) +void vloxseg5ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f32mf2_m))) +void vloxseg5ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f32m1))) +void vloxseg6ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f32m1_m))) +void vloxseg6ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f32mf2))) +void vloxseg6ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f32mf2_m))) +void vloxseg6ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f32m1))) +void vloxseg7ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f32m1_m))) +void vloxseg7ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f32mf2))) +void vloxseg7ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t 
* op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f32mf2_m))) +void vloxseg7ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f32m1))) +void vloxseg8ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f32m1_m))) +void vloxseg8ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f32mf2))) +void vloxseg8ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f32mf2_m))) +void vloxseg8ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32m1))) +void vsseg2e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32m1_m))) +void vsseg2e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32m2))) +void vsseg2e32(float * op0, vfloat32m2_t op1, vfloat32m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32m2_m))) +void vsseg2e32(vbool16_t op0, float * op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32m4))) +void vsseg2e32(float * op0, vfloat32m4_t op1, vfloat32m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32m4_m))) +void vsseg2e32(vbool8_t op0, float * op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32mf2))) +void vsseg2e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e32_v_f32mf2_m))) +void 
vsseg2e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_f32m1))) +void vsseg3e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_f32m1_m))) +void vsseg3e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_f32m2))) +void vsseg3e32(float * op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_f32m2_m))) +void vsseg3e32(vbool16_t op0, float * op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_f32mf2))) +void vsseg3e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e32_v_f32mf2_m))) +void vsseg3e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_f32m1))) +void vsseg4e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_f32m1_m))) +void vsseg4e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_f32m2))) +void vsseg4e32(float * op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_f32m2_m))) +void vsseg4e32(vbool16_t op0, float * op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_f32mf2))) +void vsseg4e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e32_v_f32mf2_m))) +void vsseg4e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_f32m1))) +void vsseg5e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_f32m1_m))) +void vsseg5e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_f32mf2))) +void vsseg5e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e32_v_f32mf2_m))) +void vsseg5e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_f32m1))) +void vsseg6e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_f32m1_m))) +void vsseg6e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_f32mf2))) +void vsseg6e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e32_v_f32mf2_m))) +void vsseg6e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_f32m1))) +void vsseg7e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_f32m1_m))) +void vsseg7e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_f32mf2))) +void vsseg7e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e32_v_f32mf2_m))) +void vsseg7e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_f32m1))) +void vsseg8e32(float * op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_f32m1_m))) +void vsseg8e32(vbool32_t op0, float * op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_f32mf2))) +void vsseg8e32(float * op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e32_v_f32mf2_m))) +void vsseg8e32(vbool64_t op0, float * op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32m1))) +void vssseg2e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32m1_m))) +void vssseg2e32(vbool32_t op0, float * op1, 
ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32m2))) +void vssseg2e32(float * op0, ptrdiff_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32m2_m))) +void vssseg2e32(vbool16_t op0, float * op1, ptrdiff_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32m4))) +void vssseg2e32(float * op0, ptrdiff_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32m4_m))) +void vssseg2e32(vbool8_t op0, float * op1, ptrdiff_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32mf2))) +void vssseg2e32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e32_v_f32mf2_m))) +void vssseg2e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_f32m1))) +void vssseg3e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_f32m1_m))) +void vssseg3e32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_f32m2))) +void vssseg3e32(float * op0, ptrdiff_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_f32m2_m))) +void vssseg3e32(vbool16_t op0, float * op1, ptrdiff_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_f32mf2))) +void vssseg3e32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e32_v_f32mf2_m))) +void vssseg3e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_f32m1))) +void vssseg4e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_f32m1_m))) +void vssseg4e32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_f32m2))) +void vssseg4e32(float * op0, ptrdiff_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_f32m2_m))) +void vssseg4e32(vbool16_t op0, float * op1, ptrdiff_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_f32mf2))) +void vssseg4e32(float * op0, 
ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e32_v_f32mf2_m))) +void vssseg4e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_f32m1))) +void vssseg5e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_f32m1_m))) +void vssseg5e32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_f32mf2))) +void vssseg5e32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e32_v_f32mf2_m))) +void vssseg5e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_f32m1))) +void vssseg6e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_f32m1_m))) +void vssseg6e32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_f32mf2))) +void vssseg6e32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e32_v_f32mf2_m))) +void vssseg6e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_f32m1))) +void vssseg7e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_f32m1_m))) +void vssseg7e32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_f32mf2))) +void vssseg7e32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e32_v_f32mf2_m))) +void vssseg7e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, 
vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_f32m1))) +void vssseg8e32(float * op0, ptrdiff_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_f32m1_m))) +void vssseg8e32(vbool32_t op0, float * op1, ptrdiff_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_f32mf2))) +void vssseg8e32(float * op0, ptrdiff_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e32_v_f32mf2_m))) +void vssseg8e32(vbool64_t op0, float * op1, ptrdiff_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32m1))) +void vsuxseg2ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32m1_m))) +void vsuxseg2ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32m2))) +void vsuxseg2ei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32m2_m))) +void vsuxseg2ei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32m4))) +void vsuxseg2ei8(float * op0, vuint8m1_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32m4_m))) +void vsuxseg2ei8(vbool8_t op0, float * op1, vuint8m1_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32mf2))) +void vsuxseg2ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f32mf2_m))) +void vsuxseg2ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f32m1))) +void vsuxseg3ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f32m1_m))) +void vsuxseg3ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f32m2))) +void vsuxseg3ei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f32m2_m))) +void vsuxseg3ei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f32mf2))) +void vsuxseg3ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f32mf2_m))) +void vsuxseg3ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f32m1))) +void vsuxseg4ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f32m1_m))) +void vsuxseg4ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f32m2))) +void vsuxseg4ei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f32m2_m))) +void vsuxseg4ei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f32mf2))) +void vsuxseg4ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f32mf2_m))) +void vsuxseg4ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f32m1))) +void vsuxseg5ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f32m1_m))) +void vsuxseg5ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f32mf2))) +void vsuxseg5ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f32mf2_m))) +void vsuxseg5ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f32m1))) +void vsuxseg6ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f32m1_m))) +void vsuxseg6ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, 
vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f32mf2))) +void vsuxseg6ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f32mf2_m))) +void vsuxseg6ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f32m1))) +void vsuxseg7ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f32m1_m))) +void vsuxseg7ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f32mf2))) +void vsuxseg7ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f32mf2_m))) +void vsuxseg7ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f32m1))) +void vsuxseg8ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f32m1_m))) +void vsuxseg8ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f32mf2))) +void vsuxseg8ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f32mf2_m))) +void vsuxseg8ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32m1))) +void vsuxseg2ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32m1_m))) +void vsuxseg2ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32m2))) +void vsuxseg2ei16(float * 
op0, vuint16m1_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32m2_m))) +void vsuxseg2ei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32m4))) +void vsuxseg2ei16(float * op0, vuint16m2_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32m4_m))) +void vsuxseg2ei16(vbool8_t op0, float * op1, vuint16m2_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32mf2))) +void vsuxseg2ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f32mf2_m))) +void vsuxseg2ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f32m1))) +void vsuxseg3ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f32m1_m))) +void vsuxseg3ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f32m2))) +void vsuxseg3ei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f32m2_m))) +void vsuxseg3ei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f32mf2))) +void vsuxseg3ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f32mf2_m))) +void vsuxseg3ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f32m1))) +void vsuxseg4ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f32m1_m))) +void vsuxseg4ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f32m2))) +void vsuxseg4ei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f32m2_m))) +void vsuxseg4ei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f32mf2))) +void vsuxseg4ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, 
vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f32mf2_m))) +void vsuxseg4ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f32m1))) +void vsuxseg5ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f32m1_m))) +void vsuxseg5ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f32mf2))) +void vsuxseg5ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f32mf2_m))) +void vsuxseg5ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f32m1))) +void vsuxseg6ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f32m1_m))) +void vsuxseg6ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f32mf2))) +void vsuxseg6ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f32mf2_m))) +void vsuxseg6ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f32m1))) +void vsuxseg7ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f32m1_m))) +void vsuxseg7ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f32mf2))) +void vsuxseg7ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f32mf2_m))) +void vsuxseg7ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, 
vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f32m1))) +void vsuxseg8ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f32m1_m))) +void vsuxseg8ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f32mf2))) +void vsuxseg8ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f32mf2_m))) +void vsuxseg8ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32m1))) +void vsuxseg2ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32m1_m))) +void vsuxseg2ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32m2))) +void vsuxseg2ei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32m2_m))) +void vsuxseg2ei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32m4))) +void vsuxseg2ei32(float * op0, vuint32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32m4_m))) +void vsuxseg2ei32(vbool8_t op0, float * op1, vuint32m4_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32mf2))) +void vsuxseg2ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f32mf2_m))) +void vsuxseg2ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f32m1))) +void vsuxseg3ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f32m1_m))) +void vsuxseg3ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f32m2))) +void vsuxseg3ei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t 
op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f32m2_m))) +void vsuxseg3ei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f32mf2))) +void vsuxseg3ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f32mf2_m))) +void vsuxseg3ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f32m1))) +void vsuxseg4ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f32m1_m))) +void vsuxseg4ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f32m2))) +void vsuxseg4ei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f32m2_m))) +void vsuxseg4ei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f32mf2))) +void vsuxseg4ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f32mf2_m))) +void vsuxseg4ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f32m1))) +void vsuxseg5ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f32m1_m))) +void vsuxseg5ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f32mf2))) +void vsuxseg5ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f32mf2_m))) +void vsuxseg5ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f32m1))) +void vsuxseg6ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f32m1_m))) +void vsuxseg6ei32(vbool32_t op0, float * op1, vuint32m1_t op2, 
vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f32mf2))) +void vsuxseg6ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f32mf2_m))) +void vsuxseg6ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f32m1))) +void vsuxseg7ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f32m1_m))) +void vsuxseg7ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f32mf2))) +void vsuxseg7ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f32mf2_m))) +void vsuxseg7ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f32m1))) +void vsuxseg8ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f32m1_m))) +void vsuxseg8ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f32mf2))) +void vsuxseg8ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f32mf2_m))) +void vsuxseg8ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32m1))) +void vsuxseg2ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32m1_m))) +void vsuxseg2ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32m2))) +void vsuxseg2ei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32m2_m))) +void vsuxseg2ei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32m4))) +void vsuxseg2ei64(float * op0, vuint64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32m4_m))) +void vsuxseg2ei64(vbool8_t op0, float * op1, vuint64m8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32mf2))) +void vsuxseg2ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f32mf2_m))) +void vsuxseg2ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f32m1))) +void vsuxseg3ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f32m1_m))) +void vsuxseg3ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f32m2))) +void vsuxseg3ei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f32m2_m))) +void vsuxseg3ei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f32mf2))) +void vsuxseg3ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f32mf2_m))) +void vsuxseg3ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f32m1))) +void vsuxseg4ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f32m1_m))) +void vsuxseg4ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f32m2))) +void vsuxseg4ei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f32m2_m))) +void vsuxseg4ei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f32mf2))) +void 
vsuxseg4ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f32mf2_m))) +void vsuxseg4ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f32m1))) +void vsuxseg5ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f32m1_m))) +void vsuxseg5ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f32mf2))) +void vsuxseg5ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f32mf2_m))) +void vsuxseg5ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f32m1))) +void vsuxseg6ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f32m1_m))) +void vsuxseg6ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f32mf2))) +void vsuxseg6ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f32mf2_m))) +void vsuxseg6ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f32m1))) +void vsuxseg7ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f32m1_m))) +void vsuxseg7ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f32mf2))) +void vsuxseg7ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f32mf2_m))) +void vsuxseg7ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, 
vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f32m1))) +void vsuxseg8ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f32m1_m))) +void vsuxseg8ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f32mf2))) +void vsuxseg8ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f32mf2_m))) +void vsuxseg8ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32m1))) +void vsoxseg2ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32m1_m))) +void vsoxseg2ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32m2))) +void vsoxseg2ei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32m2_m))) +void vsoxseg2ei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32m4))) +void vsoxseg2ei8(float * op0, vuint8m1_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32m4_m))) +void vsoxseg2ei8(vbool8_t op0, float * op1, vuint8m1_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32mf2))) +void vsoxseg2ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f32mf2_m))) +void vsoxseg2ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f32m1))) +void vsoxseg3ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f32m1_m))) +void vsoxseg3ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f32m2))) +void vsoxseg3ei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, 
vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f32m2_m))) +void vsoxseg3ei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f32mf2))) +void vsoxseg3ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f32mf2_m))) +void vsoxseg3ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f32m1))) +void vsoxseg4ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f32m1_m))) +void vsoxseg4ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f32m2))) +void vsoxseg4ei8(float * op0, vuint8mf2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f32m2_m))) +void vsoxseg4ei8(vbool16_t op0, float * op1, vuint8mf2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f32mf2))) +void vsoxseg4ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f32mf2_m))) +void vsoxseg4ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f32m1))) +void vsoxseg5ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f32m1_m))) +void vsoxseg5ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f32mf2))) +void vsoxseg5ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f32mf2_m))) +void vsoxseg5ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f32m1))) +void vsoxseg6ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f32m1_m))) +void vsoxseg6ei8(vbool32_t op0, float * op1, vuint8mf4_t 
op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f32mf2))) +void vsoxseg6ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f32mf2_m))) +void vsoxseg6ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f32m1))) +void vsoxseg7ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f32m1_m))) +void vsoxseg7ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f32mf2))) +void vsoxseg7ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f32mf2_m))) +void vsoxseg7ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f32m1))) +void vsoxseg8ei8(float * op0, vuint8mf4_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f32m1_m))) +void vsoxseg8ei8(vbool32_t op0, float * op1, vuint8mf4_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f32mf2))) +void vsoxseg8ei8(float * op0, vuint8mf8_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f32mf2_m))) +void vsoxseg8ei8(vbool64_t op0, float * op1, vuint8mf8_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32m1))) +void vsoxseg2ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32m1_m))) +void vsoxseg2ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32m2))) +void vsoxseg2ei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32m2_m))) +void vsoxseg2ei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32m4))) +void vsoxseg2ei16(float * op0, vuint16m2_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32m4_m))) +void vsoxseg2ei16(vbool8_t op0, float * op1, vuint16m2_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32mf2))) +void vsoxseg2ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f32mf2_m))) +void vsoxseg2ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f32m1))) +void vsoxseg3ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f32m1_m))) +void vsoxseg3ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f32m2))) +void vsoxseg3ei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f32m2_m))) +void vsoxseg3ei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f32mf2))) +void vsoxseg3ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f32mf2_m))) +void vsoxseg3ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f32m1))) +void vsoxseg4ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f32m1_m))) +void vsoxseg4ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f32m2))) +void vsoxseg4ei16(float * op0, vuint16m1_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f32m2_m))) +void vsoxseg4ei16(vbool16_t op0, float * op1, vuint16m1_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f32mf2))) +void 
vsoxseg4ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f32mf2_m))) +void vsoxseg4ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f32m1))) +void vsoxseg5ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f32m1_m))) +void vsoxseg5ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f32mf2))) +void vsoxseg5ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f32mf2_m))) +void vsoxseg5ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f32m1))) +void vsoxseg6ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f32m1_m))) +void vsoxseg6ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f32mf2))) +void vsoxseg6ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f32mf2_m))) +void vsoxseg6ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f32m1))) +void vsoxseg7ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f32m1_m))) +void vsoxseg7ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f32mf2))) +void vsoxseg7ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f32mf2_m))) +void vsoxseg7ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, 
vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f32m1))) +void vsoxseg8ei16(float * op0, vuint16mf2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f32m1_m))) +void vsoxseg8ei16(vbool32_t op0, float * op1, vuint16mf2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f32mf2))) +void vsoxseg8ei16(float * op0, vuint16mf4_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f32mf2_m))) +void vsoxseg8ei16(vbool64_t op0, float * op1, vuint16mf4_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32m1))) +void vsoxseg2ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32m1_m))) +void vsoxseg2ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32m2))) +void vsoxseg2ei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32m2_m))) +void vsoxseg2ei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32m4))) +void vsoxseg2ei32(float * op0, vuint32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32m4_m))) +void vsoxseg2ei32(vbool8_t op0, float * op1, vuint32m4_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32mf2))) +void vsoxseg2ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f32mf2_m))) +void vsoxseg2ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f32m1))) +void vsoxseg3ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f32m1_m))) +void vsoxseg3ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f32m2))) +void 
vsoxseg3ei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f32m2_m))) +void vsoxseg3ei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f32mf2))) +void vsoxseg3ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f32mf2_m))) +void vsoxseg3ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f32m1))) +void vsoxseg4ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f32m1_m))) +void vsoxseg4ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f32m2))) +void vsoxseg4ei32(float * op0, vuint32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f32m2_m))) +void vsoxseg4ei32(vbool16_t op0, float * op1, vuint32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f32mf2))) +void vsoxseg4ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f32mf2_m))) +void vsoxseg4ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f32m1))) +void vsoxseg5ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f32m1_m))) +void vsoxseg5ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f32mf2))) +void vsoxseg5ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f32mf2_m))) +void vsoxseg5ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f32m1))) +void vsoxseg6ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f32m1_m))) +void vsoxseg6ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f32mf2))) +void vsoxseg6ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f32mf2_m))) +void vsoxseg6ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f32m1))) +void vsoxseg7ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f32m1_m))) +void vsoxseg7ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f32mf2))) +void vsoxseg7ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f32mf2_m))) +void vsoxseg7ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f32m1))) +void vsoxseg8ei32(float * op0, vuint32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f32m1_m))) +void vsoxseg8ei32(vbool32_t op0, float * op1, vuint32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f32mf2))) +void vsoxseg8ei32(float * op0, vuint32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f32mf2_m))) +void vsoxseg8ei32(vbool64_t op0, float * op1, vuint32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32m1))) +void vsoxseg2ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32m1_m))) +void 
vsoxseg2ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32m2))) +void vsoxseg2ei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32m2_m))) +void vsoxseg2ei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32m4))) +void vsoxseg2ei64(float * op0, vuint64m8_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32m4_m))) +void vsoxseg2ei64(vbool8_t op0, float * op1, vuint64m8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32mf2))) +void vsoxseg2ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f32mf2_m))) +void vsoxseg2ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f32m1))) +void vsoxseg3ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f32m1_m))) +void vsoxseg3ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f32m2))) +void vsoxseg3ei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f32m2_m))) +void vsoxseg3ei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f32mf2))) +void vsoxseg3ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f32mf2_m))) +void vsoxseg3ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f32m1))) +void vsoxseg4ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f32m1_m))) +void vsoxseg4ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f32m2))) +void vsoxseg4ei64(float * op0, vuint64m4_t op1, vfloat32m2_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f32m2_m))) +void vsoxseg4ei64(vbool16_t op0, float * op1, vuint64m4_t op2, vfloat32m2_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, 
size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f32mf2))) +void vsoxseg4ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f32mf2_m))) +void vsoxseg4ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f32m1))) +void vsoxseg5ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f32m1_m))) +void vsoxseg5ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f32mf2))) +void vsoxseg5ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f32mf2_m))) +void vsoxseg5ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f32m1))) +void vsoxseg6ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f32m1_m))) +void vsoxseg6ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f32mf2))) +void vsoxseg6ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f32mf2_m))) +void vsoxseg6ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f32m1))) +void vsoxseg7ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f32m1_m))) +void vsoxseg7ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f32mf2))) +void vsoxseg7ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f32mf2_m)))
+void vsoxseg7ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f32m1)))
+void vsoxseg8ei64(float * op0, vuint64m2_t op1, vfloat32m1_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f32m1_m)))
+void vsoxseg8ei64(vbool32_t op0, float * op1, vuint64m2_t op2, vfloat32m1_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f32mf2)))
+void vsoxseg8ei64(float * op0, vuint64m1_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f32mf2_m)))
+void vsoxseg8ei64(vbool64_t op0, float * op1, vuint64m1_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_f32m1_m)))
+void vlseg2e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_f32m2_m)))
+void vlseg2e32(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_f32m4_m)))
+void vlseg2e32(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32_v_f32mf2_m)))
+void vlseg2e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_f32m1_m)))
+void vlseg3e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_f32m2_m)))
+void vlseg3e32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32_v_f32mf2_m)))
+void vlseg3e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_f32m1_m)))
+void vlseg4e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_f32m2_m)))
+void vlseg4e32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32_v_f32mf2_m)))
+void vlseg4e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32_v_f32m1_m)))
+void vlseg5e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, size_t op12);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32_v_f32mf2_m)))
+void vlseg5e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, size_t op12);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32_v_f32m1_m)))
+void vlseg6e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, size_t op14);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32_v_f32mf2_m)))
+void vlseg6e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, size_t op14);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32_v_f32m1_m)))
+void vlseg7e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, size_t op16);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32_v_f32mf2_m)))
+void vlseg7e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, size_t op16);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32_v_f32m1_m)))
+void vlseg8e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32_v_f32mf2_m)))
+void vlseg8e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, size_t op18);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_f32m1_m)))
+void vlseg2e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_f32m2_m)))
+void vlseg2e32ff(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_f32m4_m)))
+void vlseg2e32ff(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e32ff_v_f32mf2_m)))
+void vlseg2e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, size_t * op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_f32m1_m)))
+void vlseg3e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_f32m2_m)))
+void vlseg3e32ff(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e32ff_v_f32mf2_m)))
+void vlseg3e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, size_t * op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_f32m1_m)))
+void vlseg4e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_f32m2_m)))
+void vlseg4e32ff(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e32ff_v_f32mf2_m)))
+void vlseg4e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, size_t * op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32ff_v_f32m1_m)))
+void vlseg5e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e32ff_v_f32mf2_m)))
+void vlseg5e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, size_t * op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32ff_v_f32m1_m)))
+void vlseg6e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e32ff_v_f32mf2_m)))
+void vlseg6e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, size_t * op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32ff_v_f32m1_m)))
+void vlseg7e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e32ff_v_f32mf2_m)))
+void vlseg7e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, size_t * op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32ff_v_f32m1_m)))
+void vlseg8e32ff(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e32ff_v_f32mf2_m)))
+void vlseg8e32ff(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, size_t * op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_f32m1_m)))
+void vlsseg2e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_f32m2_m)))
+void vlsseg2e32(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_f32m4_m)))
+void vlsseg2e32(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e32_v_f32mf2_m)))
+void vlsseg2e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, ptrdiff_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_f32m1_m)))
+void vlsseg3e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_f32m2_m)))
+void vlsseg3e32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e32_v_f32mf2_m)))
+void vlsseg3e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, ptrdiff_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_f32m1_m)))
+void vlsseg4e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, ptrdiff_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_f32m2_m)))
+void vlsseg4e32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, ptrdiff_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e32_v_f32mf2_m)))
+void vlsseg4e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, ptrdiff_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e32_v_f32m1_m)))
+void vlsseg5e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, ptrdiff_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e32_v_f32mf2_m)))
+void vlsseg5e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, ptrdiff_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e32_v_f32m1_m)))
+void vlsseg6e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, ptrdiff_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e32_v_f32mf2_m)))
+void vlsseg6e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, ptrdiff_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e32_v_f32m1_m)))
+void vlsseg7e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, ptrdiff_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e32_v_f32mf2_m)))
+void vlsseg7e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, ptrdiff_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e32_v_f32m1_m)))
+void vlsseg8e32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, ptrdiff_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e32_v_f32mf2_m)))
+void vlsseg8e32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, ptrdiff_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32m1)))
+void vluxseg2ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32m1_m)))
+void vluxseg2ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32m2)))
+void vluxseg2ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32m2_m)))
+void vluxseg2ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32m4)))
+void vluxseg2ei8(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32m4_m)))
+void vluxseg2ei8(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32mf2)))
+void vluxseg2ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f32mf2_m)))
+void vluxseg2ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f32m1)))
+void vluxseg3ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f32m1_m)))
+void vluxseg3ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f32m2)))
+void vluxseg3ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint8mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f32m2_m)))
+void vluxseg3ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint8mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f32mf2)))
+void vluxseg3ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f32mf2_m)))
+void vluxseg3ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f32m1)))
+void vluxseg4ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f32m1_m)))
+void vluxseg4ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f32m2)))
+void vluxseg4ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint8mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f32m2_m)))
+void vluxseg4ei8(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint8mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f32mf2)))
+void vluxseg4ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f32mf2_m)))
+void vluxseg4ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f32m1)))
+void vluxseg5ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f32m1_m)))
+void vluxseg5ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint8mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f32mf2)))
+void vluxseg5ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f32mf2_m)))
+void vluxseg5ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint8mf8_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f32m1)))
+void vluxseg6ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint8mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f32m1_m)))
+void vluxseg6ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint8mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f32mf2)))
+void vluxseg6ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f32mf2_m)))
+void vluxseg6ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint8mf8_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f32m1)))
+void vluxseg7ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f32m1_m)))
+void vluxseg7ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint8mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f32mf2)))
+void vluxseg7ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f32mf2_m)))
+void vluxseg7ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint8mf8_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f32m1)))
+void vluxseg8ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint8mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f32m1_m)))
+void vluxseg8ei8(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint8mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f32mf2)))
+void vluxseg8ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f32mf2_m)))
+void vluxseg8ei8(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint8mf8_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32m1)))
+void vluxseg2ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32m1_m)))
+void vluxseg2ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32m2)))
+void vluxseg2ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32m2_m)))
+void vluxseg2ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32m4)))
+void vluxseg2ei16(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32m4_m)))
+void vluxseg2ei16(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint16m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32mf2)))
+void vluxseg2ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f32mf2_m)))
+void vluxseg2ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f32m1)))
+void vluxseg3ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f32m1_m)))
+void vluxseg3ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f32m2)))
+void vluxseg3ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint16m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f32m2_m)))
+void vluxseg3ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint16m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f32mf2)))
+void vluxseg3ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint16mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f32mf2_m)))
+void vluxseg3ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f32m1)))
+void vluxseg4ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f32m1_m)))
+void vluxseg4ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint16mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f32m2)))
+void vluxseg4ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint16m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f32m2_m)))
+void vluxseg4ei16(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint16m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f32mf2)))
+void vluxseg4ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f32mf2_m)))
+void vluxseg4ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint16mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f32m1)))
+void vluxseg5ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f32m1_m)))
+void vluxseg5ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint16mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f32mf2)))
+void vluxseg5ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f32mf2_m)))
+void vluxseg5ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint16mf4_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f32m1)))
+void vluxseg6ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint16mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f32m1_m)))
+void vluxseg6ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint16mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f32mf2)))
+void vluxseg6ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint16mf4_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f32mf2_m)))
+void vluxseg6ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint16mf4_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f32m1)))
+void vluxseg7ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f32m1_m)))
+void vluxseg7ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint16mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f32mf2)))
+void vluxseg7ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f32mf2_m)))
+void vluxseg7ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint16mf4_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f32m1)))
+void vluxseg8ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint16mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f32m1_m)))
+void vluxseg8ei16(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint16mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f32mf2)))
+void vluxseg8ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint16mf4_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f32mf2_m)))
+void vluxseg8ei16(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint16mf4_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32m1)))
+void vluxseg2ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint32m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32m1_m)))
+void vluxseg2ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32m2)))
+void vluxseg2ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint32m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32m2_m)))
+void vluxseg2ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint32m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32m4)))
+void vluxseg2ei32(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint32m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32m4_m)))
+void vluxseg2ei32(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint32m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32mf2)))
+void vluxseg2ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint32mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f32mf2_m)))
+void vluxseg2ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f32m1)))
+void vluxseg3ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint32m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f32m1_m)))
+void vluxseg3ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f32m2)))
+void vluxseg3ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint32m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f32m2_m)))
+void vluxseg3ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint32m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f32mf2)))
+void vluxseg3ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint32mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f32mf2_m)))
+void vluxseg3ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f32m1)))
+void vluxseg4ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint32m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f32m1_m)))
+void vluxseg4ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint32m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f32m2)))
+void vluxseg4ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint32m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f32m2_m)))
+void vluxseg4ei32(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint32m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f32mf2)))
+void vluxseg4ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint32mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f32mf2_m)))
+void vluxseg4ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint32mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f32m1)))
+void vluxseg5ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint32m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f32m1_m)))
+void vluxseg5ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint32m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f32mf2)))
+void vluxseg5ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint32mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f32mf2_m)))
+void vluxseg5ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint32mf2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f32m1)))
+void vluxseg6ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint32m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f32m1_m)))
+void vluxseg6ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint32m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f32mf2)))
+void vluxseg6ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint32mf2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f32mf2_m)))
+void vluxseg6ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint32mf2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f32m1)))
+void vluxseg7ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint32m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f32m1_m)))
+void vluxseg7ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint32m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f32mf2)))
+void vluxseg7ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint32mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f32mf2_m)))
+void vluxseg7ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint32mf2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f32m1)))
+void vluxseg8ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint32m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f32m1_m)))
+void vluxseg8ei32(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint32m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f32mf2)))
+void vluxseg8ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint32mf2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f32mf2_m)))
+void vluxseg8ei32(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint32mf2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32m1)))
+void vluxseg2ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, const float * op2, vuint64m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32m1_m)))
+void vluxseg2ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vbool32_t op2, vfloat32m1_t op3, vfloat32m1_t op4, const float * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32m2)))
+void vluxseg2ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, const float * op2, vuint64m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32m2_m)))
+void vluxseg2ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vbool16_t op2, vfloat32m2_t op3, vfloat32m2_t op4, const float * op5, vuint64m4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32m4)))
+void vluxseg2ei64(vfloat32m4_t * op0, vfloat32m4_t * op1, const float * op2, vuint64m8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32m4_m)))
+void vluxseg2ei64(vfloat32m4_t * op0, vfloat32m4_t * op1, vbool8_t op2, vfloat32m4_t op3, vfloat32m4_t op4, const float * op5, vuint64m8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32mf2)))
+void vluxseg2ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, const float * op2, vuint64m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f32mf2_m)))
+void vluxseg2ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vbool64_t op2, vfloat32mf2_t op3, vfloat32mf2_t op4, const float * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f32m1)))
+void vluxseg3ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, const float * op3, vuint64m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f32m1_m)))
+void vluxseg3ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vbool32_t op3, vfloat32m1_t op4, vfloat32m1_t op5, vfloat32m1_t op6, const float * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f32m2)))
+void vluxseg3ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, const float * op3, vuint64m4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f32m2_m)))
+void vluxseg3ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vbool16_t op3, vfloat32m2_t op4, vfloat32m2_t op5, vfloat32m2_t op6, const float * op7, vuint64m4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f32mf2)))
+void vluxseg3ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, const float * op3, vuint64m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f32mf2_m)))
+void vluxseg3ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vbool64_t op3, vfloat32mf2_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, const float * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f32m1)))
+void vluxseg4ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, const float * op4, vuint64m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f32m1_m)))
+void vluxseg4ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vbool32_t op4, vfloat32m1_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, const float * op9, vuint64m2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f32m2)))
+void vluxseg4ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, const float * op4, vuint64m4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f32m2_m)))
+void vluxseg4ei64(vfloat32m2_t * op0, vfloat32m2_t * op1, vfloat32m2_t * op2, vfloat32m2_t * op3, vbool16_t op4, vfloat32m2_t op5, vfloat32m2_t op6, vfloat32m2_t op7, vfloat32m2_t op8, const float * op9, vuint64m4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f32mf2)))
+void vluxseg4ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, const float * op4, vuint64m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f32mf2_m)))
+void vluxseg4ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vbool64_t op4, vfloat32mf2_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, const float * op9, vuint64m1_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f32m1)))
+void vluxseg5ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, const float * op5, vuint64m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f32m1_m)))
+void vluxseg5ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vbool32_t op5, vfloat32m1_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, const float * op11, vuint64m2_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f32mf2)))
+void vluxseg5ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, const float * op5, vuint64m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f32mf2_m)))
+void vluxseg5ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vbool64_t op5, vfloat32mf2_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, const float * op11, vuint64m1_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f32m1)))
+void vluxseg6ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, const float * op6, vuint64m2_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f32m1_m)))
+void vluxseg6ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vbool32_t op6, vfloat32m1_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, const float * op13, vuint64m2_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f32mf2)))
+void vluxseg6ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, const float * op6, vuint64m1_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f32mf2_m)))
+void vluxseg6ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vbool64_t op6, vfloat32mf2_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, const float * op13, vuint64m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f32m1)))
+void vluxseg7ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, const float * op7, vuint64m2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f32m1_m)))
+void vluxseg7ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vbool32_t op7, vfloat32m1_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, const float * op15, vuint64m2_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f32mf2)))
+void vluxseg7ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, const float * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f32mf2_m)))
+void vluxseg7ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vbool64_t op7, vfloat32mf2_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, const float * op15, vuint64m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f32m1)))
+void vluxseg8ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, const float * op8, vuint64m2_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f32m1_m)))
+void vluxseg8ei64(vfloat32m1_t * op0, vfloat32m1_t * op1, vfloat32m1_t * op2, vfloat32m1_t * op3, vfloat32m1_t * op4, vfloat32m1_t * op5, vfloat32m1_t * op6, vfloat32m1_t * op7, vbool32_t op8, vfloat32m1_t op9, vfloat32m1_t op10, vfloat32m1_t op11, vfloat32m1_t op12, vfloat32m1_t op13, vfloat32m1_t op14, vfloat32m1_t op15, vfloat32m1_t op16, const float * op17, vuint64m2_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f32mf2)))
+void vluxseg8ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, const float * op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f32mf2_m)))
+void vluxseg8ei64(vfloat32mf2_t * op0, vfloat32mf2_t * op1, vfloat32mf2_t * op2, vfloat32mf2_t * op3, vfloat32mf2_t * op4, vfloat32mf2_t * op5, vfloat32mf2_t * op6, vfloat32mf2_t * op7, vbool64_t op8, vfloat32mf2_t op9, vfloat32mf2_t op10, vfloat32mf2_t op11, vfloat32mf2_t op12, vfloat32mf2_t op13, vfloat32mf2_t op14, vfloat32mf2_t op15, vfloat32mf2_t op16, const float * op17, vuint64m1_t op18, size_t op19);
+
+#endif
+
+#if defined(__riscv_d) && defined(__riscv_zvlsseg)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f64m1)))
+void vloxseg2ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint8mf8_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f64m1_m)))
+void vloxseg2ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f64m2)))
+void vloxseg2ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f64m2_m)))
+void vloxseg2ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f64m4)))
+void vloxseg2ei8(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f64m4_m)))
+void vloxseg2ei8(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f64m1)))
+void vloxseg3ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint8mf8_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f64m1_m)))
+void vloxseg3ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f64m2)))
+void vloxseg3ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint8mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f64m2_m)))
+void vloxseg3ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint8mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f64m1)))
+void vloxseg4ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint8mf8_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f64m1_m)))
+void vloxseg4ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint8mf8_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f64m2)))
+void vloxseg4ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint8mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f64m2_m)))
+void vloxseg4ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint8mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f64m1)))
+void vloxseg5ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint8mf8_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f64m1_m)))
+void vloxseg5ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint8mf8_t op12, size_t op13);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f64m1)))
+void vloxseg6ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint8mf8_t op7, size_t op8);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f64m1_m)))
+void vloxseg6ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint8mf8_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f64m1)))
+void vloxseg7ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint8mf8_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f64m1_m)))
+void vloxseg7ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint8mf8_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f64m1)))
+void vloxseg8ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint8mf8_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f64m1_m)))
+void vloxseg8ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint8mf8_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f64m1)))
+void vloxseg2ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f64m1_m)))
+void vloxseg2ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f64m2)))
+void vloxseg2ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f64m2_m)))
+void vloxseg2ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f64m4)))
+void vloxseg2ei16(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f64m4_m)))
+void vloxseg2ei16(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f64m1)))
+void vloxseg3ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint16mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f64m1_m)))
+void vloxseg3ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint16mf4_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f64m2)))
+void vloxseg3ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f64m2_m)))
+void vloxseg3ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint16mf2_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f64m1)))
+void vloxseg4ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f64m1_m)))
+void vloxseg4ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint16mf4_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f64m2)))
+void vloxseg4ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f64m2_m)))
+void vloxseg4ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint16mf2_t op10, size_t op11);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f64m1)))
+void vloxseg5ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint16mf4_t op6, size_t op7);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f64m1_m))) +void vloxseg5ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f64m1))) +void vloxseg6ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f64m1_m))) +void vloxseg6ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f64m1))) +void vloxseg7ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f64m1_m))) +void vloxseg7ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f64m1))) +void vloxseg8ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f64m1_m))) +void vloxseg8ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f64m1))) +void vloxseg2ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f64m1_m))) +void vloxseg2ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f64m2))) +void vloxseg2ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f64m2_m))) +void vloxseg2ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint32m1_t op6, size_t op7); + 
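+/* Illustrative sketch, not part of the generated header: the vloxseg
+ * overloads above perform ordered, indexed segment loads, gathering N
+ * consecutive fields per element from base + byte offset and writing
+ * each field group to its own destination register. The names below
+ * (xs, ys, base, idx, vl) are hypothetical:
+ *
+ *   vfloat64m1_t xs, ys;
+ *   // idx holds the byte offset of each {x, y} pair: 0, 16, 32, ...
+ *   vloxseg2ei16(&xs, &ys, base, idx, vl);  // deinterleave pairs
+ */
+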
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f64m4))) +void vloxseg2ei32(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f64m4_m))) +void vloxseg2ei32(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f64m1))) +void vloxseg3ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f64m1_m))) +void vloxseg3ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f64m2))) +void vloxseg3ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f64m2_m))) +void vloxseg3ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f64m1))) +void vloxseg4ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f64m1_m))) +void vloxseg4ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f64m2))) +void vloxseg4ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f64m2_m))) +void vloxseg4ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f64m1))) +void vloxseg5ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f64m1_m))) +void vloxseg5ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f64m1))) +void vloxseg6ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint32mf2_t op7, size_t 
op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f64m1_m))) +void vloxseg6ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f64m1))) +void vloxseg7ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f64m1_m))) +void vloxseg7ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f64m1))) +void vloxseg8ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f64m1_m))) +void vloxseg8ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f64m1))) +void vloxseg2ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f64m1_m))) +void vloxseg2ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f64m2))) +void vloxseg2ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f64m2_m))) +void vloxseg2ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f64m4))) +void vloxseg2ei64(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f64m4_m))) +void vloxseg2ei64(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f64m1))) +void vloxseg3ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * 
op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f64m1_m))) +void vloxseg3ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f64m2))) +void vloxseg3ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f64m2_m))) +void vloxseg3ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f64m1))) +void vloxseg4ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f64m1_m))) +void vloxseg4ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f64m2))) +void vloxseg4ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f64m2_m))) +void vloxseg4ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f64m1))) +void vloxseg5ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f64m1_m))) +void vloxseg5ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f64m1))) +void vloxseg6ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f64m1_m))) +void vloxseg6ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f64m1))) +void vloxseg7ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, 
vfloat64m1_t * op6, const double * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f64m1_m))) +void vloxseg7ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f64m1))) +void vloxseg8ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f64m1_m))) +void vloxseg8ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint64m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_f64m1))) +void vsseg2e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_f64m1_m))) +void vsseg2e64(vbool64_t op0, double * op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_f64m2))) +void vsseg2e64(double * op0, vfloat64m2_t op1, vfloat64m2_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_f64m2_m))) +void vsseg2e64(vbool32_t op0, double * op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_f64m4))) +void vsseg2e64(double * op0, vfloat64m4_t op1, vfloat64m4_t op2, size_t op3); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e64_v_f64m4_m))) +void vsseg2e64(vbool16_t op0, double * op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_f64m1))) +void vsseg3e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_f64m1_m))) +void vsseg3e64(vbool64_t op0, double * op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_f64m2))) +void vsseg3e64(double * op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e64_v_f64m2_m))) +void vsseg3e64(vbool32_t op0, double * op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_f64m1))) +void vsseg4e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_f64m1_m))) +void vsseg4e64(vbool64_t op0, double * op1, vfloat64m1_t op2, 
vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_f64m2))) +void vsseg4e64(double * op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e64_v_f64m2_m))) +void vsseg4e64(vbool32_t op0, double * op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e64_v_f64m1))) +void vsseg5e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e64_v_f64m1_m))) +void vsseg5e64(vbool64_t op0, double * op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e64_v_f64m1))) +void vsseg6e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e64_v_f64m1_m))) +void vsseg6e64(vbool64_t op0, double * op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e64_v_f64m1))) +void vsseg7e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e64_v_f64m1_m))) +void vsseg7e64(vbool64_t op0, double * op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e64_v_f64m1))) +void vsseg8e64(double * op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e64_v_f64m1_m))) +void vsseg8e64(vbool64_t op0, double * op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_f64m1))) +void vssseg2e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_f64m1_m))) +void vssseg2e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_f64m2))) +void vssseg2e64(double * op0, ptrdiff_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_f64m2_m))) +void vssseg2e64(vbool32_t op0, double * op1, ptrdiff_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_f64m4))) +void vssseg2e64(double * op0, ptrdiff_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e64_v_f64m4_m))) +void vssseg2e64(vbool16_t op0, double * op1, ptrdiff_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_f64m1))) +void vssseg3e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_f64m1_m))) +void vssseg3e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_f64m2))) +void vssseg3e64(double * op0, ptrdiff_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e64_v_f64m2_m))) +void vssseg3e64(vbool32_t op0, double * op1, ptrdiff_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_f64m1))) +void vssseg4e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_f64m1_m))) +void vssseg4e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_f64m2))) +void vssseg4e64(double * op0, ptrdiff_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e64_v_f64m2_m))) +void vssseg4e64(vbool32_t op0, double * op1, ptrdiff_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e64_v_f64m1))) +void vssseg5e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e64_v_f64m1_m))) +void vssseg5e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e64_v_f64m1))) +void vssseg6e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e64_v_f64m1_m))) +void vssseg6e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e64_v_f64m1))) +void vssseg7e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e64_v_f64m1_m))) +void vssseg7e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); 
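+/* Illustrative sketch, not part of the generated header: the vsseg /
+ * vssseg overloads above store N field registers back to memory as
+ * interleaved segments, vsseg at unit stride and vssseg with an
+ * explicit byte stride between segments. Hypothetical names (base,
+ * xs, ys, vl):
+ *
+ *   // pack xs/ys back into an array of {x, y} pairs; the segment
+ *   // stride is 2 * sizeof(double) = 16 bytes
+ *   vssseg2e64(base, 16, xs, ys, vl);
+ */
+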
+ +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e64_v_f64m1))) +void vssseg8e64(double * op0, ptrdiff_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e64_v_f64m1_m))) +void vssseg8e64(vbool64_t op0, double * op1, ptrdiff_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f64m1))) +void vsuxseg2ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f64m1_m))) +void vsuxseg2ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f64m2))) +void vsuxseg2ei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f64m2_m))) +void vsuxseg2ei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f64m4))) +void vsuxseg2ei8(double * op0, vuint8mf2_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f64m4_m))) +void vsuxseg2ei8(vbool16_t op0, double * op1, vuint8mf2_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f64m1))) +void vsuxseg3ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f64m1_m))) +void vsuxseg3ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f64m2))) +void vsuxseg3ei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f64m2_m))) +void vsuxseg3ei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f64m1))) +void vsuxseg4ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f64m1_m))) +void vsuxseg4ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f64m2))) +void vsuxseg4ei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f64m2_m))) +void vsuxseg4ei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, vfloat64m2_t op4, 
vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f64m1))) +void vsuxseg5ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f64m1_m))) +void vsuxseg5ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f64m1))) +void vsuxseg6ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f64m1_m))) +void vsuxseg6ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f64m1))) +void vsuxseg7ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f64m1_m))) +void vsuxseg7ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f64m1))) +void vsuxseg8ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f64m1_m))) +void vsuxseg8ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f64m1))) +void vsuxseg2ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f64m1_m))) +void vsuxseg2ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f64m2))) +void vsuxseg2ei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f64m2_m))) +void vsuxseg2ei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f64m4))) +void vsuxseg2ei16(double * op0, vuint16m1_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f64m4_m))) +void vsuxseg2ei16(vbool16_t op0, double * op1, vuint16m1_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f64m1))) +void vsuxseg3ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f64m1_m))) +void vsuxseg3ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f64m2))) +void vsuxseg3ei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f64m2_m))) +void vsuxseg3ei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f64m1))) +void vsuxseg4ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f64m1_m))) +void vsuxseg4ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f64m2))) +void vsuxseg4ei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f64m2_m))) +void vsuxseg4ei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f64m1))) +void vsuxseg5ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f64m1_m))) +void vsuxseg5ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f64m1))) +void vsuxseg6ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f64m1_m))) +void vsuxseg6ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f64m1))) +void vsuxseg7ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f64m1_m))) +void vsuxseg7ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f64m1))) +void 
vsuxseg8ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f64m1_m))) +void vsuxseg8ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f64m1))) +void vsuxseg2ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f64m1_m))) +void vsuxseg2ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f64m2))) +void vsuxseg2ei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f64m2_m))) +void vsuxseg2ei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f64m4))) +void vsuxseg2ei32(double * op0, vuint32m2_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f64m4_m))) +void vsuxseg2ei32(vbool16_t op0, double * op1, vuint32m2_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f64m1))) +void vsuxseg3ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f64m1_m))) +void vsuxseg3ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f64m2))) +void vsuxseg3ei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f64m2_m))) +void vsuxseg3ei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f64m1))) +void vsuxseg4ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f64m1_m))) +void vsuxseg4ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f64m2))) +void vsuxseg4ei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f64m2_m))) +void vsuxseg4ei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + 
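+/* Illustrative sketch, not part of the generated header: the vsuxseg
+ * overloads above are the index-unordered indexed segment stores (the
+ * ordered form, vsoxseg, is declared further below); each element's N
+ * fields are scattered to base + byte offset with no ordering
+ * guarantee between elements. Hypothetical names (base, offs, xs, ys,
+ * vl):
+ *
+ *   // offs holds the destination byte offset of each {x, y} pair
+ *   vsuxseg2ei32(base, offs, xs, ys, vl);
+ */
+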
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f64m1))) +void vsuxseg5ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f64m1_m))) +void vsuxseg5ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f64m1))) +void vsuxseg6ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f64m1_m))) +void vsuxseg6ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f64m1))) +void vsuxseg7ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f64m1_m))) +void vsuxseg7ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f64m1))) +void vsuxseg8ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f64m1_m))) +void vsuxseg8ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f64m1))) +void vsuxseg2ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f64m1_m))) +void vsuxseg2ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f64m2))) +void vsuxseg2ei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f64m2_m))) +void vsuxseg2ei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f64m4))) +void vsuxseg2ei64(double * op0, vuint64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f64m4_m))) +void vsuxseg2ei64(vbool16_t op0, double * op1, vuint64m4_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f64m1))) +void vsuxseg3ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f64m1_m))) +void vsuxseg3ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f64m2))) +void vsuxseg3ei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f64m2_m))) +void vsuxseg3ei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f64m1))) +void vsuxseg4ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f64m1_m))) +void vsuxseg4ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f64m2))) +void vsuxseg4ei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f64m2_m))) +void vsuxseg4ei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f64m1))) +void vsuxseg5ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f64m1_m))) +void vsuxseg5ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f64m1))) +void vsuxseg6ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f64m1_m))) +void vsuxseg6ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f64m1))) +void vsuxseg7ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f64m1_m))) +void vsuxseg7ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f64m1))) +void vsuxseg8ei64(double 
* op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f64m1_m))) +void vsuxseg8ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f64m1))) +void vsoxseg2ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f64m1_m))) +void vsoxseg2ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f64m2))) +void vsoxseg2ei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f64m2_m))) +void vsoxseg2ei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f64m4))) +void vsoxseg2ei8(double * op0, vuint8mf2_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f64m4_m))) +void vsoxseg2ei8(vbool16_t op0, double * op1, vuint8mf2_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f64m1))) +void vsoxseg3ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f64m1_m))) +void vsoxseg3ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f64m2))) +void vsoxseg3ei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f64m2_m))) +void vsoxseg3ei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f64m1))) +void vsoxseg4ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f64m1_m))) +void vsoxseg4ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f64m2))) +void vsoxseg4ei8(double * op0, vuint8mf4_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f64m2_m))) +void vsoxseg4ei8(vbool32_t op0, double * op1, vuint8mf4_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f64m1))) +void vsoxseg5ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f64m1_m))) +void vsoxseg5ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f64m1))) +void vsoxseg6ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f64m1_m))) +void vsoxseg6ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f64m1))) +void vsoxseg7ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f64m1_m))) +void vsoxseg7ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f64m1))) +void vsoxseg8ei8(double * op0, vuint8mf8_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f64m1_m))) +void vsoxseg8ei8(vbool64_t op0, double * op1, vuint8mf8_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f64m1))) +void vsoxseg2ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f64m1_m))) +void vsoxseg2ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f64m2))) +void vsoxseg2ei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f64m2_m))) +void vsoxseg2ei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f64m4))) +void vsoxseg2ei16(double * op0, vuint16m1_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f64m4_m))) +void vsoxseg2ei16(vbool16_t op0, double * op1, vuint16m1_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f64m1))) +void vsoxseg3ei16(double * op0, 
vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f64m1_m))) +void vsoxseg3ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f64m2))) +void vsoxseg3ei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f64m2_m))) +void vsoxseg3ei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f64m1))) +void vsoxseg4ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f64m1_m))) +void vsoxseg4ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f64m2))) +void vsoxseg4ei16(double * op0, vuint16mf2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f64m2_m))) +void vsoxseg4ei16(vbool32_t op0, double * op1, vuint16mf2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f64m1))) +void vsoxseg5ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f64m1_m))) +void vsoxseg5ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f64m1))) +void vsoxseg6ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f64m1_m))) +void vsoxseg6ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f64m1))) +void vsoxseg7ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f64m1_m))) +void vsoxseg7ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f64m1))) +void vsoxseg8ei16(double * op0, vuint16mf4_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t 
op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f64m1_m))) +void vsoxseg8ei16(vbool64_t op0, double * op1, vuint16mf4_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f64m1))) +void vsoxseg2ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f64m1_m))) +void vsoxseg2ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f64m2))) +void vsoxseg2ei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f64m2_m))) +void vsoxseg2ei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f64m4))) +void vsoxseg2ei32(double * op0, vuint32m2_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f64m4_m))) +void vsoxseg2ei32(vbool16_t op0, double * op1, vuint32m2_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f64m1))) +void vsoxseg3ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f64m1_m))) +void vsoxseg3ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f64m2))) +void vsoxseg3ei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f64m2_m))) +void vsoxseg3ei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f64m1))) +void vsoxseg4ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f64m1_m))) +void vsoxseg4ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f64m2))) +void vsoxseg4ei32(double * op0, vuint32m1_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f64m2_m))) +void vsoxseg4ei32(vbool32_t op0, double * op1, vuint32m1_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f64m1))) +void 
vsoxseg5ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f64m1_m))) +void vsoxseg5ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f64m1))) +void vsoxseg6ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f64m1_m))) +void vsoxseg6ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f64m1))) +void vsoxseg7ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f64m1_m))) +void vsoxseg7ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f64m1))) +void vsoxseg8ei32(double * op0, vuint32mf2_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f64m1_m))) +void vsoxseg8ei32(vbool64_t op0, double * op1, vuint32mf2_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f64m1))) +void vsoxseg2ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f64m1_m))) +void vsoxseg2ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f64m2))) +void vsoxseg2ei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f64m2_m))) +void vsoxseg2ei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f64m4))) +void vsoxseg2ei64(double * op0, vuint64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f64m4_m))) +void vsoxseg2ei64(vbool16_t op0, double * op1, vuint64m4_t op2, vfloat64m4_t op3, vfloat64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f64m1))) +void vsoxseg3ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, 
vfloat64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f64m1_m))) +void vsoxseg3ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f64m2))) +void vsoxseg3ei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f64m2_m))) +void vsoxseg3ei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f64m1))) +void vsoxseg4ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f64m1_m))) +void vsoxseg4ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f64m2))) +void vsoxseg4ei64(double * op0, vuint64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f64m2_m))) +void vsoxseg4ei64(vbool32_t op0, double * op1, vuint64m2_t op2, vfloat64m2_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f64m1))) +void vsoxseg5ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f64m1_m))) +void vsoxseg5ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f64m1))) +void vsoxseg6ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f64m1_m))) +void vsoxseg6ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f64m1))) +void vsoxseg7ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f64m1_m))) +void vsoxseg7ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f64m1))) +void vsoxseg8ei64(double * op0, vuint64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, 
vfloat64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f64m1_m))) +void vsoxseg8ei64(vbool64_t op0, double * op1, vuint64m1_t op2, vfloat64m1_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_f64m1_m))) +void vlseg2e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_f64m2_m))) +void vlseg2e64(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64_v_f64m4_m))) +void vlseg2e64(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64_v_f64m1_m))) +void vlseg3e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64_v_f64m2_m))) +void vlseg3e64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64_v_f64m1_m))) +void vlseg4e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64_v_f64m2_m))) +void vlseg4e64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e64_v_f64m1_m))) +void vlseg5e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e64_v_f64m1_m))) +void vlseg6e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e64_v_f64m1_m))) +void vlseg7e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e64_v_f64m1_m))) +void vlseg8e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * 
op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_f64m1_m))) +void vlseg2e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_f64m2_m))) +void vlseg2e64ff(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e64ff_v_f64m4_m))) +void vlseg2e64ff(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64ff_v_f64m1_m))) +void vlseg3e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, size_t * op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e64ff_v_f64m2_m))) +void vlseg3e64ff(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, size_t * op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64ff_v_f64m1_m))) +void vlseg4e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, size_t * op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e64ff_v_f64m2_m))) +void vlseg4e64ff(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, size_t * op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e64ff_v_f64m1_m))) +void vlseg5e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, size_t * op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e64ff_v_f64m1_m))) +void vlseg6e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, size_t * op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e64ff_v_f64m1_m))) +void vlseg7e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, size_t * op16, size_t op17); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e64ff_v_f64m1_m))) +void vlseg8e64ff(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, size_t * op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_f64m1_m))) +void vlsseg2e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_f64m2_m))) +void vlsseg2e64(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e64_v_f64m4_m))) +void vlsseg2e64(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e64_v_f64m1_m))) +void vlsseg3e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e64_v_f64m2_m))) +void vlsseg3e64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e64_v_f64m1_m))) +void vlsseg4e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e64_v_f64m2_m))) +void vlsseg4e64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e64_v_f64m1_m))) +void vlsseg5e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e64_v_f64m1_m))) +void vlsseg6e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e64_v_f64m1_m))) +void vlsseg7e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t 
op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e64_v_f64m1_m))) +void vlsseg8e64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f64m1))) +void vluxseg2ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f64m1_m))) +void vluxseg2ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f64m2))) +void vluxseg2ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f64m2_m))) +void vluxseg2ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f64m4))) +void vluxseg2ei8(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f64m4_m))) +void vluxseg2ei8(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f64m1))) +void vluxseg3ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f64m1_m))) +void vluxseg3ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f64m2))) +void vluxseg3ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f64m2_m))) +void vluxseg3ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f64m1))) +void vluxseg4ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f64m1_m))) +void vluxseg4ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint8mf8_t op10, size_t op11); + 
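+// NOTE: the vluxseg* overloads above are unordered indexed segment loads.
+// Unmasked forms take one destination pointer per field, the base address,
+// an index vector of unsigned byte offsets, and the vector length; masked
+// (_m) forms additionally take a mask and one merge value per field before
+// the base. A minimal usage sketch (names illustrative; assumes the
+// non-prefixed vsetvl_e64m1 helper that this v0.10-era header provides):
+//
+//   vfloat64m1_t f0, f1;                  // one register group per field
+//   size_t vl = vsetvl_e64m1(n);          // configure EEW=64, LMUL=1
+//   vluxseg2ei8(&f0, &f1, src, offs, vl); // offs: vuint8mf8_t byte offsets
+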
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f64m2))) +void vluxseg4ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f64m2_m))) +void vluxseg4ei8(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f64m1))) +void vluxseg5ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f64m1_m))) +void vluxseg5ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f64m1))) +void vluxseg6ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f64m1_m))) +void vluxseg6ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f64m1))) +void vluxseg7ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f64m1_m))) +void vluxseg7ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f64m1))) +void vluxseg8ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f64m1_m))) +void vluxseg8ei8(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f64m1))) +void 
vluxseg2ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f64m1_m))) +void vluxseg2ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f64m2))) +void vluxseg2ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f64m2_m))) +void vluxseg2ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f64m4))) +void vluxseg2ei16(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f64m4_m))) +void vluxseg2ei16(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f64m1))) +void vluxseg3ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f64m1_m))) +void vluxseg3ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f64m2))) +void vluxseg3ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f64m2_m))) +void vluxseg3ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f64m1))) +void vluxseg4ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f64m1_m))) +void vluxseg4ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f64m2))) +void vluxseg4ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f64m2_m))) +void vluxseg4ei16(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f64m1))) +void vluxseg5ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f64m1_m))) +void vluxseg5ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f64m1))) +void vluxseg6ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f64m1_m))) +void vluxseg6ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f64m1))) +void vluxseg7ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f64m1_m))) +void vluxseg7ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f64m1))) +void vluxseg8ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f64m1_m))) +void vluxseg8ei16(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f64m1))) +void vluxseg2ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f64m1_m))) +void vluxseg2ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f64m2))) +void vluxseg2ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint32m1_t op3, size_t op4); + 
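+// NOTE: the index operand's register group is scaled as in the V spec,
+// EMUL(index) = EEW(index) / EEW(data) * LMUL(data). That is why f64m1
+// data pairs with a vuint32mf2_t index (32/64 * 1 = 1/2) while f64m2 data
+// pairs with vuint32m1_t (32/64 * 2 = 1) in the ei32 overloads above.
+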
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f64m2_m))) +void vluxseg2ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f64m4))) +void vluxseg2ei32(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f64m4_m))) +void vluxseg2ei32(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, vfloat64m4_t op4, const double * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f64m1))) +void vluxseg3ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f64m1_m))) +void vluxseg3ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f64m2))) +void vluxseg3ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f64m2_m))) +void vluxseg3ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f64m1))) +void vluxseg4ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f64m1_m))) +void vluxseg4ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f64m2))) +void vluxseg4ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f64m2_m))) +void vluxseg4ei32(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f64m1))) +void vluxseg5ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f64m1_m))) +void vluxseg5ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f64m1))) +void vluxseg6ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f64m1_m))) +void vluxseg6ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f64m1))) +void vluxseg7ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f64m1_m))) +void vluxseg7ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f64m1))) +void vluxseg8ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f64m1_m))) +void vluxseg8ei32(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f64m1))) +void vluxseg2ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, const double * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f64m1_m))) +void vluxseg2ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vbool64_t op2, vfloat64m1_t op3, vfloat64m1_t op4, const double * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f64m2))) +void vluxseg2ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, const double * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f64m2_m))) +void vluxseg2ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vbool32_t op2, vfloat64m2_t op3, vfloat64m2_t op4, const double * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f64m4))) +void vluxseg2ei64(vfloat64m4_t * op0, vfloat64m4_t * op1, const double * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f64m4_m))) +void vluxseg2ei64(vfloat64m4_t * op0, vfloat64m4_t * op1, vbool16_t op2, vfloat64m4_t op3, 
vfloat64m4_t op4, const double * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f64m1))) +void vluxseg3ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, const double * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f64m1_m))) +void vluxseg3ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vbool64_t op3, vfloat64m1_t op4, vfloat64m1_t op5, vfloat64m1_t op6, const double * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f64m2))) +void vluxseg3ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, const double * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f64m2_m))) +void vluxseg3ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vbool32_t op3, vfloat64m2_t op4, vfloat64m2_t op5, vfloat64m2_t op6, const double * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f64m1))) +void vluxseg4ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, const double * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f64m1_m))) +void vluxseg4ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vbool64_t op4, vfloat64m1_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, const double * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f64m2))) +void vluxseg4ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, const double * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f64m2_m))) +void vluxseg4ei64(vfloat64m2_t * op0, vfloat64m2_t * op1, vfloat64m2_t * op2, vfloat64m2_t * op3, vbool32_t op4, vfloat64m2_t op5, vfloat64m2_t op6, vfloat64m2_t op7, vfloat64m2_t op8, const double * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f64m1))) +void vluxseg5ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, const double * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f64m1_m))) +void vluxseg5ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vbool64_t op5, vfloat64m1_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, const double * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f64m1))) +void vluxseg6ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, const double * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f64m1_m))) +void vluxseg6ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vbool64_t op6, vfloat64m1_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, const double * op13, 
vuint64m1_t op14, size_t op15);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f64m1)))
+void vluxseg7ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, const double * op7, vuint64m1_t op8, size_t op9);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f64m1_m)))
+void vluxseg7ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vbool64_t op7, vfloat64m1_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, const double * op15, vuint64m1_t op16, size_t op17);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f64m1)))
+void vluxseg8ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, const double * op8, vuint64m1_t op9, size_t op10);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f64m1_m)))
+void vluxseg8ei64(vfloat64m1_t * op0, vfloat64m1_t * op1, vfloat64m1_t * op2, vfloat64m1_t * op3, vfloat64m1_t * op4, vfloat64m1_t * op5, vfloat64m1_t * op6, vfloat64m1_t * op7, vbool64_t op8, vfloat64m1_t op9, vfloat64m1_t op10, vfloat64m1_t op11, vfloat64m1_t op12, vfloat64m1_t op13, vfloat64m1_t op14, vfloat64m1_t op15, vfloat64m1_t op16, const double * op17, vuint64m1_t op18, size_t op19);
+
+#endif
+
+#if defined(__riscv_zfh) && defined(__riscv_zvlsseg)
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16m1)))
+void vloxseg2ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint8mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16m1_m)))
+void vloxseg2ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint8mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16m2)))
+void vloxseg2ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint8m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16m2_m)))
+void vloxseg2ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint8m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16m4)))
+void vloxseg2ei8(vfloat16m4_t * op0, vfloat16m4_t * op1, const _Float16 * op2, vuint8m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16m4_m)))
+void vloxseg2ei8(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, vuint8m2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16mf2)))
+void vloxseg2ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint8mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16mf2_m)))
+void vloxseg2ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint8mf4_t op6, size_t op7);
+
+__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16mf4))) +void vloxseg2ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei8_v_f16mf4_m))) +void vloxseg2ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16m1))) +void vloxseg3ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16m1_m))) +void vloxseg3ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16m2))) +void vloxseg3ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16m2_m))) +void vloxseg3ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16mf2))) +void vloxseg3ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16mf2_m))) +void vloxseg3ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16mf4))) +void vloxseg3ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei8_v_f16mf4_m))) +void vloxseg3ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16m1))) +void vloxseg4ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint8mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16m1_m))) +void vloxseg4ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16m2))) +void vloxseg4ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16m2_m))) +void vloxseg4ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, 
vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16mf2))) +void vloxseg4ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16mf2_m))) +void vloxseg4ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16mf4))) +void vloxseg4ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei8_v_f16mf4_m))) +void vloxseg4ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f16m1))) +void vloxseg5ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f16m1_m))) +void vloxseg5ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f16mf2))) +void vloxseg5ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f16mf2_m))) +void vloxseg5ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f16mf4))) +void vloxseg5ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei8_v_f16mf4_m))) +void vloxseg5ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f16m1))) +void vloxseg6ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f16m1_m))) +void vloxseg6ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f16mf2))) +void vloxseg6ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f16mf2_m))) +void vloxseg6ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f16mf4))) +void vloxseg6ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei8_v_f16mf4_m))) +void vloxseg6ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f16m1))) +void vloxseg7ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f16m1_m))) +void vloxseg7ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f16mf2))) +void vloxseg7ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f16mf2_m))) +void vloxseg7ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f16mf4))) +void vloxseg7ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, 
vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei8_v_f16mf4_m))) +void vloxseg7ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f16m1))) +void vloxseg8ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f16m1_m))) +void vloxseg8ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f16mf2))) +void vloxseg8ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f16mf2_m))) +void vloxseg8ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f16mf4))) +void vloxseg8ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei8_v_f16mf4_m))) +void vloxseg8ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16m1))) +void vloxseg2ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16m1_m))) +void vloxseg2ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint16m1_t op6, size_t op7); + 
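/* Editorial sketch -- not part of the generated header. The overloaded
 * vloxseg<N>ei<W> declarations above are the v0.10 indexed segment loads:
 * each call gathers N-field records at byte offsets taken from an index
 * vector (index EEW = W) and writes field k through the k-th output pointer;
 * the _m overloads additionally take a mask plus one merge ("maskedoff")
 * operand per field ahead of the base pointer. A minimal unmasked use,
 * assuming the V and Zfh extensions are enabled; gather_pairs, base,
 * byte_offsets, re and im are hypothetical names, and only the first vl
 * elements are processed:
 *
 *   #include <riscv_vector.h>
 *
 *   void gather_pairs(const _Float16 *base, const uint8_t *byte_offsets,
 *                     _Float16 *re, _Float16 *im, size_t n) {
 *     size_t vl = vsetvl_e16mf4(n);                      // SEW=16, LMUL=mf4
 *     vuint8mf8_t idx = vle8_v_u8mf8(byte_offsets, vl);  // EEW=8 byte offsets
 *     vfloat16mf4_t f0, f1;
 *     vloxseg2ei8(&f0, &f1, base, idx, vl);  // overload chosen from &f0/&f1
 *     vse16_v_f16mf4(re, f0, vl);            // field 0 of each record
 *     vse16_v_f16mf4(im, f1, vl);            // field 1 of each record
 *   }
 */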
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16m2))) +void vloxseg2ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16m2_m))) +void vloxseg2ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16m4))) +void vloxseg2ei16(vfloat16m4_t * op0, vfloat16m4_t * op1, const _Float16 * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16m4_m))) +void vloxseg2ei16(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16mf2))) +void vloxseg2ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16mf2_m))) +void vloxseg2ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16mf4))) +void vloxseg2ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei16_v_f16mf4_m))) +void vloxseg2ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16m1))) +void vloxseg3ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16m1_m))) +void vloxseg3ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16m2))) +void vloxseg3ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16m2_m))) +void vloxseg3ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16mf2))) +void vloxseg3ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16mf2_m))) +void vloxseg3ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16mf4))) +void 
vloxseg3ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei16_v_f16mf4_m))) +void vloxseg3ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16m1))) +void vloxseg4ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16m1_m))) +void vloxseg4ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16m2))) +void vloxseg4ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16m2_m))) +void vloxseg4ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16mf2))) +void vloxseg4ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16mf2_m))) +void vloxseg4ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16mf4))) +void vloxseg4ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei16_v_f16mf4_m))) +void vloxseg4ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f16m1))) +void vloxseg5ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f16m1_m))) +void vloxseg5ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f16mf2))) +void vloxseg5ei16(vfloat16mf2_t * op0, 
vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f16mf2_m))) +void vloxseg5ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f16mf4))) +void vloxseg5ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei16_v_f16mf4_m))) +void vloxseg5ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f16m1))) +void vloxseg6ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f16m1_m))) +void vloxseg6ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f16mf2))) +void vloxseg6ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f16mf2_m))) +void vloxseg6ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f16mf4))) +void vloxseg6ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei16_v_f16mf4_m))) +void vloxseg6ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f16m1))) +void vloxseg7ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, 
vfloat16m1_t * op6, const _Float16 * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f16m1_m))) +void vloxseg7ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f16mf2))) +void vloxseg7ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f16mf2_m))) +void vloxseg7ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f16mf4))) +void vloxseg7ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei16_v_f16mf4_m))) +void vloxseg7ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f16m1))) +void vloxseg8ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f16m1_m))) +void vloxseg8ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f16mf2))) +void vloxseg8ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f16mf2_m))) +void vloxseg8ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, 
vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f16mf4))) +void vloxseg8ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei16_v_f16mf4_m))) +void vloxseg8ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16m1))) +void vloxseg2ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16m1_m))) +void vloxseg2ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16m2))) +void vloxseg2ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16m2_m))) +void vloxseg2ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16m4))) +void vloxseg2ei32(vfloat16m4_t * op0, vfloat16m4_t * op1, const _Float16 * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16m4_m))) +void vloxseg2ei32(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16mf2))) +void vloxseg2ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16mf2_m))) +void vloxseg2ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16mf4))) +void vloxseg2ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei32_v_f16mf4_m))) +void vloxseg2ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16m1))) +void vloxseg3ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * 
op2, const _Float16 * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16m1_m))) +void vloxseg3ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16m2))) +void vloxseg3ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16m2_m))) +void vloxseg3ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16mf2))) +void vloxseg3ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16mf2_m))) +void vloxseg3ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16mf4))) +void vloxseg3ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei32_v_f16mf4_m))) +void vloxseg3ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16m1))) +void vloxseg4ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16m1_m))) +void vloxseg4ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16m2))) +void vloxseg4ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16m2_m))) +void vloxseg4ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16mf2))) +void vloxseg4ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16mf2_m))) +void vloxseg4ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, 
vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16mf4))) +void vloxseg4ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei32_v_f16mf4_m))) +void vloxseg4ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f16m1))) +void vloxseg5ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f16m1_m))) +void vloxseg5ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f16mf2))) +void vloxseg5ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f16mf2_m))) +void vloxseg5ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f16mf4))) +void vloxseg5ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei32_v_f16mf4_m))) +void vloxseg5ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f16m1))) +void vloxseg6ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f16m1_m))) +void vloxseg6ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f16mf2))) +void vloxseg6ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, 
vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f16mf2_m))) +void vloxseg6ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f16mf4))) +void vloxseg6ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei32_v_f16mf4_m))) +void vloxseg6ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f16m1))) +void vloxseg7ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f16m1_m))) +void vloxseg7ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f16mf2))) +void vloxseg7ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f16mf2_m))) +void vloxseg7ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f16mf4))) +void vloxseg7ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei32_v_f16mf4_m))) +void vloxseg7ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * 
op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f16m1))) +void vloxseg8ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f16m1_m))) +void vloxseg8ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f16mf2))) +void vloxseg8ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f16mf2_m))) +void vloxseg8ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f16mf4))) +void vloxseg8ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei32_v_f16mf4_m))) +void vloxseg8ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16m1))) +void vloxseg2ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16m1_m))) +void vloxseg2ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16m2))) +void vloxseg2ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16m2_m))) +void vloxseg2ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16mf2))) +void vloxseg2ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16mf2_m))) +void vloxseg2ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16mf4))) +void vloxseg2ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg2ei64_v_f16mf4_m))) +void vloxseg2ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16m1))) +void vloxseg3ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16m1_m))) +void vloxseg3ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16m2))) +void vloxseg3ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16m2_m))) +void vloxseg3ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16mf2))) +void vloxseg3ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16mf2_m))) +void vloxseg3ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16mf4))) +void vloxseg3ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg3ei64_v_f16mf4_m))) +void vloxseg3ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16m1))) +void vloxseg4ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16m1_m))) +void vloxseg4ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 
* op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16m2))) +void vloxseg4ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16m2_m))) +void vloxseg4ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16mf2))) +void vloxseg4ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16mf2_m))) +void vloxseg4ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16mf4))) +void vloxseg4ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg4ei64_v_f16mf4_m))) +void vloxseg4ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f16m1))) +void vloxseg5ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f16m1_m))) +void vloxseg5ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f16mf2))) +void vloxseg5ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f16mf2_m))) +void vloxseg5ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f16mf4))) +void vloxseg5ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg5ei64_v_f16mf4_m))) +void vloxseg5ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, 
vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f16m1))) +void vloxseg6ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f16m1_m))) +void vloxseg6ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f16mf2))) +void vloxseg6ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f16mf2_m))) +void vloxseg6ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f16mf4))) +void vloxseg6ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg6ei64_v_f16mf4_m))) +void vloxseg6ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f16m1))) +void vloxseg7ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f16m1_m))) +void vloxseg7ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f16mf2))) +void vloxseg7ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f16mf2_m))) +void vloxseg7ei64(vfloat16mf2_t * op0, 
vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f16mf4))) +void vloxseg7ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg7ei64_v_f16mf4_m))) +void vloxseg7ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f16m1))) +void vloxseg8ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f16m1_m))) +void vloxseg8ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f16mf2))) +void vloxseg8ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f16mf2_m))) +void vloxseg8ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f16mf4))) +void vloxseg8ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vloxseg8ei64_v_f16mf4_m))) +void vloxseg8ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, 
vfloat16mf4_t op16, const _Float16 * op17, vuint64m1_t op18, size_t op19);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16m1)))
+void vsseg2e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16m1_m)))
+void vsseg2e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16m2)))
+void vsseg2e16(_Float16 * op0, vfloat16m2_t op1, vfloat16m2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16m2_m)))
+void vsseg2e16(vbool8_t op0, _Float16 * op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16m4)))
+void vsseg2e16(_Float16 * op0, vfloat16m4_t op1, vfloat16m4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16m4_m)))
+void vsseg2e16(vbool4_t op0, _Float16 * op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16mf2)))
+void vsseg2e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16mf2_m)))
+void vsseg2e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16mf4)))
+void vsseg2e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t op3);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg2e16_v_f16mf4_m)))
+void vsseg2e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16m1)))
+void vsseg3e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16m1_m)))
+void vsseg3e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16m2)))
+void vsseg3e16(_Float16 * op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16m2_m)))
+void vsseg3e16(vbool8_t op0, _Float16 * op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16mf2)))
+void vsseg3e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16mf2_m)))
+void vsseg3e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16mf4)))
+void vsseg3e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg3e16_v_f16mf4_m)))
+void vsseg3e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5);
+
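/* Editorial sketch -- not part of the generated header. The vsseg<N>e16
 * overloads above are the unit-stride segment stores: they interleave N
 * vector operands into an array-of-structs layout (the _m overloads take a
 * leading mask). A strip-mined example, assuming V and Zfh are enabled;
 * interleave_pairs, re, im and out are hypothetical names:
 *
 *   #include <riscv_vector.h>
 *
 *   void interleave_pairs(const _Float16 *re, const _Float16 *im,
 *                         _Float16 *out, size_t n) {
 *     for (size_t vl; n > 0; n -= vl, re += vl, im += vl, out += 2 * vl) {
 *       vl = vsetvl_e16m1(n);
 *       vfloat16m1_t v0 = vle16_v_f16m1(re, vl);
 *       vfloat16m1_t v1 = vle16_v_f16m1(im, vl);
 *       vsseg2e16(out, v0, v1, vl);  // out[2*i] = v0[i], out[2*i+1] = v1[i]
 *     }
 *   }
 */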
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16m1)))
+void vsseg4e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16m1_m)))
+void vsseg4e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16m2)))
+void vsseg4e16(_Float16 * op0, vfloat16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16m2_m)))
+void vsseg4e16(vbool8_t op0, _Float16 * op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16mf2)))
+void vsseg4e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16mf2_m)))
+void vsseg4e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16mf4)))
+void vsseg4e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg4e16_v_f16mf4_m)))
+void vsseg4e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_f16m1)))
+void vsseg5e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_f16m1_m)))
+void vsseg5e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_f16mf2)))
+void vsseg5e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_f16mf2_m)))
+void vsseg5e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_f16mf4)))
+void vsseg5e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg5e16_v_f16mf4_m)))
+void vsseg5e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_f16m1)))
+void vsseg6e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7);
+
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_f16m1_m)))
+void vsseg6e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2,
vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_f16mf2))) +void vsseg6e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_f16mf2_m))) +void vsseg6e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_f16mf4))) +void vsseg6e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg6e16_v_f16mf4_m))) +void vsseg6e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_f16m1))) +void vsseg7e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_f16m1_m))) +void vsseg7e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_f16mf2))) +void vsseg7e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_f16mf2_m))) +void vsseg7e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_f16mf4))) +void vsseg7e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg7e16_v_f16mf4_m))) +void vsseg7e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_f16m1))) +void vsseg8e16(_Float16 * op0, vfloat16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_f16m1_m))) +void vsseg8e16(vbool16_t op0, _Float16 * op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_f16mf2))) +void vsseg8e16(_Float16 * op0, vfloat16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, 
vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_f16mf2_m))) +void vsseg8e16(vbool32_t op0, _Float16 * op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_f16mf4))) +void vsseg8e16(_Float16 * op0, vfloat16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsseg8e16_v_f16mf4_m))) +void vsseg8e16(vbool64_t op0, _Float16 * op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16m1))) +void vssseg2e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16m1_m))) +void vssseg2e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16m2))) +void vssseg2e16(_Float16 * op0, ptrdiff_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16m2_m))) +void vssseg2e16(vbool8_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16m4))) +void vssseg2e16(_Float16 * op0, ptrdiff_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16m4_m))) +void vssseg2e16(vbool4_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16mf2))) +void vssseg2e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16mf2_m))) +void vssseg2e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16mf4))) +void vssseg2e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg2e16_v_f16mf4_m))) +void vssseg2e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16m1))) +void vssseg3e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16m1_m))) +void vssseg3e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16m2))) +void vssseg3e16(_Float16 * op0, ptrdiff_t op1, 
vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16m2_m))) +void vssseg3e16(vbool8_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16mf2))) +void vssseg3e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16mf2_m))) +void vssseg3e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16mf4))) +void vssseg3e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg3e16_v_f16mf4_m))) +void vssseg3e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16m1))) +void vssseg4e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16m1_m))) +void vssseg4e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16m2))) +void vssseg4e16(_Float16 * op0, ptrdiff_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16m2_m))) +void vssseg4e16(vbool8_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16mf2))) +void vssseg4e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16mf2_m))) +void vssseg4e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16mf4))) +void vssseg4e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg4e16_v_f16mf4_m))) +void vssseg4e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_f16m1))) +void vssseg5e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_f16m1_m))) +void vssseg5e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + 
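/* Usage sketch (illustrative only, not part of the generated header): the
 * overloaded unit-stride segment stores above (vsseg2e16 etc.) interleave
 * several vector operands into memory as (a0,b0,a1,b1,...). A minimal sketch,
 * assuming a clang 14 toolchain with the experimental RVV intrinsics and
 * _Float16 vector support enabled; interleave2_f16 is a hypothetical helper,
 * not a function from this header. */
#include <stddef.h>
#include <riscv_vector.h>

void interleave2_f16(_Float16 *dst, const _Float16 *a, const _Float16 *b,
                     size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e16m1(n);             /* elements handled this pass   */
    vfloat16m1_t va = vle16_v_f16m1(a, vl);  /* load one plane               */
    vfloat16m1_t vb = vle16_v_f16m1(b, vl);  /* load the other plane         */
    vsseg2e16(dst, va, vb, vl);              /* resolves via clang_builtin_alias
                                                to __builtin_rvv_vsseg2e16_v_f16m1 */
    a += vl; b += vl; dst += 2 * vl; n -= vl;
  }
}
/* The vssseg* variants declared in this stretch take an extra ptrdiff_t byte
 * stride after the base pointer, so the same pattern can write interleaved
 * pairs into rows of a strided buffer. */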
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_f16mf2))) +void vssseg5e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_f16mf2_m))) +void vssseg5e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_f16mf4))) +void vssseg5e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg5e16_v_f16mf4_m))) +void vssseg5e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_f16m1))) +void vssseg6e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_f16m1_m))) +void vssseg6e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_f16mf2))) +void vssseg6e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_f16mf2_m))) +void vssseg6e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_f16mf4))) +void vssseg6e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg6e16_v_f16mf4_m))) +void vssseg6e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_f16m1))) +void vssseg7e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_f16m1_m))) +void vssseg7e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_f16mf2))) +void vssseg7e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_f16mf2_m))) +void vssseg7e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_f16mf4))) +void vssseg7e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg7e16_v_f16mf4_m))) +void vssseg7e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_f16m1))) +void vssseg8e16(_Float16 * op0, ptrdiff_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_f16m1_m))) +void vssseg8e16(vbool16_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_f16mf2))) +void vssseg8e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_f16mf2_m))) +void vssseg8e16(vbool32_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_f16mf4))) +void vssseg8e16(_Float16 * op0, ptrdiff_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vssseg8e16_v_f16mf4_m))) +void vssseg8e16(vbool64_t op0, _Float16 * op1, ptrdiff_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16m1))) +void vsuxseg2ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16m1_m))) +void vsuxseg2ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16m2))) +void vsuxseg2ei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16m2_m))) +void vsuxseg2ei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + 
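/* Usage sketch (illustrative only, not part of the generated header): the
 * vsuxseg* overloads below are unordered indexed segment stores; the unsigned
 * index vector holds per-lane *byte* offsets from the base pointer, so 8-bit
 * indices can only address the first 256 bytes of a table. A minimal sketch
 * under the same toolchain assumptions as above; scatter2_f16 is a
 * hypothetical helper. */
#include <stdint.h>
#include <riscv_vector.h>

void scatter2_f16(_Float16 *base, const uint8_t *byte_offs,
                  vfloat16m1_t x, vfloat16m1_t y, size_t vl) {
  vuint8mf2_t vidx = vle8_v_u8mf2(byte_offs, vl); /* per-lane byte offsets    */
  /* For each lane i: base[byte_offs[i]/2] gets x[i], the adjacent element
   * gets y[i]. Resolves to __builtin_rvv_vsuxseg2ei8_v_f16m1. */
  vsuxseg2ei8(base, vidx, x, y, vl);
}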
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16m4))) +void vsuxseg2ei8(_Float16 * op0, vuint8m2_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16m4_m))) +void vsuxseg2ei8(vbool4_t op0, _Float16 * op1, vuint8m2_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16mf2))) +void vsuxseg2ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16mf2_m))) +void vsuxseg2ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16mf4))) +void vsuxseg2ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei8_v_f16mf4_m))) +void vsuxseg2ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16m1))) +void vsuxseg3ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16m1_m))) +void vsuxseg3ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16m2))) +void vsuxseg3ei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16m2_m))) +void vsuxseg3ei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16mf2))) +void vsuxseg3ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16mf2_m))) +void vsuxseg3ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16mf4))) +void vsuxseg3ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei8_v_f16mf4_m))) +void vsuxseg3ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16m1))) +void vsuxseg4ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16m1_m))) +void vsuxseg4ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16m2))) +void 
vsuxseg4ei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16m2_m))) +void vsuxseg4ei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16mf2))) +void vsuxseg4ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16mf2_m))) +void vsuxseg4ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16mf4))) +void vsuxseg4ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei8_v_f16mf4_m))) +void vsuxseg4ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f16m1))) +void vsuxseg5ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f16m1_m))) +void vsuxseg5ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f16mf2))) +void vsuxseg5ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f16mf2_m))) +void vsuxseg5ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f16mf4))) +void vsuxseg5ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei8_v_f16mf4_m))) +void vsuxseg5ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f16m1))) +void vsuxseg6ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f16m1_m))) +void vsuxseg6ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f16mf2))) +void vsuxseg6ei8(_Float16 
* op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f16mf2_m))) +void vsuxseg6ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f16mf4))) +void vsuxseg6ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei8_v_f16mf4_m))) +void vsuxseg6ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f16m1))) +void vsuxseg7ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f16m1_m))) +void vsuxseg7ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f16mf2))) +void vsuxseg7ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f16mf2_m))) +void vsuxseg7ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f16mf4))) +void vsuxseg7ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei8_v_f16mf4_m))) +void vsuxseg7ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f16m1))) +void vsuxseg8ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f16m1_m))) +void vsuxseg8ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f16mf2))) +void vsuxseg8ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t 
op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f16mf2_m))) +void vsuxseg8ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f16mf4))) +void vsuxseg8ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei8_v_f16mf4_m))) +void vsuxseg8ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16m1))) +void vsuxseg2ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16m1_m))) +void vsuxseg2ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16m2))) +void vsuxseg2ei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16m2_m))) +void vsuxseg2ei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16m4))) +void vsuxseg2ei16(_Float16 * op0, vuint16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16m4_m))) +void vsuxseg2ei16(vbool4_t op0, _Float16 * op1, vuint16m4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16mf2))) +void vsuxseg2ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16mf2_m))) +void vsuxseg2ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16mf4))) +void vsuxseg2ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei16_v_f16mf4_m))) +void vsuxseg2ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16m1))) +void vsuxseg3ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16m1_m))) +void vsuxseg3ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, 
vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16m2))) +void vsuxseg3ei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16m2_m))) +void vsuxseg3ei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16mf2))) +void vsuxseg3ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16mf2_m))) +void vsuxseg3ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16mf4))) +void vsuxseg3ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei16_v_f16mf4_m))) +void vsuxseg3ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16m1))) +void vsuxseg4ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16m1_m))) +void vsuxseg4ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16m2))) +void vsuxseg4ei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16m2_m))) +void vsuxseg4ei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16mf2))) +void vsuxseg4ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16mf2_m))) +void vsuxseg4ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16mf4))) +void vsuxseg4ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei16_v_f16mf4_m))) +void vsuxseg4ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f16m1))) +void vsuxseg5ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t 
op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f16m1_m))) +void vsuxseg5ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f16mf2))) +void vsuxseg5ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f16mf2_m))) +void vsuxseg5ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f16mf4))) +void vsuxseg5ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei16_v_f16mf4_m))) +void vsuxseg5ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f16m1))) +void vsuxseg6ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f16m1_m))) +void vsuxseg6ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f16mf2))) +void vsuxseg6ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f16mf2_m))) +void vsuxseg6ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f16mf4))) +void vsuxseg6ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei16_v_f16mf4_m))) +void vsuxseg6ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f16m1))) +void vsuxseg7ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f16m1_m))) +void vsuxseg7ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, 
vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f16mf2))) +void vsuxseg7ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f16mf2_m))) +void vsuxseg7ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f16mf4))) +void vsuxseg7ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei16_v_f16mf4_m))) +void vsuxseg7ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f16m1))) +void vsuxseg8ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f16m1_m))) +void vsuxseg8ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f16mf2))) +void vsuxseg8ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f16mf2_m))) +void vsuxseg8ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f16mf4))) +void vsuxseg8ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei16_v_f16mf4_m))) +void vsuxseg8ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16m1))) +void vsuxseg2ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16m1_m))) +void vsuxseg2ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, 
vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16m2))) +void vsuxseg2ei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16m2_m))) +void vsuxseg2ei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16m4))) +void vsuxseg2ei32(_Float16 * op0, vuint32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16m4_m))) +void vsuxseg2ei32(vbool4_t op0, _Float16 * op1, vuint32m8_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16mf2))) +void vsuxseg2ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16mf2_m))) +void vsuxseg2ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16mf4))) +void vsuxseg2ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei32_v_f16mf4_m))) +void vsuxseg2ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16m1))) +void vsuxseg3ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16m1_m))) +void vsuxseg3ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16m2))) +void vsuxseg3ei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16m2_m))) +void vsuxseg3ei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16mf2))) +void vsuxseg3ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16mf2_m))) +void vsuxseg3ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16mf4))) +void vsuxseg3ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei32_v_f16mf4_m))) +void vsuxseg3ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16m1))) +void 
vsuxseg4ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16m1_m))) +void vsuxseg4ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16m2))) +void vsuxseg4ei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16m2_m))) +void vsuxseg4ei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16mf2))) +void vsuxseg4ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16mf2_m))) +void vsuxseg4ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16mf4))) +void vsuxseg4ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei32_v_f16mf4_m))) +void vsuxseg4ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f16m1))) +void vsuxseg5ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f16m1_m))) +void vsuxseg5ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f16mf2))) +void vsuxseg5ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f16mf2_m))) +void vsuxseg5ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f16mf4))) +void vsuxseg5ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei32_v_f16mf4_m))) +void vsuxseg5ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f16m1))) +void vsuxseg6ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t 
op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f16m1_m))) +void vsuxseg6ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f16mf2))) +void vsuxseg6ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f16mf2_m))) +void vsuxseg6ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f16mf4))) +void vsuxseg6ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei32_v_f16mf4_m))) +void vsuxseg6ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f16m1))) +void vsuxseg7ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f16m1_m))) +void vsuxseg7ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f16mf2))) +void vsuxseg7ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f16mf2_m))) +void vsuxseg7ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f16mf4))) +void vsuxseg7ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei32_v_f16mf4_m))) +void vsuxseg7ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f16m1))) +void vsuxseg8ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, 
vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f16m1_m))) +void vsuxseg8ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f16mf2))) +void vsuxseg8ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f16mf2_m))) +void vsuxseg8ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f16mf4))) +void vsuxseg8ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei32_v_f16mf4_m))) +void vsuxseg8ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16m1))) +void vsuxseg2ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16m1_m))) +void vsuxseg2ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16m2))) +void vsuxseg2ei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16m2_m))) +void vsuxseg2ei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16mf2))) +void vsuxseg2ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16mf2_m))) +void vsuxseg2ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16mf4))) +void vsuxseg2ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg2ei64_v_f16mf4_m))) +void vsuxseg2ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16m1))) +void vsuxseg3ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16m1_m))) +void vsuxseg3ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16m2))) +void vsuxseg3ei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16m2_m))) +void vsuxseg3ei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16mf2))) +void vsuxseg3ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16mf2_m))) +void vsuxseg3ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16mf4))) +void vsuxseg3ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg3ei64_v_f16mf4_m))) +void vsuxseg3ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16m1))) +void vsuxseg4ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16m1_m))) +void vsuxseg4ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16m2))) +void vsuxseg4ei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16m2_m))) +void vsuxseg4ei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16mf2))) +void vsuxseg4ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16mf2_m))) +void vsuxseg4ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16mf4))) +void vsuxseg4ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg4ei64_v_f16mf4_m))) +void vsuxseg4ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f16m1))) +void vsuxseg5ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f16m1_m))) +void vsuxseg5ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f16mf2))) +void vsuxseg5ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f16mf2_m))) +void vsuxseg5ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f16mf4))) +void vsuxseg5ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg5ei64_v_f16mf4_m))) +void vsuxseg5ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f16m1))) +void vsuxseg6ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f16m1_m))) +void vsuxseg6ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f16mf2))) +void vsuxseg6ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f16mf2_m))) +void vsuxseg6ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f16mf4))) +void vsuxseg6ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg6ei64_v_f16mf4_m))) +void vsuxseg6ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f16m1))) +void vsuxseg7ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f16m1_m))) +void vsuxseg7ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f16mf2))) +void vsuxseg7ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f16mf2_m))) +void vsuxseg7ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f16mf4))) +void vsuxseg7ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg7ei64_v_f16mf4_m))) +void vsuxseg7ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f16m1))) +void vsuxseg8ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f16m1_m))) +void vsuxseg8ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f16mf2))) +void vsuxseg8ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f16mf2_m))) +void vsuxseg8ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f16mf4))) +void vsuxseg8ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsuxseg8ei64_v_f16mf4_m))) +void vsuxseg8ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16m1))) +void vsoxseg2ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, 
vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16m1_m))) +void vsoxseg2ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16m2))) +void vsoxseg2ei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16m2_m))) +void vsoxseg2ei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16m4))) +void vsoxseg2ei8(_Float16 * op0, vuint8m2_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16m4_m))) +void vsoxseg2ei8(vbool4_t op0, _Float16 * op1, vuint8m2_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16mf2))) +void vsoxseg2ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16mf2_m))) +void vsoxseg2ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16mf4))) +void vsoxseg2ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei8_v_f16mf4_m))) +void vsoxseg2ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16m1))) +void vsoxseg3ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16m1_m))) +void vsoxseg3ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16m2))) +void vsoxseg3ei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16m2_m))) +void vsoxseg3ei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16mf2))) +void vsoxseg3ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16mf2_m))) +void vsoxseg3ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16mf4))) +void vsoxseg3ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei8_v_f16mf4_m))) +void vsoxseg3ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t 
op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16m1))) +void vsoxseg4ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16m1_m))) +void vsoxseg4ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16m2))) +void vsoxseg4ei8(_Float16 * op0, vuint8m1_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16m2_m))) +void vsoxseg4ei8(vbool8_t op0, _Float16 * op1, vuint8m1_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16mf2))) +void vsoxseg4ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16mf2_m))) +void vsoxseg4ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16mf4))) +void vsoxseg4ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei8_v_f16mf4_m))) +void vsoxseg4ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f16m1))) +void vsoxseg5ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f16m1_m))) +void vsoxseg5ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f16mf2))) +void vsoxseg5ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f16mf2_m))) +void vsoxseg5ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f16mf4))) +void vsoxseg5ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei8_v_f16mf4_m))) +void vsoxseg5ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f16m1))) +void vsoxseg6ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f16m1_m))) +void vsoxseg6ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f16mf2))) +void vsoxseg6ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f16mf2_m))) +void vsoxseg6ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f16mf4))) +void vsoxseg6ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei8_v_f16mf4_m))) +void vsoxseg6ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f16m1))) +void vsoxseg7ei8(_Float16 * op0, vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f16m1_m))) +void vsoxseg7ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f16mf2))) +void vsoxseg7ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f16mf2_m))) +void vsoxseg7ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f16mf4))) +void vsoxseg7ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei8_v_f16mf4_m))) +void vsoxseg7ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f16m1))) +void vsoxseg8ei8(_Float16 * op0, 
vuint8mf2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f16m1_m))) +void vsoxseg8ei8(vbool16_t op0, _Float16 * op1, vuint8mf2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f16mf2))) +void vsoxseg8ei8(_Float16 * op0, vuint8mf4_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f16mf2_m))) +void vsoxseg8ei8(vbool32_t op0, _Float16 * op1, vuint8mf4_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f16mf4))) +void vsoxseg8ei8(_Float16 * op0, vuint8mf8_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei8_v_f16mf4_m))) +void vsoxseg8ei8(vbool64_t op0, _Float16 * op1, vuint8mf8_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16m1))) +void vsoxseg2ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16m1_m))) +void vsoxseg2ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16m2))) +void vsoxseg2ei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16m2_m))) +void vsoxseg2ei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16m4))) +void vsoxseg2ei16(_Float16 * op0, vuint16m4_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16m4_m))) +void vsoxseg2ei16(vbool4_t op0, _Float16 * op1, vuint16m4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16mf2))) +void vsoxseg2ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16mf2_m))) +void vsoxseg2ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16mf4))) +void vsoxseg2ei16(_Float16 * op0, vuint16mf4_t op1, 
vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei16_v_f16mf4_m))) +void vsoxseg2ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16m1))) +void vsoxseg3ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16m1_m))) +void vsoxseg3ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16m2))) +void vsoxseg3ei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16m2_m))) +void vsoxseg3ei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16mf2))) +void vsoxseg3ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16mf2_m))) +void vsoxseg3ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16mf4))) +void vsoxseg3ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei16_v_f16mf4_m))) +void vsoxseg3ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16m1))) +void vsoxseg4ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16m1_m))) +void vsoxseg4ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16m2))) +void vsoxseg4ei16(_Float16 * op0, vuint16m2_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16m2_m))) +void vsoxseg4ei16(vbool8_t op0, _Float16 * op1, vuint16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16mf2))) +void vsoxseg4ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16mf2_m))) +void vsoxseg4ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16mf4))) +void vsoxseg4ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei16_v_f16mf4_m))) +void vsoxseg4ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f16m1))) +void vsoxseg5ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f16m1_m))) +void vsoxseg5ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f16mf2))) +void vsoxseg5ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f16mf2_m))) +void vsoxseg5ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f16mf4))) +void vsoxseg5ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei16_v_f16mf4_m))) +void vsoxseg5ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f16m1))) +void vsoxseg6ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f16m1_m))) +void vsoxseg6ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f16mf2))) +void vsoxseg6ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f16mf2_m))) +void vsoxseg6ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f16mf4))) +void vsoxseg6ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei16_v_f16mf4_m))) +void vsoxseg6ei16(vbool64_t op0, 
_Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f16m1))) +void vsoxseg7ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f16m1_m))) +void vsoxseg7ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f16mf2))) +void vsoxseg7ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f16mf2_m))) +void vsoxseg7ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f16mf4))) +void vsoxseg7ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei16_v_f16mf4_m))) +void vsoxseg7ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f16m1))) +void vsoxseg8ei16(_Float16 * op0, vuint16m1_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f16m1_m))) +void vsoxseg8ei16(vbool16_t op0, _Float16 * op1, vuint16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f16mf2))) +void vsoxseg8ei16(_Float16 * op0, vuint16mf2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f16mf2_m))) +void vsoxseg8ei16(vbool32_t op0, _Float16 * op1, vuint16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f16mf4))) +void vsoxseg8ei16(_Float16 * op0, vuint16mf4_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + 
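+// --------------------------------------------------------------------------
+// Note (editorial, not part of the generated header): the declarations in
+// this section are auto-generated overloaded wrappers for the RISC-V "V"
+// vector (RVV) segment load/store intrinsics shipped with clang 14. A brief
+// summary of the naming scheme, to aid reading this dump:
+//   vsoxseg<N>ei<W> / vsuxseg<N>ei<W>
+//     'o' / 'u' : ordered / unordered indexed segment store
+//     <N>       : number of fields per segment (2..8)
+//     ei<W>     : index element width in bits (8/16/32/64)
+// Unmasked overloads take (base, index vector, <N> data vectors, vl); masked
+// overloads (aliasing the _m builtins) prepend a vbool<SEW/LMUL>_t mask.
+//
+// Usage sketch, assuming values of the types shown (these exact overloads
+// are declared nearby in this file):
+//   vsoxseg2ei16(dst, idx, v0, v1, vl);        // dst: _Float16*, idx: vuint16m1_t
+//   vsoxseg2ei16(mask, dst, idx, v0, v1, vl);  // masked; mask: vbool16_t
+// --------------------------------------------------------------------------
+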
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei16_v_f16mf4_m))) +void vsoxseg8ei16(vbool64_t op0, _Float16 * op1, vuint16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16m1))) +void vsoxseg2ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16m1_m))) +void vsoxseg2ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16m2))) +void vsoxseg2ei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16m2_m))) +void vsoxseg2ei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16m4))) +void vsoxseg2ei32(_Float16 * op0, vuint32m8_t op1, vfloat16m4_t op2, vfloat16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16m4_m))) +void vsoxseg2ei32(vbool4_t op0, _Float16 * op1, vuint32m8_t op2, vfloat16m4_t op3, vfloat16m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16mf2))) +void vsoxseg2ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16mf2_m))) +void vsoxseg2ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16mf4))) +void vsoxseg2ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei32_v_f16mf4_m))) +void vsoxseg2ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16m1))) +void vsoxseg3ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16m1_m))) +void vsoxseg3ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16m2))) +void vsoxseg3ei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16m2_m))) +void vsoxseg3ei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16mf2))) +void vsoxseg3ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16mf2_m))) +void vsoxseg3ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16mf4))) +void vsoxseg3ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei32_v_f16mf4_m))) +void vsoxseg3ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16m1))) +void vsoxseg4ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16m1_m))) +void vsoxseg4ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16m2))) +void vsoxseg4ei32(_Float16 * op0, vuint32m4_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16m2_m))) +void vsoxseg4ei32(vbool8_t op0, _Float16 * op1, vuint32m4_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16mf2))) +void vsoxseg4ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16mf2_m))) +void vsoxseg4ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16mf4))) +void vsoxseg4ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei32_v_f16mf4_m))) +void vsoxseg4ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f16m1))) +void vsoxseg5ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f16m1_m))) +void vsoxseg5ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f16mf2))) +void vsoxseg5ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f16mf2_m))) +void vsoxseg5ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, 
vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f16mf4))) +void vsoxseg5ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei32_v_f16mf4_m))) +void vsoxseg5ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f16m1))) +void vsoxseg6ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f16m1_m))) +void vsoxseg6ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f16mf2))) +void vsoxseg6ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f16mf2_m))) +void vsoxseg6ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f16mf4))) +void vsoxseg6ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei32_v_f16mf4_m))) +void vsoxseg6ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f16m1))) +void vsoxseg7ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f16m1_m))) +void vsoxseg7ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f16mf2))) +void vsoxseg7ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f16mf2_m))) +void vsoxseg7ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f16mf4))) +void vsoxseg7ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei32_v_f16mf4_m))) +void vsoxseg7ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f16m1))) +void vsoxseg8ei32(_Float16 * op0, vuint32m2_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f16m1_m))) +void vsoxseg8ei32(vbool16_t op0, _Float16 * op1, vuint32m2_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f16mf2))) +void vsoxseg8ei32(_Float16 * op0, vuint32m1_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f16mf2_m))) +void vsoxseg8ei32(vbool32_t op0, _Float16 * op1, vuint32m1_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f16mf4))) +void vsoxseg8ei32(_Float16 * op0, vuint32mf2_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei32_v_f16mf4_m))) +void vsoxseg8ei32(vbool64_t op0, _Float16 * op1, vuint32mf2_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16m1))) +void vsoxseg2ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16m1_m))) +void vsoxseg2ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16m2))) +void vsoxseg2ei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16m2_m))) +void vsoxseg2ei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16mf2))) +void vsoxseg2ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16mf2_m))) +void vsoxseg2ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16mf4))) +void vsoxseg2ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg2ei64_v_f16mf4_m))) +void vsoxseg2ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16m1))) +void vsoxseg3ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16m1_m))) +void vsoxseg3ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16m2))) +void vsoxseg3ei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16m2_m))) +void vsoxseg3ei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16mf2))) +void vsoxseg3ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16mf2_m))) +void vsoxseg3ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16mf4))) +void vsoxseg3ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg3ei64_v_f16mf4_m))) +void vsoxseg3ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16m1))) +void vsoxseg4ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16m1_m))) +void vsoxseg4ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16m2))) +void vsoxseg4ei64(_Float16 * op0, vuint64m8_t op1, vfloat16m2_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16m2_m))) +void vsoxseg4ei64(vbool8_t op0, _Float16 * op1, vuint64m8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16mf2))) +void vsoxseg4ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t 
op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16mf2_m))) +void vsoxseg4ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16mf4))) +void vsoxseg4ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg4ei64_v_f16mf4_m))) +void vsoxseg4ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f16m1))) +void vsoxseg5ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f16m1_m))) +void vsoxseg5ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f16mf2))) +void vsoxseg5ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f16mf2_m))) +void vsoxseg5ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f16mf4))) +void vsoxseg5ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg5ei64_v_f16mf4_m))) +void vsoxseg5ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f16m1))) +void vsoxseg6ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f16m1_m))) +void vsoxseg6ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f16mf2))) +void vsoxseg6ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f16mf2_m))) +void vsoxseg6ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f16mf4))) +void vsoxseg6ei64(_Float16 
* op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg6ei64_v_f16mf4_m))) +void vsoxseg6ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f16m1))) +void vsoxseg7ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f16m1_m))) +void vsoxseg7ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f16mf2))) +void vsoxseg7ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f16mf2_m))) +void vsoxseg7ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f16mf4))) +void vsoxseg7ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg7ei64_v_f16mf4_m))) +void vsoxseg7ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f16m1))) +void vsoxseg8ei64(_Float16 * op0, vuint64m4_t op1, vfloat16m1_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f16m1_m))) +void vsoxseg8ei64(vbool16_t op0, _Float16 * op1, vuint64m4_t op2, vfloat16m1_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f16mf2))) +void vsoxseg8ei64(_Float16 * op0, vuint64m2_t op1, vfloat16mf2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f16mf2_m))) +void vsoxseg8ei64(vbool32_t op0, _Float16 * op1, vuint64m2_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f16mf4))) +void vsoxseg8ei64(_Float16 * op0, vuint64m1_t op1, vfloat16mf4_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vsoxseg8ei64_v_f16mf4_m))) +void vsoxseg8ei64(vbool64_t op0, _Float16 * op1, vuint64m1_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_f16m1_m))) +void vlseg2e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_f16m2_m))) +void vlseg2e16(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_f16m4_m))) +void vlseg2e16(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_f16mf2_m))) +void vlseg2e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16_v_f16mf4_m))) +void vlseg2e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_f16m1_m))) +void vlseg3e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_f16m2_m))) +void vlseg3e16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_f16mf2_m))) +void vlseg3e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16_v_f16mf4_m))) +void vlseg3e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_f16m1_m))) +void vlseg4e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_f16m2_m))) +void vlseg4e16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, size_t op10); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_f16mf2_m))) +void vlseg4e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16_v_f16mf4_m))) +void vlseg4e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_f16m1_m))) +void vlseg5e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_f16mf2_m))) +void vlseg5e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16_v_f16mf4_m))) +void vlseg5e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, size_t op12); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_f16m1_m))) +void vlseg6e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_f16mf2_m))) +void vlseg6e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16_v_f16mf4_m))) +void vlseg6e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, size_t op14); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_f16m1_m))) +void vlseg7e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_f16mf2_m))) +void vlseg7e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * 
op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16_v_f16mf4_m))) +void vlseg7e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, size_t op16); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_f16m1_m))) +void vlseg8e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_f16mf2_m))) +void vlseg8e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16_v_f16mf4_m))) +void vlseg8e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, size_t op18); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_f16m1_m))) +void vlseg2e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_f16m2_m))) +void vlseg2e16ff(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_f16m4_m))) +void vlseg2e16ff(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_f16mf2_m))) +void vlseg2e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg2e16ff_v_f16mf4_m))) +void vlseg2e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, size_t * op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_f16m1_m))) +void vlseg3e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, 
vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, size_t * op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_f16m2_m))) +void vlseg3e16ff(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, size_t * op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_f16mf2_m))) +void vlseg3e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, size_t * op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg3e16ff_v_f16mf4_m))) +void vlseg3e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, size_t * op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_f16m1_m))) +void vlseg4e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, size_t * op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_f16m2_m))) +void vlseg4e16ff(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, size_t * op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_f16mf2_m))) +void vlseg4e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, size_t * op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg4e16ff_v_f16mf4_m))) +void vlseg4e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, size_t * op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_f16m1_m))) +void vlseg5e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, size_t * op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_f16mf2_m))) +void vlseg5e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, size_t * op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg5e16ff_v_f16mf4_m))) +void vlseg5e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, size_t * op12, size_t op13); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_f16m1_m))) +void vlseg6e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, size_t * op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_f16mf2_m))) +void vlseg6e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, size_t * op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg6e16ff_v_f16mf4_m))) +void vlseg6e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, size_t * op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_f16m1_m))) +void vlseg7e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, size_t * op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_f16mf2_m))) +void vlseg7e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, size_t * op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg7e16ff_v_f16mf4_m))) +void vlseg7e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, size_t * op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_f16m1_m))) +void vlseg8e16ff(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, size_t * op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_f16mf2_m))) +void vlseg8e16ff(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t 
op16, const _Float16 * op17, size_t * op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlseg8e16ff_v_f16mf4_m))) +void vlseg8e16ff(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, size_t * op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_f16m1_m))) +void vlsseg2e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_f16m2_m))) +void vlsseg2e16(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_f16m4_m))) +void vlsseg2e16(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_f16mf2_m))) +void vlsseg2e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg2e16_v_f16mf4_m))) +void vlsseg2e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, ptrdiff_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_f16m1_m))) +void vlsseg3e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_f16m2_m))) +void vlsseg3e16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_f16mf2_m))) +void vlsseg3e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg3e16_v_f16mf4_m))) +void vlsseg3e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, ptrdiff_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_f16m1_m))) +void vlsseg4e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_f16m2_m))) +void vlsseg4e16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, 
vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_f16mf2_m))) +void vlsseg4e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg4e16_v_f16mf4_m))) +void vlsseg4e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, ptrdiff_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_f16m1_m))) +void vlsseg5e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_f16mf2_m))) +void vlsseg5e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg5e16_v_f16mf4_m))) +void vlsseg5e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, ptrdiff_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_f16m1_m))) +void vlsseg6e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_f16mf2_m))) +void vlsseg6e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg6e16_v_f16mf4_m))) +void vlsseg6e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, ptrdiff_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_f16m1_m))) +void vlsseg7e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t 
op14, const _Float16 * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_f16mf2_m))) +void vlsseg7e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg7e16_v_f16mf4_m))) +void vlsseg7e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, ptrdiff_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_f16m1_m))) +void vlsseg8e16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_f16mf2_m))) +void vlsseg8e16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vlsseg8e16_v_f16mf4_m))) +void vlsseg8e16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, ptrdiff_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16m1))) +void vluxseg2ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint8mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16m1_m))) +void vluxseg2ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16m2))) +void vluxseg2ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint8m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16m2_m))) +void vluxseg2ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint8m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16m4))) +void vluxseg2ei8(vfloat16m4_t * op0, vfloat16m4_t * op1, const 
_Float16 * op2, vuint8m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16m4_m))) +void vluxseg2ei8(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, vuint8m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16mf2))) +void vluxseg2ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint8mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16mf2_m))) +void vluxseg2ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16mf4))) +void vluxseg2ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint8mf8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei8_v_f16mf4_m))) +void vluxseg2ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16m1))) +void vluxseg3ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint8mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16m1_m))) +void vluxseg3ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16m2))) +void vluxseg3ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint8m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16m2_m))) +void vluxseg3ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint8m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16mf2))) +void vluxseg3ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint8mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16mf2_m))) +void vluxseg3ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16mf4))) +void vluxseg3ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint8mf8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei8_v_f16mf4_m))) +void vluxseg3ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16m1))) +void vluxseg4ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint8mf2_t op5, size_t op6); + 
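+/* Illustrative usage sketch only -- not part of the generated declarations.
+ * The masked overloads above all follow one shape: destination pointers
+ * (one per segment field), then the mask, one maskedoff value per field,
+ * the base pointer, the index vector, and the active vector length vl.
+ * The vluxseg2ei8 overload called below is the vbool16_t/f16m1 form
+ * declared earlier in this header; Zvfh (vector _Float16) support is
+ * assumed to be enabled.
+ */
+static inline void example_masked_seg2_gather(vfloat16m1_t *d0, vfloat16m1_t *d1,
+                                              vbool16_t mask, vfloat16m1_t mo0,
+                                              vfloat16m1_t mo1,
+                                              const _Float16 *base,
+                                              vuint8mf2_t idx, size_t vl) {
+  /* Gather two interleaved f16 fields from base[idx[i]] for active lanes;
+   * inactive lanes keep the corresponding maskedoff values mo0/mo1. */
+  vluxseg2ei8(d0, d1, mask, mo0, mo1, base, idx, vl);
+}
+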
+__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16m1_m))) +void vluxseg4ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint8mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16m2))) +void vluxseg4ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint8m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16m2_m))) +void vluxseg4ei8(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint8m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16mf2))) +void vluxseg4ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint8mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16mf2_m))) +void vluxseg4ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint8mf4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16mf4))) +void vluxseg4ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint8mf8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei8_v_f16mf4_m))) +void vluxseg4ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint8mf8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f16m1))) +void vluxseg5ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint8mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f16m1_m))) +void vluxseg5ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint8mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f16mf2))) +void vluxseg5ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint8mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f16mf2_m))) +void vluxseg5ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint8mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f16mf4))) +void vluxseg5ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, 
vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint8mf8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei8_v_f16mf4_m))) +void vluxseg5ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint8mf8_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f16m1))) +void vluxseg6ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint8mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f16m1_m))) +void vluxseg6ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint8mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f16mf2))) +void vluxseg6ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint8mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f16mf2_m))) +void vluxseg6ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint8mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f16mf4))) +void vluxseg6ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint8mf8_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei8_v_f16mf4_m))) +void vluxseg6ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint8mf8_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f16m1))) +void vluxseg7ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint8mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f16m1_m))) +void vluxseg7ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint8mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f16mf2))) +void vluxseg7ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, 
vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint8mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f16mf2_m))) +void vluxseg7ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint8mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f16mf4))) +void vluxseg7ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint8mf8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei8_v_f16mf4_m))) +void vluxseg7ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint8mf8_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f16m1))) +void vluxseg8ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint8mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f16m1_m))) +void vluxseg8ei8(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint8mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f16mf2))) +void vluxseg8ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint8mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f16mf2_m))) +void vluxseg8ei8(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint8mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f16mf4))) +void vluxseg8ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint8mf8_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei8_v_f16mf4_m))) +void vluxseg8ei8(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, 
vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint8mf8_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16m1))) +void vluxseg2ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint16m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16m1_m))) +void vluxseg2ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16m2))) +void vluxseg2ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint16m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16m2_m))) +void vluxseg2ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint16m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16m4))) +void vluxseg2ei16(vfloat16m4_t * op0, vfloat16m4_t * op1, const _Float16 * op2, vuint16m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16m4_m))) +void vluxseg2ei16(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, vuint16m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16mf2))) +void vluxseg2ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint16mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16mf2_m))) +void vluxseg2ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16mf4))) +void vluxseg2ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint16mf4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei16_v_f16mf4_m))) +void vluxseg2ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16m1))) +void vluxseg3ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint16m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16m1_m))) +void vluxseg3ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16m2))) +void vluxseg3ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint16m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16m2_m))) +void vluxseg3ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, 
vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint16m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16mf2))) +void vluxseg3ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint16mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16mf2_m))) +void vluxseg3ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16mf4))) +void vluxseg3ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint16mf4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei16_v_f16mf4_m))) +void vluxseg3ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16m1))) +void vluxseg4ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint16m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16m1_m))) +void vluxseg4ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint16m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16m2))) +void vluxseg4ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint16m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16m2_m))) +void vluxseg4ei16(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint16m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16mf2))) +void vluxseg4ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint16mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16mf2_m))) +void vluxseg4ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint16mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16mf4))) +void vluxseg4ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint16mf4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei16_v_f16mf4_m))) +void vluxseg4ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint16mf4_t op10, size_t op11); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f16m1))) +void vluxseg5ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint16m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f16m1_m))) +void vluxseg5ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint16m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f16mf2))) +void vluxseg5ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint16mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f16mf2_m))) +void vluxseg5ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint16mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f16mf4))) +void vluxseg5ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint16mf4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei16_v_f16mf4_m))) +void vluxseg5ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint16mf4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f16m1))) +void vluxseg6ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint16m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f16m1_m))) +void vluxseg6ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint16m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f16mf2))) +void vluxseg6ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint16mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f16mf2_m))) +void vluxseg6ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint16mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f16mf4))) +void vluxseg6ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, 
vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint16mf4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei16_v_f16mf4_m))) +void vluxseg6ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint16mf4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f16m1))) +void vluxseg7ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint16m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f16m1_m))) +void vluxseg7ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint16m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f16mf2))) +void vluxseg7ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint16mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f16mf2_m))) +void vluxseg7ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint16mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f16mf4))) +void vluxseg7ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint16mf4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei16_v_f16mf4_m))) +void vluxseg7ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint16mf4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f16m1))) +void vluxseg8ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint16m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f16m1_m))) +void vluxseg8ei16(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t 
op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint16m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f16mf2))) +void vluxseg8ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint16mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f16mf2_m))) +void vluxseg8ei16(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint16mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f16mf4))) +void vluxseg8ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint16mf4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei16_v_f16mf4_m))) +void vluxseg8ei16(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint16mf4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16m1))) +void vluxseg2ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint32m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16m1_m))) +void vluxseg2ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16m2))) +void vluxseg2ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint32m4_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16m2_m))) +void vluxseg2ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint32m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16m4))) +void vluxseg2ei32(vfloat16m4_t * op0, vfloat16m4_t * op1, const _Float16 * op2, vuint32m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16m4_m))) +void vluxseg2ei32(vfloat16m4_t * op0, vfloat16m4_t * op1, vbool4_t op2, vfloat16m4_t op3, vfloat16m4_t op4, const _Float16 * op5, vuint32m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16mf2))) +void vluxseg2ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint32m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16mf2_m))) +void vluxseg2ei32(vfloat16mf2_t * op0, vfloat16mf2_t * 
op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16mf4))) +void vluxseg2ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint32mf2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei32_v_f16mf4_m))) +void vluxseg2ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16m1))) +void vluxseg3ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint32m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16m1_m))) +void vluxseg3ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16m2))) +void vluxseg3ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint32m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16m2_m))) +void vluxseg3ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint32m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16mf2))) +void vluxseg3ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint32m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16mf2_m))) +void vluxseg3ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16mf4))) +void vluxseg3ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint32mf2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei32_v_f16mf4_m))) +void vluxseg3ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16m1))) +void vluxseg4ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint32m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16m1_m))) +void vluxseg4ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint32m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16m2))) +void vluxseg4ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint32m4_t op5, size_t op6); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16m2_m))) +void vluxseg4ei32(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint32m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16mf2))) +void vluxseg4ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint32m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16mf2_m))) +void vluxseg4ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint32m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16mf4))) +void vluxseg4ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint32mf2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei32_v_f16mf4_m))) +void vluxseg4ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint32mf2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f16m1))) +void vluxseg5ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint32m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f16m1_m))) +void vluxseg5ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint32m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f16mf2))) +void vluxseg5ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint32m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f16mf2_m))) +void vluxseg5ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint32m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f16mf4))) +void vluxseg5ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint32mf2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei32_v_f16mf4_m))) +void vluxseg5ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint32mf2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f16m1))) +void 
vluxseg6ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint32m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f16m1_m))) +void vluxseg6ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint32m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f16mf2))) +void vluxseg6ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint32m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f16mf2_m))) +void vluxseg6ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint32m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f16mf4))) +void vluxseg6ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint32mf2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei32_v_f16mf4_m))) +void vluxseg6ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint32mf2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f16m1))) +void vluxseg7ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint32m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f16m1_m))) +void vluxseg7ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint32m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f16mf2))) +void vluxseg7ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint32m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f16mf2_m))) +void vluxseg7ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, 
vuint32m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f16mf4))) +void vluxseg7ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint32mf2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei32_v_f16mf4_m))) +void vluxseg7ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint32mf2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f16m1))) +void vluxseg8ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint32m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f16m1_m))) +void vluxseg8ei32(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint32m2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f16mf2))) +void vluxseg8ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint32m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f16mf2_m))) +void vluxseg8ei32(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint32m1_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f16mf4))) +void vluxseg8ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint32mf2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei32_v_f16mf4_m))) +void vluxseg8ei32(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint32mf2_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16m1))) +void vluxseg2ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, const _Float16 * op2, vuint64m4_t op3, size_t op4); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16m1_m))) +void vluxseg2ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vbool16_t op2, vfloat16m1_t op3, vfloat16m1_t op4, const _Float16 * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16m2))) +void vluxseg2ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, const _Float16 * op2, vuint64m8_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16m2_m))) +void vluxseg2ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vbool8_t op2, vfloat16m2_t op3, vfloat16m2_t op4, const _Float16 * op5, vuint64m8_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16mf2))) +void vluxseg2ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, const _Float16 * op2, vuint64m2_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16mf2_m))) +void vluxseg2ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vbool32_t op2, vfloat16mf2_t op3, vfloat16mf2_t op4, const _Float16 * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16mf4))) +void vluxseg2ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, const _Float16 * op2, vuint64m1_t op3, size_t op4); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg2ei64_v_f16mf4_m))) +void vluxseg2ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vbool64_t op2, vfloat16mf4_t op3, vfloat16mf4_t op4, const _Float16 * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16m1))) +void vluxseg3ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, const _Float16 * op3, vuint64m4_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16m1_m))) +void vluxseg3ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vbool16_t op3, vfloat16m1_t op4, vfloat16m1_t op5, vfloat16m1_t op6, const _Float16 * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16m2))) +void vluxseg3ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, const _Float16 * op3, vuint64m8_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16m2_m))) +void vluxseg3ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vbool8_t op3, vfloat16m2_t op4, vfloat16m2_t op5, vfloat16m2_t op6, const _Float16 * op7, vuint64m8_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16mf2))) +void vluxseg3ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, const _Float16 * op3, vuint64m2_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16mf2_m))) +void vluxseg3ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vbool32_t op3, vfloat16mf2_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, const _Float16 * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16mf4))) +void vluxseg3ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, const _Float16 * op3, vuint64m1_t op4, size_t op5); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg3ei64_v_f16mf4_m))) +void 
vluxseg3ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vbool64_t op3, vfloat16mf4_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, const _Float16 * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16m1))) +void vluxseg4ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, const _Float16 * op4, vuint64m4_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16m1_m))) +void vluxseg4ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vbool16_t op4, vfloat16m1_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, const _Float16 * op9, vuint64m4_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16m2))) +void vluxseg4ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, const _Float16 * op4, vuint64m8_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16m2_m))) +void vluxseg4ei64(vfloat16m2_t * op0, vfloat16m2_t * op1, vfloat16m2_t * op2, vfloat16m2_t * op3, vbool8_t op4, vfloat16m2_t op5, vfloat16m2_t op6, vfloat16m2_t op7, vfloat16m2_t op8, const _Float16 * op9, vuint64m8_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16mf2))) +void vluxseg4ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, const _Float16 * op4, vuint64m2_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16mf2_m))) +void vluxseg4ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vbool32_t op4, vfloat16mf2_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, const _Float16 * op9, vuint64m2_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16mf4))) +void vluxseg4ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, const _Float16 * op4, vuint64m1_t op5, size_t op6); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg4ei64_v_f16mf4_m))) +void vluxseg4ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vbool64_t op4, vfloat16mf4_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, const _Float16 * op9, vuint64m1_t op10, size_t op11); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f16m1))) +void vluxseg5ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, const _Float16 * op5, vuint64m4_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f16m1_m))) +void vluxseg5ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vbool16_t op5, vfloat16m1_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, const _Float16 * op11, vuint64m4_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f16mf2))) +void vluxseg5ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, const _Float16 * op5, vuint64m2_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f16mf2_m))) +void 
vluxseg5ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vbool32_t op5, vfloat16mf2_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, const _Float16 * op11, vuint64m2_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f16mf4))) +void vluxseg5ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, const _Float16 * op5, vuint64m1_t op6, size_t op7); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg5ei64_v_f16mf4_m))) +void vluxseg5ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vbool64_t op5, vfloat16mf4_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, const _Float16 * op11, vuint64m1_t op12, size_t op13); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f16m1))) +void vluxseg6ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, const _Float16 * op6, vuint64m4_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f16m1_m))) +void vluxseg6ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vbool16_t op6, vfloat16m1_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, const _Float16 * op13, vuint64m4_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f16mf2))) +void vluxseg6ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, const _Float16 * op6, vuint64m2_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f16mf2_m))) +void vluxseg6ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vbool32_t op6, vfloat16mf2_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, const _Float16 * op13, vuint64m2_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f16mf4))) +void vluxseg6ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, const _Float16 * op6, vuint64m1_t op7, size_t op8); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg6ei64_v_f16mf4_m))) +void vluxseg6ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vbool64_t op6, vfloat16mf4_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, const _Float16 * op13, vuint64m1_t op14, size_t op15); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f16m1))) +void vluxseg7ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, const _Float16 * op7, vuint64m4_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f16m1_m))) +void vluxseg7ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, 
vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vbool16_t op7, vfloat16m1_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, const _Float16 * op15, vuint64m4_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f16mf2))) +void vluxseg7ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, const _Float16 * op7, vuint64m2_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f16mf2_m))) +void vluxseg7ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vbool32_t op7, vfloat16mf2_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, const _Float16 * op15, vuint64m2_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f16mf4))) +void vluxseg7ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, const _Float16 * op7, vuint64m1_t op8, size_t op9); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg7ei64_v_f16mf4_m))) +void vluxseg7ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vbool64_t op7, vfloat16mf4_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, const _Float16 * op15, vuint64m1_t op16, size_t op17); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f16m1))) +void vluxseg8ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, const _Float16 * op8, vuint64m4_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f16m1_m))) +void vluxseg8ei64(vfloat16m1_t * op0, vfloat16m1_t * op1, vfloat16m1_t * op2, vfloat16m1_t * op3, vfloat16m1_t * op4, vfloat16m1_t * op5, vfloat16m1_t * op6, vfloat16m1_t * op7, vbool16_t op8, vfloat16m1_t op9, vfloat16m1_t op10, vfloat16m1_t op11, vfloat16m1_t op12, vfloat16m1_t op13, vfloat16m1_t op14, vfloat16m1_t op15, vfloat16m1_t op16, const _Float16 * op17, vuint64m4_t op18, size_t op19); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f16mf2))) +void vluxseg8ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, const _Float16 * op8, vuint64m2_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f16mf2_m))) +void vluxseg8ei64(vfloat16mf2_t * op0, vfloat16mf2_t * op1, vfloat16mf2_t * op2, vfloat16mf2_t * op3, vfloat16mf2_t * op4, vfloat16mf2_t * op5, vfloat16mf2_t * op6, vfloat16mf2_t * op7, vbool32_t op8, vfloat16mf2_t op9, vfloat16mf2_t op10, vfloat16mf2_t op11, vfloat16mf2_t op12, vfloat16mf2_t op13, vfloat16mf2_t op14, vfloat16mf2_t op15, vfloat16mf2_t op16, const _Float16 * op17, vuint64m2_t op18, size_t op19); + +__rvv_overloaded 
__attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f16mf4))) +void vluxseg8ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, const _Float16 * op8, vuint64m1_t op9, size_t op10); + +__rvv_overloaded __attribute__((clang_builtin_alias(__builtin_rvv_vluxseg8ei64_v_f16mf4_m))) +void vluxseg8ei64(vfloat16mf4_t * op0, vfloat16mf4_t * op1, vfloat16mf4_t * op2, vfloat16mf4_t * op3, vfloat16mf4_t * op4, vfloat16mf4_t * op5, vfloat16mf4_t * op6, vfloat16mf4_t * op7, vbool64_t op8, vfloat16mf4_t op9, vfloat16mf4_t op10, vfloat16mf4_t op11, vfloat16mf4_t op12, vfloat16mf4_t op13, vfloat16mf4_t op14, vfloat16mf4_t op15, vfloat16mf4_t op16, const _Float16 * op17, vuint64m1_t op18, size_t op19); + +#endif + + +#ifdef __cplusplus +} +#endif // __riscv_vector +#endif // __RISCV_VECTOR_H diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/rtmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/rtmintrin.h new file mode 100644 index 0000000..36ff583 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/rtmintrin.h @@ -0,0 +1,45 @@ +/*===---- rtmintrin.h - RTM intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <rtmintrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __RTMINTRIN_H +#define __RTMINTRIN_H + +#define _XBEGIN_STARTED (~0u) +#define _XABORT_EXPLICIT (1 << 0) +#define _XABORT_RETRY (1 << 1) +#define _XABORT_CONFLICT (1 << 2) +#define _XABORT_CAPACITY (1 << 3) +#define _XABORT_DEBUG (1 << 4) +#define _XABORT_NESTED (1 << 5) +#define _XABORT_CODE(x) (((x) >> 24) & 0xFF) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm"))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_xbegin(void) +{ + return __builtin_ia32_xbegin(); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xend(void) +{ + __builtin_ia32_xend(); +} + +#define _xabort(imm) __builtin_ia32_xabort((imm)) + +#undef __DEFAULT_FN_ATTRS + +#endif /* __RTMINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/s390intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/s390intrin.h new file mode 100644 index 0000000..73a915c --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/s390intrin.h @@ -0,0 +1,25 @@ +/*===---- s390intrin.h - SystemZ intrinsics --------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __S390INTRIN_H +#define __S390INTRIN_H + +#ifndef __s390__ +#error "<s390intrin.h> is for s390 only" +#endif + +#ifdef __HTM__ +#include <htmintrin.h> +#endif + +#ifdef __VEC__ +#include <vecintrin.h> +#endif + +#endif /* __S390INTRIN_H*/ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/allocator_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/allocator_interface.h new file mode 100644 index 0000000..6226135 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/allocator_interface.h @@ -0,0 +1,88 @@ +//===-- allocator_interface.h ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Public interface header for allocator used in sanitizers (ASan/TSan/MSan). +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_ALLOCATOR_INTERFACE_H +#define SANITIZER_ALLOCATOR_INTERFACE_H + +#include <stddef.h> + +#ifdef __cplusplus +extern "C" { +#endif + /* Returns the estimated number of bytes that will be reserved by allocator + for request of "size" bytes. If allocator can't allocate that much + memory, returns the maximal possible allocation size, otherwise returns + "size". */ + size_t __sanitizer_get_estimated_allocated_size(size_t size); + + /* Returns true if p was returned by the allocator and + is not yet freed. */ + int __sanitizer_get_ownership(const volatile void *p); + + /* Returns the number of bytes reserved for the pointer p. + Requires (get_ownership(p) == true) or (p == 0). */ + size_t __sanitizer_get_allocated_size(const volatile void *p); + + /* Number of bytes, allocated and not yet freed by the application. */ + size_t __sanitizer_get_current_allocated_bytes(void); + + /* Number of bytes, mmaped by the allocator to fulfill allocation requests. + Generally, for request of X bytes, allocator can reserve and add to free + lists a large number of chunks of size X to use them for future requests. + All these chunks count toward the heap size. Currently, allocator never + releases memory to OS (instead, it just puts freed chunks to free + lists). */ + size_t __sanitizer_get_heap_size(void); + + /* Number of bytes, mmaped by the allocator, which can be used to fulfill + allocation requests. When a user program frees memory chunk, it can first + fall into quarantine and will count toward __sanitizer_get_free_bytes() + later. */ + size_t __sanitizer_get_free_bytes(void); + + /* Number of bytes in unmapped pages, that are released to OS. Currently, + always returns 0. */ + size_t __sanitizer_get_unmapped_bytes(void); + + /* Malloc hooks that may be optionally provided by user. + __sanitizer_malloc_hook(ptr, size) is called immediately after + allocation of "size" bytes, which returned "ptr". + __sanitizer_free_hook(ptr) is called immediately before + deallocation of "ptr". */ + void __sanitizer_malloc_hook(const volatile void *ptr, size_t size); + void __sanitizer_free_hook(const volatile void *ptr); + + /* Installs a pair of hooks for malloc/free. 
+ Several (currently, 5) hook pairs may be installed, they are executed + in the order they were installed and after calling + __sanitizer_malloc_hook/__sanitizer_free_hook. + Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be + chained and do not rely on weak symbols working on the platform, but + require __sanitizer_install_malloc_and_free_hooks to be called at startup + and thus will not be called on malloc/free very early in the process. + Returns the number of hooks currently installed or 0 on failure. + Not thread-safe, should be called in the main thread before starting + other threads. + */ + int __sanitizer_install_malloc_and_free_hooks( + void (*malloc_hook)(const volatile void *, size_t), + void (*free_hook)(const volatile void *)); + + /* Drains allocator quarantines (calling thread's and global ones), returns + freed memory back to OS and releases other non-essential internal allocator + resources in attempt to reduce process RSS. + Currently available with ASan only. + */ + void __sanitizer_purge_allocator(void); +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/asan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/asan_interface.h new file mode 100644 index 0000000..792ef9c --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/asan_interface.h @@ -0,0 +1,326 @@ +//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer (ASan). +// +// Public interface header. +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_ASAN_INTERFACE_H +#define SANITIZER_ASAN_INTERFACE_H + +#include <sanitizer/common_interface_defs.h> + +#ifdef __cplusplus +extern "C" { +#endif +/// Marks a memory region ([addr, addr+size)) as unaddressable. +/// +/// This memory must be previously allocated by your program. Instrumented +/// code is forbidden from accessing addresses in this region until it is +/// unpoisoned. This function is not guaranteed to poison the entire region - +/// it could poison only a subregion of [addr, addr+size) due to ASan +/// alignment restrictions. +/// +/// \note This function is not thread-safe because no two threads can poison or +/// unpoison memory in the same memory region simultaneously. +/// +/// \param addr Start of memory region. +/// \param size Size of memory region. +void __asan_poison_memory_region(void const volatile *addr, size_t size); + +/// Marks a memory region ([addr, addr+size)) as addressable. +/// +/// This memory must be previously allocated by your program. Accessing +/// addresses in this region is allowed until this region is poisoned again. +/// This function could unpoison a super-region of [addr, addr+size) due +/// to ASan alignment restrictions. +/// +/// \note This function is not thread-safe because no two threads can +/// poison or unpoison memory in the same memory region simultaneously. +/// +/// \param addr Start of memory region. +/// \param size Size of memory region. 
+void __asan_unpoison_memory_region(void const volatile *addr, size_t size); + +// Macros provided for convenience. +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +/// Marks a memory region as unaddressable. +/// +/// \note Macro provided for convenience; defined as a no-op if ASan is not +/// enabled. +/// +/// \param addr Start of memory region. +/// \param size Size of memory region. +#define ASAN_POISON_MEMORY_REGION(addr, size) \ + __asan_poison_memory_region((addr), (size)) + +/// Marks a memory region as addressable. +/// +/// \note Macro provided for convenience; defined as a no-op if ASan is not +/// enabled. +/// +/// \param addr Start of memory region. +/// \param size Size of memory region. +#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ + __asan_unpoison_memory_region((addr), (size)) +#else +#define ASAN_POISON_MEMORY_REGION(addr, size) \ + ((void)(addr), (void)(size)) +#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ + ((void)(addr), (void)(size)) +#endif + +/// Checks if an address is poisoned. +/// +/// Returns 1 if addr is poisoned (that is, 1-byte read/write +/// access to this address would result in an error report from ASan). +/// Otherwise returns 0. +/// +/// \param addr Address to check. +/// +/// \retval 1 Address is poisoned. +/// \retval 0 Address is not poisoned. +int __asan_address_is_poisoned(void const volatile *addr); + +/// Checks if a region is poisoned. +/// +/// If at least one byte in [beg, beg+size) is poisoned, returns the +/// address of the first such byte. Otherwise returns 0. +/// +/// \param beg Start of memory region. +/// \param size Size of memory region. +/// \returns Address of first poisoned byte. +void *__asan_region_is_poisoned(void *beg, size_t size); + +/// Describes an address (useful for calling from the debugger). +/// +/// Prints the description of addr. +/// +/// \param addr Address to describe. +void __asan_describe_address(void *addr); + +/// Checks if an error has been or is being reported (useful for calling from +/// the debugger to get information about an ASan error). +/// +/// Returns 1 if an error has been (or is being) reported. Otherwise returns 0. +/// +/// \returns 1 if an error has been (or is being) reported. Otherwise returns +/// 0. +int __asan_report_present(void); + +/// Gets the PC (program counter) register value of an ASan error (useful for +/// calling from the debugger). +/// +/// Returns PC if an error has been (or is being) reported. +/// Otherwise returns 0. +/// +/// \returns PC value. +void *__asan_get_report_pc(void); + +/// Gets the BP (base pointer) register value of an ASan error (useful for +/// calling from the debugger). +/// +/// Returns BP if an error has been (or is being) reported. +/// Otherwise returns 0. +/// +/// \returns BP value. +void *__asan_get_report_bp(void); + +/// Gets the SP (stack pointer) register value of an ASan error (useful for +/// calling from the debugger). +/// +/// If an error has been (or is being) reported, returns SP. +/// Otherwise returns 0. +/// +/// \returns SP value. +void *__asan_get_report_sp(void); + +/// Gets the address of the report buffer of an ASan error (useful for calling +/// from the debugger). +/// +/// Returns the address of the report buffer if an error has been (or is being) +/// reported. Otherwise returns 0. +/// +/// \returns Address of report buffer. +void *__asan_get_report_address(void); + +/// Gets access type of an ASan error (useful for calling from the debugger). 
+/// +/// Returns access type (read or write) if an error has been (or is being) +/// reported. Otherwise returns 0. +/// +/// \returns Access type (0 = read, 1 = write). +int __asan_get_report_access_type(void); + +/// Gets access size of an ASan error (useful for calling from the debugger). +/// +/// Returns access size if an error has been (or is being) reported. Otherwise +/// returns 0. +/// +/// \returns Access size in bytes. +size_t __asan_get_report_access_size(void); + +/// Gets the bug description of an ASan error (useful for calling from a +/// debugger). +/// +/// \returns Returns a bug description if an error has been (or is being) +/// reported - for example, "heap-use-after-free". Otherwise returns an empty +/// string. +const char *__asan_get_report_description(void); + +/// Gets information about a pointer (useful for calling from the debugger). +/// +/// Returns the category of the given pointer as a constant string. +/// Possible return values are global, stack, stack-fake, +/// heap, heap-invalid, shadow-low, shadow-gap, +/// shadow-high, and unknown. +/// +/// If the return value is global or stack, tries to also return +/// the variable name, address, and size. If the return value is heap, +/// tries to return the chunk address and size. name should point +/// to an allocated buffer of size name_size. +/// +/// \param addr Address to locate. +/// \param name Buffer to store the variable's name. +/// \param name_size Size in bytes of the variable's name buffer. +/// \param[out] region_address Address of the region. +/// \param[out] region_size Size of the region in bytes. +/// +/// \returns Returns the category of the given pointer as a constant string. +const char *__asan_locate_address(void *addr, char *name, size_t name_size, + void **region_address, size_t *region_size); + +/// Gets the allocation stack trace and thread ID for a heap address (useful +/// for calling from the debugger). +/// +/// Stores up to size frames in trace. Returns +/// the number of stored frames or 0 on error. +/// +/// \param addr A heap address. +/// \param trace A buffer to store the stack trace. +/// \param size Size in bytes of the trace buffer. +/// \param[out] thread_id The thread ID of the address. +/// +/// \returns Returns the number of stored frames or 0 on error. +size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size, + int *thread_id); + +/// Gets the free stack trace and thread ID for a heap address (useful for +/// calling from the debugger). +/// +/// Stores up to size frames in trace. Returns +/// the number of stored frames or 0 on error. +/// +/// \param addr A heap address. +/// \param trace A buffer to store the stack trace. +/// \param size Size in bytes of the trace buffer. +/// \param[out] thread_id The thread ID of the address. +/// +/// \returns Returns the number of stored frames or 0 on error. +size_t __asan_get_free_stack(void *addr, void **trace, size_t size, + int *thread_id); + +/// Gets the current shadow memory mapping (useful for calling from the +/// debugger). +/// +/// \param[out] shadow_scale Shadow scale value. +/// \param[out] shadow_offset Offset value. +void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset); + +/// This is an internal function that is called to report an error. However, +/// it is still a part of the interface because you might want to set a +/// breakpoint on this function in the debugger. +/// +/// \param pc pc value of the ASan error. +/// \param bp bp value of the ASan error. 
+/// +/// \param sp sp value of the ASan error. +/// \param addr Address of the ASan error. +/// \param is_write True if the error is a write error; false otherwise. +/// \param access_size Size of the memory access of the ASan error. +void __asan_report_error(void *pc, void *bp, void *sp, + void *addr, int is_write, size_t access_size); + +// Deprecated. Call __sanitizer_set_death_callback instead. +void __asan_set_death_callback(void (*callback)(void)); + +/// Sets the callback function to be called during ASan error reporting. +/// +/// The callback provides a string pointer to the report. +/// +/// \param callback User-provided function. +void __asan_set_error_report_callback(void (*callback)(const char *)); + +/// User-provided callback on ASan errors. +/// +/// You can provide a function that would be called immediately when ASan +/// detects an error. This is useful in cases when ASan detects an error but +/// your program crashes before the ASan report is printed. +void __asan_on_error(void); + +/// Prints accumulated statistics to stderr (useful for calling from the +/// debugger). +void __asan_print_accumulated_stats(void); + +/// User-provided default option settings. +/// +/// You can provide your own implementation of this function to return a string +/// containing ASan runtime options (for example, +/// verbosity=1:halt_on_error=0). +/// +/// \returns Default options string. +const char* __asan_default_options(void); + +// The following two functions facilitate garbage collection in presence of +// ASan's fake stack. + +/// Gets an opaque handler to the current thread's fake stack. +/// +/// Returns an opaque handler to be used by +/// __asan_addr_is_in_fake_stack(). Returns NULL if the current thread +/// does not have a fake stack. +/// +/// \returns An opaque handler to the fake stack or NULL. +void *__asan_get_current_fake_stack(void); + +/// Checks if an address belongs to a given fake stack. +/// +/// If fake_stack is non-NULL and addr belongs to a +/// fake frame in fake_stack, returns the address of the real +/// stack that corresponds to the fake frame and sets beg and +/// end to the boundaries of this fake frame. Otherwise returns +/// NULL and does not touch beg and end. +/// +/// If beg or end are NULL, they are not touched. +/// +/// \note This function can be called from a thread other than the owner of +/// fake_stack, but the owner thread needs to be alive. +/// +/// \param fake_stack An opaque handler to a fake stack. +/// \param addr Address to test. +/// \param[out] beg Beginning of fake frame. +/// \param[out] end End of fake frame. +/// \returns Stack address or NULL. +void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, + void **end); + +/// Performs shadow memory cleanup of the current thread's stack before a +/// function marked with the [[noreturn]] attribute is called. +/// +/// To avoid false positives on the stack, must be called before no-return +/// functions like _exit() and execl(). +void __asan_handle_no_return(void); + +/// Update allocation stack trace for the given allocation to the current stack +/// trace. Returns 1 if successful, 0 if not. 
+int __asan_update_allocation_context(void* addr); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // SANITIZER_ASAN_INTERFACE_H diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/common_interface_defs.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/common_interface_defs.h new file mode 100644 index 0000000..cd69285 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/common_interface_defs.h @@ -0,0 +1,357 @@ +//===-- sanitizer/common_interface_defs.h -----------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Common part of the public sanitizer interface. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_COMMON_INTERFACE_DEFS_H +#define SANITIZER_COMMON_INTERFACE_DEFS_H + +#include <stddef.h> +#include <stdint.h> + +// GCC does not understand __has_feature. +#if !defined(__has_feature) +#define __has_feature(x) 0 +#endif + +#ifdef __cplusplus +extern "C" { +#endif +// Arguments for __sanitizer_sandbox_on_notify() below. +typedef struct { + // Enable sandbox support in sanitizer coverage. + int coverage_sandboxed; + // File descriptor to write coverage data to. If -1 is passed, a file will + // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no + // effect if coverage_sandboxed == 0. + intptr_t coverage_fd; + // If non-zero, split the coverage data into well-formed blocks. This is + // useful when coverage_fd is a socket descriptor. Each block will contain + // a header, allowing data from multiple processes to be sent over the same + // socket. + unsigned int coverage_max_block_size; +} __sanitizer_sandbox_arguments; + +// Tell the tools to write their reports to "path.<pid>" instead of stderr. +void __sanitizer_set_report_path(const char *path); +// Tell the tools to write their reports to the provided file descriptor +// (cast to void *). +void __sanitizer_set_report_fd(void *fd); +// Get the current full report file path, if a path was specified by +// an earlier call to __sanitizer_set_report_path. Returns null otherwise. +const char *__sanitizer_get_report_path(); + +// Notify the tools that the sandbox is going to be turned on. The reserved +// parameter will be used in the future to hold a structure with functions +// that the tools may call to bypass the sandbox. +void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args); + +// This function is called by the tool when it has just finished reporting +// an error. 'error_summary' is a one-line string that summarizes +// the error message. This function can be overridden by the client. +void __sanitizer_report_error_summary(const char *error_summary); + +// Some of the sanitizers (for example ASan/TSan) could miss bugs that happen +// in unaligned loads/stores. To find such bugs reliably, you need to replace +// plain unaligned loads/stores with these calls. + +/// Loads a 16-bit unaligned value. +/// +/// \param p Pointer to unaligned memory. +/// +/// \returns Loaded value. +uint16_t __sanitizer_unaligned_load16(const void *p); + +/// Loads a 32-bit unaligned value. +/// +/// \param p Pointer to unaligned memory. +/// +/// \returns Loaded value. 
+uint32_t __sanitizer_unaligned_load32(const void *p); + +/// Loads a 64-bit unaligned value. +/// +/// \param p Pointer to unaligned memory. +/// +/// \returns Loaded value. +uint64_t __sanitizer_unaligned_load64(const void *p); + +/// Stores a 16-bit unaligned value. +/// +/// \param p Pointer to unaligned memory. +/// \param x 16-bit value to store. +void __sanitizer_unaligned_store16(void *p, uint16_t x); + +/// Stores a 32-bit unaligned value. +/// +/// \param p Pointer to unaligned memory. +/// \param x 32-bit value to store. +void __sanitizer_unaligned_store32(void *p, uint32_t x); + +/// Stores a 64-bit unaligned value. +/// +/// \param p Pointer to unaligned memory. +/// \param x 64-bit value to store. +void __sanitizer_unaligned_store64(void *p, uint64_t x); + +// Returns 1 on the first call, then returns 0 thereafter. Called by the tool +// to ensure only one report is printed when multiple errors occur +// simultaneously. +int __sanitizer_acquire_crash_state(); + +/// Annotates the current state of a contiguous container, such as +/// std::vector, std::string, or similar. +/// +/// A contiguous container is a container that keeps all of its elements +/// in a contiguous region of memory. The container owns the region of memory +/// [beg, end); the memory [beg, mid) is used to store the +/// current elements, and the memory [mid, end) is reserved for future +/// elements (beg <= mid <= end). For example, in +/// std::vector<> v: +/// +/// \code +/// beg = &v[0]; +/// end = beg + v.capacity() * sizeof(v[0]); +/// mid = beg + v.size() * sizeof(v[0]); +/// \endcode +/// +/// This annotation tells the Sanitizer tool about the current state of the +/// container so that the tool can report errors when memory from +/// [mid, end) is accessed. Insert this annotation into methods like +/// push_back() or pop_back(). Supply the old and new values of +/// mid (old_mid and new_mid). In the initial +/// state mid == end, so that should be the final state when the +/// container is destroyed or when the container reallocates the storage. +/// +/// For ASan, beg should be 8-aligned and end +/// should be either 8-aligned or it should point to the end of a separate +/// heap-, stack-, or global-allocated buffer. So the following example will +/// not work: +/// +/// \code +/// int64_t x[2]; // 16 bytes, 8-aligned +/// char *beg = (char *)&x[0]; +/// char *end = beg + 12; // Not 8-aligned, not the end of the buffer +/// \endcode +/// +/// The following, however, will work: +/// \code +/// int32_t x[3]; // 12 bytes, but 8-aligned under ASan. +/// char *beg = (char*)&x[0]; +/// char *end = beg + 12; // Not 8-aligned, but is the end of the buffer +/// \endcode +/// +/// \note Use this function with caution and do not use for anything other +/// than vector-like classes. +/// +/// \param beg Beginning of memory region. +/// \param end End of memory region. +/// \param old_mid Old middle of memory region. +/// \param new_mid New middle of memory region. +void __sanitizer_annotate_contiguous_container(const void *beg, + const void *end, + const void *old_mid, + const void *new_mid); + +/// Returns true if the contiguous container [beg, end) is properly +/// poisoned. +/// +/// Proper poisoning could occur, for example, with +/// __sanitizer_annotate_contiguous_container(), that is, if +/// [beg, mid) is addressable and [mid, end) is unaddressable. 
+/// Full verification requires O(end - beg) time; this function tries +/// to avoid such complexity by touching only parts of the container around +/// beg, mid, and end. +/// +/// \param beg Beginning of memory region. +/// \param mid Middle of memory region. +/// \param end Old end of memory region. +/// +/// \returns True if the contiguous container [beg, end) is properly +/// poisoned. +int __sanitizer_verify_contiguous_container(const void *beg, const void *mid, + const void *end); + +/// Similar to __sanitizer_verify_contiguous_container() but also +/// returns the address of the first improperly poisoned byte. +/// +/// Returns NULL if the area is poisoned properly. +/// +/// \param beg Beginning of memory region. +/// \param mid Middle of memory region. +/// \param end Old end of memory region. +/// +/// \returns The bad address or NULL. +const void *__sanitizer_contiguous_container_find_bad_address(const void *beg, + const void *mid, + const void *end); + +/// Prints the stack trace leading to this call (useful for calling from the +/// debugger). +void __sanitizer_print_stack_trace(void); + +// Symbolizes the supplied 'pc' using the format string 'fmt'. +// Outputs at most 'out_buf_size' bytes into 'out_buf'. +// If 'out_buf' is not empty then output is zero or more non-empty C strings +// followed by a single empty C string. Multiple strings can be returned if PC +// corresponds to an inlined function. Inlined frames are printed in the order +// from "most-inlined" to the "least-inlined", so the last frame should be the +// non-inlined function. +// Inlined frames can be removed with 'symbolize_inline_frames=0'. +// The format syntax is described in +// lib/sanitizer_common/sanitizer_stacktrace_printer.h. +void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf, + size_t out_buf_size); +// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals). +void __sanitizer_symbolize_global(void *data_ptr, const char *fmt, + char *out_buf, size_t out_buf_size); + +/// Sets the callback to be called immediately before death on error. +/// +/// Passing 0 will unset the callback. +/// +/// \param callback User-provided callback. +void __sanitizer_set_death_callback(void (*callback)(void)); + + +// Interceptor hooks. +// Whenever a libc function interceptor is called, it checks if the +// corresponding weak hook is defined, and calls it if it is indeed defined. +// The primary use-case is data-flow-guided fuzzing, where the fuzzer needs +// to know what is being passed to libc functions (for example memcmp). +// FIXME: implement more hooks. + +/// Interceptor hook for memcmp(). +/// +/// \param called_pc PC (program counter) address of the original call. +/// \param s1 Pointer to block of memory. +/// \param s2 Pointer to block of memory. +/// \param n Number of bytes to compare. +/// \param result Value returned by the intercepted function. +void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1, + const void *s2, size_t n, int result); + +/// Interceptor hook for strncmp(). +/// +/// \param called_pc PC (program counter) address of the original call. +/// \param s1 Pointer to block of memory. +/// \param s2 Pointer to block of memory. +/// \param n Number of bytes to compare. +/// \param result Value returned by the intercepted function. +void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1, + const char *s2, size_t n, int result); + +/// Interceptor hook for strncasecmp(). 
+/// +/// \param called_pc PC (program counter) address of the original call. +/// \param s1 Pointer to block of memory. +/// \param s2 Pointer to block of memory. +/// \param n Number of bytes to compare. +/// \param result Value returned by the intercepted function. +void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1, + const char *s2, size_t n, int result); + +/// Interceptor hook for strcmp(). +/// +/// \param called_pc PC (program counter) address of the original call. +/// \param s1 Pointer to block of memory. +/// \param s2 Pointer to block of memory. +/// \param result Value returned by the intercepted function. +void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1, + const char *s2, int result); + +/// Interceptor hook for strcasecmp(). +/// +/// \param called_pc PC (program counter) address of the original call. +/// \param s1 Pointer to block of memory. +/// \param s2 Pointer to block of memory. +/// \param result Value returned by the intercepted function. +void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1, + const char *s2, int result); + +/// Interceptor hook for strstr(). +/// +/// \param called_pc PC (program counter) address of the original call. +/// \param s1 Pointer to block of memory. +/// \param s2 Pointer to block of memory. +/// \param result Value returned by the intercepted function. +void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1, + const char *s2, char *result); + +void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1, + const char *s2, char *result); + +void __sanitizer_weak_hook_memmem(void *called_pc, + const void *s1, size_t len1, + const void *s2, size_t len2, void *result); + +// Prints stack traces for all live heap allocations ordered by total +// allocation size until top_percent of total live heap is shown. top_percent +// should be between 1 and 100. At most max_number_of_contexts contexts +// (stack traces) are printed. +// Experimental feature currently available only with ASan on Linux/x86_64. +void __sanitizer_print_memory_profile(size_t top_percent, + size_t max_number_of_contexts); + +/// Notify ASan that a fiber switch has started (required only if implementing +/// your own fiber library). +/// +/// Before switching to a different stack, you must call +/// __sanitizer_start_switch_fiber() with a pointer to the bottom of the +/// destination stack and with its size. When code starts running on the new +/// stack, it must call __sanitizer_finish_switch_fiber() to finalize +/// the switch. The __sanitizer_start_switch_fiber() function takes a +/// void** pointer argument to store the current fake stack if there is +/// one (it is necessary when the runtime option +/// detect_stack_use_after_return is enabled). +/// +/// When restoring a stack, this void** pointer must be given to the +/// __sanitizer_finish_switch_fiber() function. In most cases, this +/// pointer can be stored on the stack immediately before switching. When +/// leaving a fiber permanently, NULL must be passed as the first argument to +/// the __sanitizer_start_switch_fiber() function so that the fake stack +/// is destroyed. If your program does not need stack use-after-return +/// detection, you can always pass NULL to these two functions. +/// +/// \note The fake stack mechanism is disabled during fiber switch, so if a +/// signal callback runs during the switch, it will not benefit from stack +/// use-after-return detection. +/// +/// \param[out] fake_stack_save Fake stack save location. 
+
+// Get full module name and calculate pc offset within it.
+// Returns 1 if pc belongs to some module, 0 if module was not found.
+int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
+                                             size_t module_path_len,
+                                             void **pc_offset);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/coverage_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/coverage_interface.h
new file mode 100644
index 0000000..c063cfe
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/coverage_interface.h
@@ -0,0 +1,35 @@
+//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface for sanitizer coverage.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COVERAG_INTERFACE_H
+#define SANITIZER_COVERAG_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+  // Record and dump coverage info.
+  void __sanitizer_cov_dump(void);
+
+  // Clear collected coverage info.
+  void __sanitizer_cov_reset(void);
+
+  // Dump collected coverage info. Sorts pcs by module into individual .sancov
+  // files.
+  void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
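+
+  // For illustration, a long-running process might snapshot coverage
+  // periodically (a sketch; assumes the program was built with coverage
+  // instrumentation, e.g. -fsanitize-coverage=trace-pc-guard, under a
+  // sanitizer runtime):
+  //   __sanitizer_cov_dump();   // write the accumulated coverage out
+  //   __sanitizer_cov_reset();  // start counting afresh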
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COVERAG_INTERFACE_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/dfsan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/dfsan_interface.h
new file mode 100644
index 0000000..d6209a3
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/dfsan_interface.h
@@ -0,0 +1,159 @@
+//===-- dfsan_interface.h -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef DFSAN_INTERFACE_H
+#define DFSAN_INTERFACE_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint8_t dfsan_label;
+typedef uint32_t dfsan_origin;
+
+/// Signature of the callback argument to dfsan_set_write_callback().
+typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+
+/// Computes the union of \c l1 and \c l2, resulting in a union label.
+dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
+
+/// Sets the label for each address in [addr,addr+size) to \c label.
+void dfsan_set_label(dfsan_label label, void *addr, size_t size);
+
+/// Sets the label for each address in [addr,addr+size) to the union of the
+/// current label for that address and \c label.
+void dfsan_add_label(dfsan_label label, void *addr, size_t size);
+
+/// Retrieves the label associated with the given data.
+///
+/// The type of 'data' is arbitrary. The function accepts a value of any type,
+/// which can be truncated or extended (implicitly or explicitly) as necessary.
+/// The truncation/extension operations will preserve the label of the original
+/// value.
+dfsan_label dfsan_get_label(long data);
+
+/// Retrieves the immediate origin associated with the given data. The returned
+/// origin may point to another origin.
+///
+/// The type of 'data' is arbitrary.
+dfsan_origin dfsan_get_origin(long data);
+
+/// Retrieves the label associated with the data at the given address.
+dfsan_label dfsan_read_label(const void *addr, size_t size);
+
+/// Returns whether the given label \c label contains the label \c elem.
+int dfsan_has_label(dfsan_label label, dfsan_label elem);
+
+/// Flushes the DFSan shadow, i.e. forgets about all labels currently
+/// associated with the application memory. Use this call to start over the
+/// taint tracking within the same process.
+///
+/// Note: If another thread is working with tainted data during the flush, that
+/// taint could still be written to shadow after the flush.
+void dfsan_flush(void);
+
+/// Sets a callback to be invoked on calls to write(). The callback is invoked
+/// before the write is done. The write is not guaranteed to succeed when the
+/// callback executes. Pass in NULL to remove any callback.
+void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+
+/// Interceptor hooks.
+/// Whenever a dfsan's custom function is called, the corresponding hook is
+/// called if it is defined (non-null). The hooks should be defined by the
+/// user. The primary use case is taint-guided fuzzing, where the fuzzer
+/// needs to see the parameters of the function and the labels.
+/// FIXME: implement more hooks.
+void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
+                            size_t n, dfsan_label s1_label,
+                            dfsan_label s2_label, dfsan_label n_label);
+void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
+                             size_t n, dfsan_label s1_label,
+                             dfsan_label s2_label, dfsan_label n_label);
+
+/// Prints the origin trace of the label at the address addr to stderr. It also
+/// prints description at the beginning of the trace. If origin tracking is not
+/// on, or the address is not labeled, it prints nothing.
+void dfsan_print_origin_trace(const void *addr, const char *description);
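+
+// For illustration, the core label API can be exercised like this (a sketch;
+// assumes a program built with -fsanitize=dataflow and, for the origin trace
+// to print anything, -mllvm -dfsan-track-origins=1):
+//
+//   int i = 100;
+//   dfsan_label i_label = 1;
+//   dfsan_set_label(i_label, &i, sizeof(i));
+//   assert(dfsan_has_label(dfsan_get_label(i), i_label));
+//   dfsan_print_origin_trace(&i, "tainted variable i");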
+
+/// Prints the origin trace of the label at the address \p addr to a
+/// pre-allocated output buffer. If origin tracking is not on, or the address
+/// is not labeled, it prints nothing.
+///
+/// Typical usage:
+/// \code
+///   char kDescription[] = "...";
+///   char buf[1024];
+///   dfsan_sprint_origin_trace(&tainted_var, kDescription, buf, sizeof(buf));
+/// \endcode
+///
+/// Typical usage that handles truncation:
+/// \code
+///   char buf[1024];
+///   int len = dfsan_sprint_origin_trace(&var, nullptr, buf, sizeof(buf));
+///
+///   if (len < sizeof(buf)) {
+///     ProcessOriginTrace(buf);
+///   } else {
+///     char *tmpbuf = new char[len + 1];
+///     dfsan_sprint_origin_trace(&var, nullptr, tmpbuf, len + 1);
+///     ProcessOriginTrace(tmpbuf);
+///     delete[] tmpbuf;
+///   }
+/// \endcode
+///
+/// \param addr The tainted memory address whose origin we are printing.
+/// \param description A description printed at the beginning of the trace.
+/// \param [out] out_buf The output buffer to write the results to.
+/// \param out_buf_size The size of \p out_buf.
+///
+/// \returns The number of symbols that should have been written to \p out_buf
+/// (not including trailing null byte '\0'). Thus, the string is truncated iff
+/// return value is not less than \p out_buf_size.
+size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
+                                 char *out_buf, size_t out_buf_size);
+
+/// Prints the stack trace leading to this call to a pre-allocated output
+/// buffer.
+///
+/// For usage examples, see dfsan_sprint_origin_trace.
+///
+/// \param [out] out_buf The output buffer to write the results to.
+/// \param out_buf_size The size of \p out_buf.
+///
+/// \returns The number of symbols that should have been written to \p out_buf
+/// (not including trailing null byte '\0'). Thus, the string is truncated iff
+/// return value is not less than \p out_buf_size.
+size_t dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size);
+
+/// Retrieves the very first origin associated with the data at the given
+/// address.
+dfsan_origin dfsan_get_init_origin(const void *addr);
+
+/// Returns the value of -dfsan-track-origins.
+/// * 0: do not track origins.
+/// * 1: track origins at memory store operations.
+/// * 2: track origins at memory load and store operations.
+int dfsan_get_track_origins(void);
+#ifdef __cplusplus
+} // extern "C"
+
+template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
+  dfsan_set_label(label, (void *)&data, sizeof(T));
+}
+
+#endif
+
+#endif // DFSAN_INTERFACE_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/hwasan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/hwasan_interface.h
new file mode 100644
index 0000000..14035c0
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/hwasan_interface.h
@@ -0,0 +1,99 @@
+//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_HWASAN_INTERFACE_H
+#define SANITIZER_HWASAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  // Libc hook for program startup in statically linked executables.
+  // Initializes enough of the runtime to run instrumented code. This
+  // function should only be called in statically linked executables because
+  // it modifies the GOT, which won't work in regular binaries because RELRO
+  // will already have been applied by the time the function is called. This
+  // also means that the function should be called before libc applies RELRO.
+  // Does not call libc unless there is an error.
+  // Can be called multiple times.
+  void __hwasan_init_static(void);
+
+  // This function may be optionally provided by user and should return
+  // a string containing HWASan runtime options. See asan_flags.h for details.
+  const char* __hwasan_default_options(void);
+
+  void __hwasan_enable_allocator_tagging(void);
+  void __hwasan_disable_allocator_tagging(void);
+
+  // Mark region of memory with the given tag. Both address and size need to
+  // be 16-byte aligned.
+  void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
+                           size_t size);
+
+  /// Set pointer tag. Previous tag is lost.
+  void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);
+
+  // Set memory tag from the current SP address to the given address to zero.
+  // This is meant to annotate longjmp and other non-local jumps.
+  // This function needs to know the (almost) exact destination frame address;
+  // clearing shadow for the entire thread stack like __asan_handle_no_return
+  // does would cause false reports.
+  void __hwasan_handle_longjmp(const void *sp_dst);
+
+  // Set memory tag for the part of the current thread stack below sp_dst to
+  // zero. Call this in vfork() before returning in the parent process.
+  void __hwasan_handle_vfork(const void *sp_dst);
+
+  // Libc hook for thread creation. Should be called in the child thread before
+  // any instrumented code.
+  void __hwasan_thread_enter();
+
+  // Libc hook for thread destruction. No instrumented code should run after
+  // this call.
+  void __hwasan_thread_exit();
+
+  // Print shadow and origin for the memory range to stderr in a human-readable
+  // format.
+  void __hwasan_print_shadow(const volatile void *x, size_t size);
+
+  // Print one-line report about the memory usage of the current process.
+  void __hwasan_print_memory_usage();
+
+  /* Returns the offset of the first byte in the memory range that cannot be
+   * accessed through the pointer in x, or -1 if the whole range is good. */
+  intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
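+
+  // For illustration, manually tagging a memory region might look like this
+  // (a sketch; assumes a HWASan-enabled aarch64 build, with `p` pointing to a
+  // 16-byte-aligned, 32-byte region):
+  //   void *tagged = __hwasan_tag_pointer(p, 0x2a);
+  //   __hwasan_tag_memory(p, 0x2a, 32);
+  //   // The whole region is now accessible through `tagged`:
+  //   assert(__hwasan_test_shadow(tagged, 32) == -1);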
+
+  /* Sets the callback function to be called during HWASan error reporting. */
+  void __hwasan_set_error_report_callback(void (*callback)(const char *));
+
+  int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
+  void * __sanitizer_memalign(size_t alignment, size_t size);
+  void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
+  void * __sanitizer___libc_memalign(size_t alignment, size_t size);
+  void * __sanitizer_valloc(size_t size);
+  void * __sanitizer_pvalloc(size_t size);
+  void __sanitizer_free(void *ptr);
+  void __sanitizer_cfree(void *ptr);
+  size_t __sanitizer_malloc_usable_size(const void *ptr);
+  struct mallinfo __sanitizer_mallinfo();
+  int __sanitizer_mallopt(int cmd, int value);
+  void __sanitizer_malloc_stats(void);
+  void * __sanitizer_calloc(size_t nmemb, size_t size);
+  void * __sanitizer_realloc(void *ptr, size_t size);
+  void * __sanitizer_reallocarray(void *ptr, size_t nmemb, size_t size);
+  void * __sanitizer_malloc(size_t size);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_HWASAN_INTERFACE_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/linux_syscall_hooks.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/linux_syscall_hooks.h
new file mode 100644
index 0000000..3f3f1e7
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/linux_syscall_hooks.h
@@ -0,0 +1,3100 @@
+//===-- linux_syscall_hooks.h ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of public sanitizer interface.
+//
+// System call handlers.
+//
+// Interface methods declared in this header implement pre- and post-syscall
+// actions for the active sanitizer.
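+//
+// For example, a concrete instance of the pattern below, wrapping read(2)
+// (a sketch; the read hooks are defined further down in this header):
+//   __sanitizer_syscall_pre_read(fd, buf, count);
+//   long res = syscall(__NR_read, fd, buf, count);
+//   __sanitizer_syscall_post_read(res, fd, buf, count);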
+// Usage: +// __sanitizer_syscall_pre_getfoo(...args...); +// long res = syscall(__NR_getfoo, ...args...); +// __sanitizer_syscall_post_getfoo(res, ...args...); +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_LINUX_SYSCALL_HOOKS_H +#define SANITIZER_LINUX_SYSCALL_HOOKS_H + +#define __sanitizer_syscall_pre_time(tloc) \ + __sanitizer_syscall_pre_impl_time((long)(tloc)) +#define __sanitizer_syscall_post_time(res, tloc) \ + __sanitizer_syscall_post_impl_time(res, (long)(tloc)) +#define __sanitizer_syscall_pre_stime(tptr) \ + __sanitizer_syscall_pre_impl_stime((long)(tptr)) +#define __sanitizer_syscall_post_stime(res, tptr) \ + __sanitizer_syscall_post_impl_stime(res, (long)(tptr)) +#define __sanitizer_syscall_pre_gettimeofday(tv, tz) \ + __sanitizer_syscall_pre_impl_gettimeofday((long)(tv), (long)(tz)) +#define __sanitizer_syscall_post_gettimeofday(res, tv, tz) \ + __sanitizer_syscall_post_impl_gettimeofday(res, (long)(tv), (long)(tz)) +#define __sanitizer_syscall_pre_settimeofday(tv, tz) \ + __sanitizer_syscall_pre_impl_settimeofday((long)(tv), (long)(tz)) +#define __sanitizer_syscall_post_settimeofday(res, tv, tz) \ + __sanitizer_syscall_post_impl_settimeofday(res, (long)(tv), (long)(tz)) +#define __sanitizer_syscall_pre_adjtimex(txc_p) \ + __sanitizer_syscall_pre_impl_adjtimex((long)(txc_p)) +#define __sanitizer_syscall_post_adjtimex(res, txc_p) \ + __sanitizer_syscall_post_impl_adjtimex(res, (long)(txc_p)) +#define __sanitizer_syscall_pre_times(tbuf) \ + __sanitizer_syscall_pre_impl_times((long)(tbuf)) +#define __sanitizer_syscall_post_times(res, tbuf) \ + __sanitizer_syscall_post_impl_times(res, (long)(tbuf)) +#define __sanitizer_syscall_pre_gettid() __sanitizer_syscall_pre_impl_gettid() +#define __sanitizer_syscall_post_gettid(res) \ + __sanitizer_syscall_post_impl_gettid(res) +#define __sanitizer_syscall_pre_nanosleep(rqtp, rmtp) \ + __sanitizer_syscall_pre_impl_nanosleep((long)(rqtp), (long)(rmtp)) +#define __sanitizer_syscall_post_nanosleep(res, rqtp, rmtp) \ + __sanitizer_syscall_post_impl_nanosleep(res, (long)(rqtp), (long)(rmtp)) +#define __sanitizer_syscall_pre_alarm(seconds) \ + __sanitizer_syscall_pre_impl_alarm((long)(seconds)) +#define __sanitizer_syscall_post_alarm(res, seconds) \ + __sanitizer_syscall_post_impl_alarm(res, (long)(seconds)) +#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid() +#define __sanitizer_syscall_post_getpid(res) \ + __sanitizer_syscall_post_impl_getpid(res) +#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid() +#define __sanitizer_syscall_post_getppid(res) \ + __sanitizer_syscall_post_impl_getppid(res) +#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid() +#define __sanitizer_syscall_post_getuid(res) \ + __sanitizer_syscall_post_impl_getuid(res) +#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid() +#define __sanitizer_syscall_post_geteuid(res) \ + __sanitizer_syscall_post_impl_geteuid(res) +#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid() +#define __sanitizer_syscall_post_getgid(res) \ + __sanitizer_syscall_post_impl_getgid(res) +#define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid() +#define __sanitizer_syscall_post_getegid(res) \ + __sanitizer_syscall_post_impl_getegid(res) +#define __sanitizer_syscall_pre_getresuid(ruid, euid, suid) \ + __sanitizer_syscall_pre_impl_getresuid((long)(ruid), (long)(euid), \ + (long)(suid)) +#define 
__sanitizer_syscall_post_getresuid(res, ruid, euid, suid) \ + __sanitizer_syscall_post_impl_getresuid(res, (long)(ruid), (long)(euid), \ + (long)(suid)) +#define __sanitizer_syscall_pre_getresgid(rgid, egid, sgid) \ + __sanitizer_syscall_pre_impl_getresgid((long)(rgid), (long)(egid), \ + (long)(sgid)) +#define __sanitizer_syscall_post_getresgid(res, rgid, egid, sgid) \ + __sanitizer_syscall_post_impl_getresgid(res, (long)(rgid), (long)(egid), \ + (long)(sgid)) +#define __sanitizer_syscall_pre_getpgid(pid) \ + __sanitizer_syscall_pre_impl_getpgid((long)(pid)) +#define __sanitizer_syscall_post_getpgid(res, pid) \ + __sanitizer_syscall_post_impl_getpgid(res, (long)(pid)) +#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp() +#define __sanitizer_syscall_post_getpgrp(res) \ + __sanitizer_syscall_post_impl_getpgrp(res) +#define __sanitizer_syscall_pre_getsid(pid) \ + __sanitizer_syscall_pre_impl_getsid((long)(pid)) +#define __sanitizer_syscall_post_getsid(res, pid) \ + __sanitizer_syscall_post_impl_getsid(res, (long)(pid)) +#define __sanitizer_syscall_pre_getgroups(gidsetsize, grouplist) \ + __sanitizer_syscall_pre_impl_getgroups((long)(gidsetsize), (long)(grouplist)) +#define __sanitizer_syscall_post_getgroups(res, gidsetsize, grouplist) \ + __sanitizer_syscall_post_impl_getgroups(res, (long)(gidsetsize), \ + (long)(grouplist)) +#define __sanitizer_syscall_pre_setregid(rgid, egid) \ + __sanitizer_syscall_pre_impl_setregid((long)(rgid), (long)(egid)) +#define __sanitizer_syscall_post_setregid(res, rgid, egid) \ + __sanitizer_syscall_post_impl_setregid(res, (long)(rgid), (long)(egid)) +#define __sanitizer_syscall_pre_setgid(gid) \ + __sanitizer_syscall_pre_impl_setgid((long)(gid)) +#define __sanitizer_syscall_post_setgid(res, gid) \ + __sanitizer_syscall_post_impl_setgid(res, (long)(gid)) +#define __sanitizer_syscall_pre_setreuid(ruid, euid) \ + __sanitizer_syscall_pre_impl_setreuid((long)(ruid), (long)(euid)) +#define __sanitizer_syscall_post_setreuid(res, ruid, euid) \ + __sanitizer_syscall_post_impl_setreuid(res, (long)(ruid), (long)(euid)) +#define __sanitizer_syscall_pre_setuid(uid) \ + __sanitizer_syscall_pre_impl_setuid((long)(uid)) +#define __sanitizer_syscall_post_setuid(res, uid) \ + __sanitizer_syscall_post_impl_setuid(res, (long)(uid)) +#define __sanitizer_syscall_pre_setresuid(ruid, euid, suid) \ + __sanitizer_syscall_pre_impl_setresuid((long)(ruid), (long)(euid), \ + (long)(suid)) +#define __sanitizer_syscall_post_setresuid(res, ruid, euid, suid) \ + __sanitizer_syscall_post_impl_setresuid(res, (long)(ruid), (long)(euid), \ + (long)(suid)) +#define __sanitizer_syscall_pre_setresgid(rgid, egid, sgid) \ + __sanitizer_syscall_pre_impl_setresgid((long)(rgid), (long)(egid), \ + (long)(sgid)) +#define __sanitizer_syscall_post_setresgid(res, rgid, egid, sgid) \ + __sanitizer_syscall_post_impl_setresgid(res, (long)(rgid), (long)(egid), \ + (long)(sgid)) +#define __sanitizer_syscall_pre_setfsuid(uid) \ + __sanitizer_syscall_pre_impl_setfsuid((long)(uid)) +#define __sanitizer_syscall_post_setfsuid(res, uid) \ + __sanitizer_syscall_post_impl_setfsuid(res, (long)(uid)) +#define __sanitizer_syscall_pre_setfsgid(gid) \ + __sanitizer_syscall_pre_impl_setfsgid((long)(gid)) +#define __sanitizer_syscall_post_setfsgid(res, gid) \ + __sanitizer_syscall_post_impl_setfsgid(res, (long)(gid)) +#define __sanitizer_syscall_pre_setpgid(pid, pgid) \ + __sanitizer_syscall_pre_impl_setpgid((long)(pid), (long)(pgid)) +#define __sanitizer_syscall_post_setpgid(res, pid, pgid) \ + 
__sanitizer_syscall_post_impl_setpgid(res, (long)(pid), (long)(pgid)) +#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid() +#define __sanitizer_syscall_post_setsid(res) \ + __sanitizer_syscall_post_impl_setsid(res) +#define __sanitizer_syscall_pre_setgroups(gidsetsize, grouplist) \ + __sanitizer_syscall_pre_impl_setgroups((long)(gidsetsize), (long)(grouplist)) +#define __sanitizer_syscall_post_setgroups(res, gidsetsize, grouplist) \ + __sanitizer_syscall_post_impl_setgroups(res, (long)(gidsetsize), \ + (long)(grouplist)) +#define __sanitizer_syscall_pre_acct(name) \ + __sanitizer_syscall_pre_impl_acct((long)(name)) +#define __sanitizer_syscall_post_acct(res, name) \ + __sanitizer_syscall_post_impl_acct(res, (long)(name)) +#define __sanitizer_syscall_pre_capget(header, dataptr) \ + __sanitizer_syscall_pre_impl_capget((long)(header), (long)(dataptr)) +#define __sanitizer_syscall_post_capget(res, header, dataptr) \ + __sanitizer_syscall_post_impl_capget(res, (long)(header), (long)(dataptr)) +#define __sanitizer_syscall_pre_capset(header, data) \ + __sanitizer_syscall_pre_impl_capset((long)(header), (long)(data)) +#define __sanitizer_syscall_post_capset(res, header, data) \ + __sanitizer_syscall_post_impl_capset(res, (long)(header), (long)(data)) +#define __sanitizer_syscall_pre_personality(personality) \ + __sanitizer_syscall_pre_impl_personality((long)(personality)) +#define __sanitizer_syscall_post_personality(res, personality) \ + __sanitizer_syscall_post_impl_personality(res, (long)(personality)) +#define __sanitizer_syscall_pre_sigpending(set) \ + __sanitizer_syscall_pre_impl_sigpending((long)(set)) +#define __sanitizer_syscall_post_sigpending(res, set) \ + __sanitizer_syscall_post_impl_sigpending(res, (long)(set)) +#define __sanitizer_syscall_pre_sigprocmask(how, set, oset) \ + __sanitizer_syscall_pre_impl_sigprocmask((long)(how), (long)(set), \ + (long)(oset)) +#define __sanitizer_syscall_post_sigprocmask(res, how, set, oset) \ + __sanitizer_syscall_post_impl_sigprocmask(res, (long)(how), (long)(set), \ + (long)(oset)) +#define __sanitizer_syscall_pre_getitimer(which, value) \ + __sanitizer_syscall_pre_impl_getitimer((long)(which), (long)(value)) +#define __sanitizer_syscall_post_getitimer(res, which, value) \ + __sanitizer_syscall_post_impl_getitimer(res, (long)(which), (long)(value)) +#define __sanitizer_syscall_pre_setitimer(which, value, ovalue) \ + __sanitizer_syscall_pre_impl_setitimer((long)(which), (long)(value), \ + (long)(ovalue)) +#define __sanitizer_syscall_post_setitimer(res, which, value, ovalue) \ + __sanitizer_syscall_post_impl_setitimer(res, (long)(which), (long)(value), \ + (long)(ovalue)) +#define __sanitizer_syscall_pre_timer_create(which_clock, timer_event_spec, \ + created_timer_id) \ + __sanitizer_syscall_pre_impl_timer_create( \ + (long)(which_clock), (long)(timer_event_spec), (long)(created_timer_id)) +#define __sanitizer_syscall_post_timer_create( \ + res, which_clock, timer_event_spec, created_timer_id) \ + __sanitizer_syscall_post_impl_timer_create(res, (long)(which_clock), \ + (long)(timer_event_spec), \ + (long)(created_timer_id)) +#define __sanitizer_syscall_pre_timer_gettime(timer_id, setting) \ + __sanitizer_syscall_pre_impl_timer_gettime((long)(timer_id), (long)(setting)) +#define __sanitizer_syscall_post_timer_gettime(res, timer_id, setting) \ + __sanitizer_syscall_post_impl_timer_gettime(res, (long)(timer_id), \ + (long)(setting)) +#define __sanitizer_syscall_pre_timer_getoverrun(timer_id) \ + 
__sanitizer_syscall_pre_impl_timer_getoverrun((long)(timer_id)) +#define __sanitizer_syscall_post_timer_getoverrun(res, timer_id) \ + __sanitizer_syscall_post_impl_timer_getoverrun(res, (long)(timer_id)) +#define __sanitizer_syscall_pre_timer_settime(timer_id, flags, new_setting, \ + old_setting) \ + __sanitizer_syscall_pre_impl_timer_settime((long)(timer_id), (long)(flags), \ + (long)(new_setting), \ + (long)(old_setting)) +#define __sanitizer_syscall_post_timer_settime(res, timer_id, flags, \ + new_setting, old_setting) \ + __sanitizer_syscall_post_impl_timer_settime( \ + res, (long)(timer_id), (long)(flags), (long)(new_setting), \ + (long)(old_setting)) +#define __sanitizer_syscall_pre_timer_delete(timer_id) \ + __sanitizer_syscall_pre_impl_timer_delete((long)(timer_id)) +#define __sanitizer_syscall_post_timer_delete(res, timer_id) \ + __sanitizer_syscall_post_impl_timer_delete(res, (long)(timer_id)) +#define __sanitizer_syscall_pre_clock_settime(which_clock, tp) \ + __sanitizer_syscall_pre_impl_clock_settime((long)(which_clock), (long)(tp)) +#define __sanitizer_syscall_post_clock_settime(res, which_clock, tp) \ + __sanitizer_syscall_post_impl_clock_settime(res, (long)(which_clock), \ + (long)(tp)) +#define __sanitizer_syscall_pre_clock_gettime(which_clock, tp) \ + __sanitizer_syscall_pre_impl_clock_gettime((long)(which_clock), (long)(tp)) +#define __sanitizer_syscall_post_clock_gettime(res, which_clock, tp) \ + __sanitizer_syscall_post_impl_clock_gettime(res, (long)(which_clock), \ + (long)(tp)) +#define __sanitizer_syscall_pre_clock_adjtime(which_clock, tx) \ + __sanitizer_syscall_pre_impl_clock_adjtime((long)(which_clock), (long)(tx)) +#define __sanitizer_syscall_post_clock_adjtime(res, which_clock, tx) \ + __sanitizer_syscall_post_impl_clock_adjtime(res, (long)(which_clock), \ + (long)(tx)) +#define __sanitizer_syscall_pre_clock_getres(which_clock, tp) \ + __sanitizer_syscall_pre_impl_clock_getres((long)(which_clock), (long)(tp)) +#define __sanitizer_syscall_post_clock_getres(res, which_clock, tp) \ + __sanitizer_syscall_post_impl_clock_getres(res, (long)(which_clock), \ + (long)(tp)) +#define __sanitizer_syscall_pre_clock_nanosleep(which_clock, flags, rqtp, \ + rmtp) \ + __sanitizer_syscall_pre_impl_clock_nanosleep( \ + (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp)) +#define __sanitizer_syscall_post_clock_nanosleep(res, which_clock, flags, \ + rqtp, rmtp) \ + __sanitizer_syscall_post_impl_clock_nanosleep( \ + res, (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp)) +#define __sanitizer_syscall_pre_nice(increment) \ + __sanitizer_syscall_pre_impl_nice((long)(increment)) +#define __sanitizer_syscall_post_nice(res, increment) \ + __sanitizer_syscall_post_impl_nice(res, (long)(increment)) +#define __sanitizer_syscall_pre_sched_setscheduler(pid, policy, param) \ + __sanitizer_syscall_pre_impl_sched_setscheduler((long)(pid), (long)(policy), \ + (long)(param)) +#define __sanitizer_syscall_post_sched_setscheduler(res, pid, policy, param) \ + __sanitizer_syscall_post_impl_sched_setscheduler( \ + res, (long)(pid), (long)(policy), (long)(param)) +#define __sanitizer_syscall_pre_sched_setparam(pid, param) \ + __sanitizer_syscall_pre_impl_sched_setparam((long)(pid), (long)(param)) +#define __sanitizer_syscall_post_sched_setparam(res, pid, param) \ + __sanitizer_syscall_post_impl_sched_setparam(res, (long)(pid), (long)(param)) +#define __sanitizer_syscall_pre_sched_getscheduler(pid) \ + __sanitizer_syscall_pre_impl_sched_getscheduler((long)(pid)) +#define 
__sanitizer_syscall_post_sched_getscheduler(res, pid) \ + __sanitizer_syscall_post_impl_sched_getscheduler(res, (long)(pid)) +#define __sanitizer_syscall_pre_sched_getparam(pid, param) \ + __sanitizer_syscall_pre_impl_sched_getparam((long)(pid), (long)(param)) +#define __sanitizer_syscall_post_sched_getparam(res, pid, param) \ + __sanitizer_syscall_post_impl_sched_getparam(res, (long)(pid), (long)(param)) +#define __sanitizer_syscall_pre_sched_setaffinity(pid, len, user_mask_ptr) \ + __sanitizer_syscall_pre_impl_sched_setaffinity((long)(pid), (long)(len), \ + (long)(user_mask_ptr)) +#define __sanitizer_syscall_post_sched_setaffinity(res, pid, len, \ + user_mask_ptr) \ + __sanitizer_syscall_post_impl_sched_setaffinity( \ + res, (long)(pid), (long)(len), (long)(user_mask_ptr)) +#define __sanitizer_syscall_pre_sched_getaffinity(pid, len, user_mask_ptr) \ + __sanitizer_syscall_pre_impl_sched_getaffinity((long)(pid), (long)(len), \ + (long)(user_mask_ptr)) +#define __sanitizer_syscall_post_sched_getaffinity(res, pid, len, \ + user_mask_ptr) \ + __sanitizer_syscall_post_impl_sched_getaffinity( \ + res, (long)(pid), (long)(len), (long)(user_mask_ptr)) +#define __sanitizer_syscall_pre_sched_yield() \ + __sanitizer_syscall_pre_impl_sched_yield() +#define __sanitizer_syscall_post_sched_yield(res) \ + __sanitizer_syscall_post_impl_sched_yield(res) +#define __sanitizer_syscall_pre_sched_get_priority_max(policy) \ + __sanitizer_syscall_pre_impl_sched_get_priority_max((long)(policy)) +#define __sanitizer_syscall_post_sched_get_priority_max(res, policy) \ + __sanitizer_syscall_post_impl_sched_get_priority_max(res, (long)(policy)) +#define __sanitizer_syscall_pre_sched_get_priority_min(policy) \ + __sanitizer_syscall_pre_impl_sched_get_priority_min((long)(policy)) +#define __sanitizer_syscall_post_sched_get_priority_min(res, policy) \ + __sanitizer_syscall_post_impl_sched_get_priority_min(res, (long)(policy)) +#define __sanitizer_syscall_pre_sched_rr_get_interval(pid, interval) \ + __sanitizer_syscall_pre_impl_sched_rr_get_interval((long)(pid), \ + (long)(interval)) +#define __sanitizer_syscall_post_sched_rr_get_interval(res, pid, interval) \ + __sanitizer_syscall_post_impl_sched_rr_get_interval(res, (long)(pid), \ + (long)(interval)) +#define __sanitizer_syscall_pre_setpriority(which, who, niceval) \ + __sanitizer_syscall_pre_impl_setpriority((long)(which), (long)(who), \ + (long)(niceval)) +#define __sanitizer_syscall_post_setpriority(res, which, who, niceval) \ + __sanitizer_syscall_post_impl_setpriority(res, (long)(which), (long)(who), \ + (long)(niceval)) +#define __sanitizer_syscall_pre_getpriority(which, who) \ + __sanitizer_syscall_pre_impl_getpriority((long)(which), (long)(who)) +#define __sanitizer_syscall_post_getpriority(res, which, who) \ + __sanitizer_syscall_post_impl_getpriority(res, (long)(which), (long)(who)) +#define __sanitizer_syscall_pre_shutdown(arg0, arg1) \ + __sanitizer_syscall_pre_impl_shutdown((long)(arg0), (long)(arg1)) +#define __sanitizer_syscall_post_shutdown(res, arg0, arg1) \ + __sanitizer_syscall_post_impl_shutdown(res, (long)(arg0), (long)(arg1)) +#define __sanitizer_syscall_pre_reboot(magic1, magic2, cmd, arg) \ + __sanitizer_syscall_pre_impl_reboot((long)(magic1), (long)(magic2), \ + (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_post_reboot(res, magic1, magic2, cmd, arg) \ + __sanitizer_syscall_post_impl_reboot(res, (long)(magic1), (long)(magic2), \ + (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_pre_restart_syscall() \ + 
__sanitizer_syscall_pre_impl_restart_syscall() +#define __sanitizer_syscall_post_restart_syscall(res) \ + __sanitizer_syscall_post_impl_restart_syscall(res) +#define __sanitizer_syscall_pre_kexec_load(entry, nr_segments, segments, \ + flags) \ + __sanitizer_syscall_pre_impl_kexec_load((long)(entry), (long)(nr_segments), \ + (long)(segments), (long)(flags)) +#define __sanitizer_syscall_post_kexec_load(res, entry, nr_segments, segments, \ + flags) \ + __sanitizer_syscall_post_impl_kexec_load(res, (long)(entry), \ + (long)(nr_segments), \ + (long)(segments), (long)(flags)) +#define __sanitizer_syscall_pre_exit(error_code) \ + __sanitizer_syscall_pre_impl_exit((long)(error_code)) +#define __sanitizer_syscall_post_exit(res, error_code) \ + __sanitizer_syscall_post_impl_exit(res, (long)(error_code)) +#define __sanitizer_syscall_pre_exit_group(error_code) \ + __sanitizer_syscall_pre_impl_exit_group((long)(error_code)) +#define __sanitizer_syscall_post_exit_group(res, error_code) \ + __sanitizer_syscall_post_impl_exit_group(res, (long)(error_code)) +#define __sanitizer_syscall_pre_wait4(pid, stat_addr, options, ru) \ + __sanitizer_syscall_pre_impl_wait4((long)(pid), (long)(stat_addr), \ + (long)(options), (long)(ru)) +#define __sanitizer_syscall_post_wait4(res, pid, stat_addr, options, ru) \ + __sanitizer_syscall_post_impl_wait4(res, (long)(pid), (long)(stat_addr), \ + (long)(options), (long)(ru)) +#define __sanitizer_syscall_pre_waitid(which, pid, infop, options, ru) \ + __sanitizer_syscall_pre_impl_waitid( \ + (long)(which), (long)(pid), (long)(infop), (long)(options), (long)(ru)) +#define __sanitizer_syscall_post_waitid(res, which, pid, infop, options, ru) \ + __sanitizer_syscall_post_impl_waitid(res, (long)(which), (long)(pid), \ + (long)(infop), (long)(options), \ + (long)(ru)) +#define __sanitizer_syscall_pre_waitpid(pid, stat_addr, options) \ + __sanitizer_syscall_pre_impl_waitpid((long)(pid), (long)(stat_addr), \ + (long)(options)) +#define __sanitizer_syscall_post_waitpid(res, pid, stat_addr, options) \ + __sanitizer_syscall_post_impl_waitpid(res, (long)(pid), (long)(stat_addr), \ + (long)(options)) +#define __sanitizer_syscall_pre_set_tid_address(tidptr) \ + __sanitizer_syscall_pre_impl_set_tid_address((long)(tidptr)) +#define __sanitizer_syscall_post_set_tid_address(res, tidptr) \ + __sanitizer_syscall_post_impl_set_tid_address(res, (long)(tidptr)) +#define __sanitizer_syscall_pre_init_module(umod, len, uargs) \ + __sanitizer_syscall_pre_impl_init_module((long)(umod), (long)(len), \ + (long)(uargs)) +#define __sanitizer_syscall_post_init_module(res, umod, len, uargs) \ + __sanitizer_syscall_post_impl_init_module(res, (long)(umod), (long)(len), \ + (long)(uargs)) +#define __sanitizer_syscall_pre_delete_module(name_user, flags) \ + __sanitizer_syscall_pre_impl_delete_module((long)(name_user), (long)(flags)) +#define __sanitizer_syscall_post_delete_module(res, name_user, flags) \ + __sanitizer_syscall_post_impl_delete_module(res, (long)(name_user), \ + (long)(flags)) +#define __sanitizer_syscall_pre_rt_sigprocmask(how, set, oset, sigsetsize) \ + __sanitizer_syscall_pre_impl_rt_sigprocmask( \ + (long)(how), (long)(set), (long)(oset), (long)(sigsetsize)) +#define __sanitizer_syscall_post_rt_sigprocmask(res, how, set, oset, \ + sigsetsize) \ + __sanitizer_syscall_post_impl_rt_sigprocmask( \ + res, (long)(how), (long)(set), (long)(oset), (long)(sigsetsize)) +#define __sanitizer_syscall_pre_rt_sigpending(set, sigsetsize) \ + __sanitizer_syscall_pre_impl_rt_sigpending((long)(set), 
(long)(sigsetsize)) +#define __sanitizer_syscall_post_rt_sigpending(res, set, sigsetsize) \ + __sanitizer_syscall_post_impl_rt_sigpending(res, (long)(set), \ + (long)(sigsetsize)) +#define __sanitizer_syscall_pre_rt_sigtimedwait(uthese, uinfo, uts, \ + sigsetsize) \ + __sanitizer_syscall_pre_impl_rt_sigtimedwait( \ + (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize)) +#define __sanitizer_syscall_post_rt_sigtimedwait(res, uthese, uinfo, uts, \ + sigsetsize) \ + __sanitizer_syscall_post_impl_rt_sigtimedwait( \ + res, (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize)) +#define __sanitizer_syscall_pre_rt_tgsigqueueinfo(tgid, pid, sig, uinfo) \ + __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo((long)(tgid), (long)(pid), \ + (long)(sig), (long)(uinfo)) +#define __sanitizer_syscall_post_rt_tgsigqueueinfo(res, tgid, pid, sig, uinfo) \ + __sanitizer_syscall_post_impl_rt_tgsigqueueinfo( \ + res, (long)(tgid), (long)(pid), (long)(sig), (long)(uinfo)) +#define __sanitizer_syscall_pre_kill(pid, sig) \ + __sanitizer_syscall_pre_impl_kill((long)(pid), (long)(sig)) +#define __sanitizer_syscall_post_kill(res, pid, sig) \ + __sanitizer_syscall_post_impl_kill(res, (long)(pid), (long)(sig)) +#define __sanitizer_syscall_pre_tgkill(tgid, pid, sig) \ + __sanitizer_syscall_pre_impl_tgkill((long)(tgid), (long)(pid), (long)(sig)) +#define __sanitizer_syscall_post_tgkill(res, tgid, pid, sig) \ + __sanitizer_syscall_post_impl_tgkill(res, (long)(tgid), (long)(pid), \ + (long)(sig)) +#define __sanitizer_syscall_pre_tkill(pid, sig) \ + __sanitizer_syscall_pre_impl_tkill((long)(pid), (long)(sig)) +#define __sanitizer_syscall_post_tkill(res, pid, sig) \ + __sanitizer_syscall_post_impl_tkill(res, (long)(pid), (long)(sig)) +#define __sanitizer_syscall_pre_rt_sigqueueinfo(pid, sig, uinfo) \ + __sanitizer_syscall_pre_impl_rt_sigqueueinfo((long)(pid), (long)(sig), \ + (long)(uinfo)) +#define __sanitizer_syscall_post_rt_sigqueueinfo(res, pid, sig, uinfo) \ + __sanitizer_syscall_post_impl_rt_sigqueueinfo(res, (long)(pid), (long)(sig), \ + (long)(uinfo)) +#define __sanitizer_syscall_pre_sgetmask() \ + __sanitizer_syscall_pre_impl_sgetmask() +#define __sanitizer_syscall_post_sgetmask(res) \ + __sanitizer_syscall_post_impl_sgetmask(res) +#define __sanitizer_syscall_pre_ssetmask(newmask) \ + __sanitizer_syscall_pre_impl_ssetmask((long)(newmask)) +#define __sanitizer_syscall_post_ssetmask(res, newmask) \ + __sanitizer_syscall_post_impl_ssetmask(res, (long)(newmask)) +#define __sanitizer_syscall_pre_signal(sig, handler) \ + __sanitizer_syscall_pre_impl_signal((long)(sig), (long)(handler)) +#define __sanitizer_syscall_post_signal(res, sig, handler) \ + __sanitizer_syscall_post_impl_signal(res, (long)(sig), (long)(handler)) +#define __sanitizer_syscall_pre_pause() __sanitizer_syscall_pre_impl_pause() +#define __sanitizer_syscall_post_pause(res) \ + __sanitizer_syscall_post_impl_pause(res) +#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync() +#define __sanitizer_syscall_post_sync(res) \ + __sanitizer_syscall_post_impl_sync(res) +#define __sanitizer_syscall_pre_fsync(fd) \ + __sanitizer_syscall_pre_impl_fsync((long)(fd)) +#define __sanitizer_syscall_post_fsync(res, fd) \ + __sanitizer_syscall_post_impl_fsync(res, (long)(fd)) +#define __sanitizer_syscall_pre_fdatasync(fd) \ + __sanitizer_syscall_pre_impl_fdatasync((long)(fd)) +#define __sanitizer_syscall_post_fdatasync(res, fd) \ + __sanitizer_syscall_post_impl_fdatasync(res, (long)(fd)) +#define __sanitizer_syscall_pre_bdflush(func, data) \ 
+ __sanitizer_syscall_pre_impl_bdflush((long)(func), (long)(data)) +#define __sanitizer_syscall_post_bdflush(res, func, data) \ + __sanitizer_syscall_post_impl_bdflush(res, (long)(func), (long)(data)) +#define __sanitizer_syscall_pre_mount(dev_name, dir_name, type, flags, data) \ + __sanitizer_syscall_pre_impl_mount((long)(dev_name), (long)(dir_name), \ + (long)(type), (long)(flags), \ + (long)(data)) +#define __sanitizer_syscall_post_mount(res, dev_name, dir_name, type, flags, \ + data) \ + __sanitizer_syscall_post_impl_mount(res, (long)(dev_name), (long)(dir_name), \ + (long)(type), (long)(flags), \ + (long)(data)) +#define __sanitizer_syscall_pre_umount(name, flags) \ + __sanitizer_syscall_pre_impl_umount((long)(name), (long)(flags)) +#define __sanitizer_syscall_post_umount(res, name, flags) \ + __sanitizer_syscall_post_impl_umount(res, (long)(name), (long)(flags)) +#define __sanitizer_syscall_pre_oldumount(name) \ + __sanitizer_syscall_pre_impl_oldumount((long)(name)) +#define __sanitizer_syscall_post_oldumount(res, name) \ + __sanitizer_syscall_post_impl_oldumount(res, (long)(name)) +#define __sanitizer_syscall_pre_truncate(path, length) \ + __sanitizer_syscall_pre_impl_truncate((long)(path), (long)(length)) +#define __sanitizer_syscall_post_truncate(res, path, length) \ + __sanitizer_syscall_post_impl_truncate(res, (long)(path), (long)(length)) +#define __sanitizer_syscall_pre_ftruncate(fd, length) \ + __sanitizer_syscall_pre_impl_ftruncate((long)(fd), (long)(length)) +#define __sanitizer_syscall_post_ftruncate(res, fd, length) \ + __sanitizer_syscall_post_impl_ftruncate(res, (long)(fd), (long)(length)) +#define __sanitizer_syscall_pre_stat(filename, statbuf) \ + __sanitizer_syscall_pre_impl_stat((long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_post_stat(res, filename, statbuf) \ + __sanitizer_syscall_post_impl_stat(res, (long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_pre_statfs(path, buf) \ + __sanitizer_syscall_pre_impl_statfs((long)(path), (long)(buf)) +#define __sanitizer_syscall_post_statfs(res, path, buf) \ + __sanitizer_syscall_post_impl_statfs(res, (long)(path), (long)(buf)) +#define __sanitizer_syscall_pre_statfs64(path, sz, buf) \ + __sanitizer_syscall_pre_impl_statfs64((long)(path), (long)(sz), (long)(buf)) +#define __sanitizer_syscall_post_statfs64(res, path, sz, buf) \ + __sanitizer_syscall_post_impl_statfs64(res, (long)(path), (long)(sz), \ + (long)(buf)) +#define __sanitizer_syscall_pre_fstatfs(fd, buf) \ + __sanitizer_syscall_pre_impl_fstatfs((long)(fd), (long)(buf)) +#define __sanitizer_syscall_post_fstatfs(res, fd, buf) \ + __sanitizer_syscall_post_impl_fstatfs(res, (long)(fd), (long)(buf)) +#define __sanitizer_syscall_pre_fstatfs64(fd, sz, buf) \ + __sanitizer_syscall_pre_impl_fstatfs64((long)(fd), (long)(sz), (long)(buf)) +#define __sanitizer_syscall_post_fstatfs64(res, fd, sz, buf) \ + __sanitizer_syscall_post_impl_fstatfs64(res, (long)(fd), (long)(sz), \ + (long)(buf)) +#define __sanitizer_syscall_pre_lstat(filename, statbuf) \ + __sanitizer_syscall_pre_impl_lstat((long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_post_lstat(res, filename, statbuf) \ + __sanitizer_syscall_post_impl_lstat(res, (long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_pre_fstat(fd, statbuf) \ + __sanitizer_syscall_pre_impl_fstat((long)(fd), (long)(statbuf)) +#define __sanitizer_syscall_post_fstat(res, fd, statbuf) \ + __sanitizer_syscall_post_impl_fstat(res, (long)(fd), (long)(statbuf)) +#define 
__sanitizer_syscall_pre_newstat(filename, statbuf) \ + __sanitizer_syscall_pre_impl_newstat((long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_post_newstat(res, filename, statbuf) \ + __sanitizer_syscall_post_impl_newstat(res, (long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_pre_newlstat(filename, statbuf) \ + __sanitizer_syscall_pre_impl_newlstat((long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_post_newlstat(res, filename, statbuf) \ + __sanitizer_syscall_post_impl_newlstat(res, (long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_pre_newfstat(fd, statbuf) \ + __sanitizer_syscall_pre_impl_newfstat((long)(fd), (long)(statbuf)) +#define __sanitizer_syscall_post_newfstat(res, fd, statbuf) \ + __sanitizer_syscall_post_impl_newfstat(res, (long)(fd), (long)(statbuf)) +#define __sanitizer_syscall_pre_ustat(dev, ubuf) \ + __sanitizer_syscall_pre_impl_ustat((long)(dev), (long)(ubuf)) +#define __sanitizer_syscall_post_ustat(res, dev, ubuf) \ + __sanitizer_syscall_post_impl_ustat(res, (long)(dev), (long)(ubuf)) +#define __sanitizer_syscall_pre_stat64(filename, statbuf) \ + __sanitizer_syscall_pre_impl_stat64((long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_post_stat64(res, filename, statbuf) \ + __sanitizer_syscall_post_impl_stat64(res, (long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_pre_fstat64(fd, statbuf) \ + __sanitizer_syscall_pre_impl_fstat64((long)(fd), (long)(statbuf)) +#define __sanitizer_syscall_post_fstat64(res, fd, statbuf) \ + __sanitizer_syscall_post_impl_fstat64(res, (long)(fd), (long)(statbuf)) +#define __sanitizer_syscall_pre_lstat64(filename, statbuf) \ + __sanitizer_syscall_pre_impl_lstat64((long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_post_lstat64(res, filename, statbuf) \ + __sanitizer_syscall_post_impl_lstat64(res, (long)(filename), (long)(statbuf)) +#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags) \ + __sanitizer_syscall_pre_impl_setxattr( \ + (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags)) +#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \ + __sanitizer_syscall_post_impl_setxattr(res, (long)(path), (long)(name), \ + (long)(value), (long)(size), \ + (long)(flags)) +#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags) \ + __sanitizer_syscall_pre_impl_lsetxattr( \ + (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags)) +#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size, \ + flags) \ + __sanitizer_syscall_post_impl_lsetxattr(res, (long)(path), (long)(name), \ + (long)(value), (long)(size), \ + (long)(flags)) +#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags) \ + __sanitizer_syscall_pre_impl_fsetxattr( \ + (long)(fd), (long)(name), (long)(value), (long)(size), (long)(flags)) +#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags) \ + __sanitizer_syscall_post_impl_fsetxattr(res, (long)(fd), (long)(name), \ + (long)(value), (long)(size), \ + (long)(flags)) +#define __sanitizer_syscall_pre_getxattr(path, name, value, size) \ + __sanitizer_syscall_pre_impl_getxattr((long)(path), (long)(name), \ + (long)(value), (long)(size)) +#define __sanitizer_syscall_post_getxattr(res, path, name, value, size) \ + __sanitizer_syscall_post_impl_getxattr(res, (long)(path), (long)(name), \ + (long)(value), (long)(size)) +#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size) \ + 
__sanitizer_syscall_pre_impl_lgetxattr((long)(path), (long)(name), \ + (long)(value), (long)(size)) +#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size) \ + __sanitizer_syscall_post_impl_lgetxattr(res, (long)(path), (long)(name), \ + (long)(value), (long)(size)) +#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size) \ + __sanitizer_syscall_pre_impl_fgetxattr((long)(fd), (long)(name), \ + (long)(value), (long)(size)) +#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size) \ + __sanitizer_syscall_post_impl_fgetxattr(res, (long)(fd), (long)(name), \ + (long)(value), (long)(size)) +#define __sanitizer_syscall_pre_listxattr(path, list, size) \ + __sanitizer_syscall_pre_impl_listxattr((long)(path), (long)(list), \ + (long)(size)) +#define __sanitizer_syscall_post_listxattr(res, path, list, size) \ + __sanitizer_syscall_post_impl_listxattr(res, (long)(path), (long)(list), \ + (long)(size)) +#define __sanitizer_syscall_pre_llistxattr(path, list, size) \ + __sanitizer_syscall_pre_impl_llistxattr((long)(path), (long)(list), \ + (long)(size)) +#define __sanitizer_syscall_post_llistxattr(res, path, list, size) \ + __sanitizer_syscall_post_impl_llistxattr(res, (long)(path), (long)(list), \ + (long)(size)) +#define __sanitizer_syscall_pre_flistxattr(fd, list, size) \ + __sanitizer_syscall_pre_impl_flistxattr((long)(fd), (long)(list), \ + (long)(size)) +#define __sanitizer_syscall_post_flistxattr(res, fd, list, size) \ + __sanitizer_syscall_post_impl_flistxattr(res, (long)(fd), (long)(list), \ + (long)(size)) +#define __sanitizer_syscall_pre_removexattr(path, name) \ + __sanitizer_syscall_pre_impl_removexattr((long)(path), (long)(name)) +#define __sanitizer_syscall_post_removexattr(res, path, name) \ + __sanitizer_syscall_post_impl_removexattr(res, (long)(path), (long)(name)) +#define __sanitizer_syscall_pre_lremovexattr(path, name) \ + __sanitizer_syscall_pre_impl_lremovexattr((long)(path), (long)(name)) +#define __sanitizer_syscall_post_lremovexattr(res, path, name) \ + __sanitizer_syscall_post_impl_lremovexattr(res, (long)(path), (long)(name)) +#define __sanitizer_syscall_pre_fremovexattr(fd, name) \ + __sanitizer_syscall_pre_impl_fremovexattr((long)(fd), (long)(name)) +#define __sanitizer_syscall_post_fremovexattr(res, fd, name) \ + __sanitizer_syscall_post_impl_fremovexattr(res, (long)(fd), (long)(name)) +#define __sanitizer_syscall_pre_brk(brk) \ + __sanitizer_syscall_pre_impl_brk((long)(brk)) +#define __sanitizer_syscall_post_brk(res, brk) \ + __sanitizer_syscall_post_impl_brk(res, (long)(brk)) +#define __sanitizer_syscall_pre_mprotect(start, len, prot) \ + __sanitizer_syscall_pre_impl_mprotect((long)(start), (long)(len), \ + (long)(prot)) +#define __sanitizer_syscall_post_mprotect(res, start, len, prot) \ + __sanitizer_syscall_post_impl_mprotect(res, (long)(start), (long)(len), \ + (long)(prot)) +#define __sanitizer_syscall_pre_mremap(addr, old_len, new_len, flags, \ + new_addr) \ + __sanitizer_syscall_pre_impl_mremap((long)(addr), (long)(old_len), \ + (long)(new_len), (long)(flags), \ + (long)(new_addr)) +#define __sanitizer_syscall_post_mremap(res, addr, old_len, new_len, flags, \ + new_addr) \ + __sanitizer_syscall_post_impl_mremap(res, (long)(addr), (long)(old_len), \ + (long)(new_len), (long)(flags), \ + (long)(new_addr)) +#define __sanitizer_syscall_pre_remap_file_pages(start, size, prot, pgoff, \ + flags) \ + __sanitizer_syscall_pre_impl_remap_file_pages( \ + (long)(start), (long)(size), (long)(prot), (long)(pgoff), (long)(flags)) +#define 
__sanitizer_syscall_post_remap_file_pages(res, start, size, prot, \ + pgoff, flags) \ + __sanitizer_syscall_post_impl_remap_file_pages(res, (long)(start), \ + (long)(size), (long)(prot), \ + (long)(pgoff), (long)(flags)) +#define __sanitizer_syscall_pre_msync(start, len, flags) \ + __sanitizer_syscall_pre_impl_msync((long)(start), (long)(len), (long)(flags)) +#define __sanitizer_syscall_post_msync(res, start, len, flags) \ + __sanitizer_syscall_post_impl_msync(res, (long)(start), (long)(len), \ + (long)(flags)) +#define __sanitizer_syscall_pre_munmap(addr, len) \ + __sanitizer_syscall_pre_impl_munmap((long)(addr), (long)(len)) +#define __sanitizer_syscall_post_munmap(res, addr, len) \ + __sanitizer_syscall_post_impl_munmap(res, (long)(addr), (long)(len)) +#define __sanitizer_syscall_pre_mlock(start, len) \ + __sanitizer_syscall_pre_impl_mlock((long)(start), (long)(len)) +#define __sanitizer_syscall_post_mlock(res, start, len) \ + __sanitizer_syscall_post_impl_mlock(res, (long)(start), (long)(len)) +#define __sanitizer_syscall_pre_munlock(start, len) \ + __sanitizer_syscall_pre_impl_munlock((long)(start), (long)(len)) +#define __sanitizer_syscall_post_munlock(res, start, len) \ + __sanitizer_syscall_post_impl_munlock(res, (long)(start), (long)(len)) +#define __sanitizer_syscall_pre_mlockall(flags) \ + __sanitizer_syscall_pre_impl_mlockall((long)(flags)) +#define __sanitizer_syscall_post_mlockall(res, flags) \ + __sanitizer_syscall_post_impl_mlockall(res, (long)(flags)) +#define __sanitizer_syscall_pre_munlockall() \ + __sanitizer_syscall_pre_impl_munlockall() +#define __sanitizer_syscall_post_munlockall(res) \ + __sanitizer_syscall_post_impl_munlockall(res) +#define __sanitizer_syscall_pre_madvise(start, len, behavior) \ + __sanitizer_syscall_pre_impl_madvise((long)(start), (long)(len), \ + (long)(behavior)) +#define __sanitizer_syscall_post_madvise(res, start, len, behavior) \ + __sanitizer_syscall_post_impl_madvise(res, (long)(start), (long)(len), \ + (long)(behavior)) +#define __sanitizer_syscall_pre_mincore(start, len, vec) \ + __sanitizer_syscall_pre_impl_mincore((long)(start), (long)(len), (long)(vec)) +#define __sanitizer_syscall_post_mincore(res, start, len, vec) \ + __sanitizer_syscall_post_impl_mincore(res, (long)(start), (long)(len), \ + (long)(vec)) +#define __sanitizer_syscall_pre_pivot_root(new_root, put_old) \ + __sanitizer_syscall_pre_impl_pivot_root((long)(new_root), (long)(put_old)) +#define __sanitizer_syscall_post_pivot_root(res, new_root, put_old) \ + __sanitizer_syscall_post_impl_pivot_root(res, (long)(new_root), \ + (long)(put_old)) +#define __sanitizer_syscall_pre_chroot(filename) \ + __sanitizer_syscall_pre_impl_chroot((long)(filename)) +#define __sanitizer_syscall_post_chroot(res, filename) \ + __sanitizer_syscall_post_impl_chroot(res, (long)(filename)) +#define __sanitizer_syscall_pre_mknod(filename, mode, dev) \ + __sanitizer_syscall_pre_impl_mknod((long)(filename), (long)(mode), \ + (long)(dev)) +#define __sanitizer_syscall_post_mknod(res, filename, mode, dev) \ + __sanitizer_syscall_post_impl_mknod(res, (long)(filename), (long)(mode), \ + (long)(dev)) +#define __sanitizer_syscall_pre_link(oldname, newname) \ + __sanitizer_syscall_pre_impl_link((long)(oldname), (long)(newname)) +#define __sanitizer_syscall_post_link(res, oldname, newname) \ + __sanitizer_syscall_post_impl_link(res, (long)(oldname), (long)(newname)) +#define __sanitizer_syscall_pre_symlink(old, new_) \ + __sanitizer_syscall_pre_impl_symlink((long)(old), (long)(new_)) +#define 
__sanitizer_syscall_post_symlink(res, old, new_) \ + __sanitizer_syscall_post_impl_symlink(res, (long)(old), (long)(new_)) +#define __sanitizer_syscall_pre_unlink(pathname) \ + __sanitizer_syscall_pre_impl_unlink((long)(pathname)) +#define __sanitizer_syscall_post_unlink(res, pathname) \ + __sanitizer_syscall_post_impl_unlink(res, (long)(pathname)) +#define __sanitizer_syscall_pre_rename(oldname, newname) \ + __sanitizer_syscall_pre_impl_rename((long)(oldname), (long)(newname)) +#define __sanitizer_syscall_post_rename(res, oldname, newname) \ + __sanitizer_syscall_post_impl_rename(res, (long)(oldname), (long)(newname)) +#define __sanitizer_syscall_pre_chmod(filename, mode) \ + __sanitizer_syscall_pre_impl_chmod((long)(filename), (long)(mode)) +#define __sanitizer_syscall_post_chmod(res, filename, mode) \ + __sanitizer_syscall_post_impl_chmod(res, (long)(filename), (long)(mode)) +#define __sanitizer_syscall_pre_fchmod(fd, mode) \ + __sanitizer_syscall_pre_impl_fchmod((long)(fd), (long)(mode)) +#define __sanitizer_syscall_post_fchmod(res, fd, mode) \ + __sanitizer_syscall_post_impl_fchmod(res, (long)(fd), (long)(mode)) +#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \ + __sanitizer_syscall_pre_impl_fcntl((long)(fd), (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg) \ + __sanitizer_syscall_post_impl_fcntl(res, (long)(fd), (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_pre_fcntl64(fd, cmd, arg) \ + __sanitizer_syscall_pre_impl_fcntl64((long)(fd), (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_post_fcntl64(res, fd, cmd, arg) \ + __sanitizer_syscall_post_impl_fcntl64(res, (long)(fd), (long)(cmd), \ + (long)(arg)) +#define __sanitizer_syscall_pre_pipe(fildes) \ + __sanitizer_syscall_pre_impl_pipe((long)(fildes)) +#define __sanitizer_syscall_post_pipe(res, fildes) \ + __sanitizer_syscall_post_impl_pipe(res, (long)(fildes)) +#define __sanitizer_syscall_pre_pipe2(fildes, flags) \ + __sanitizer_syscall_pre_impl_pipe2((long)(fildes), (long)(flags)) +#define __sanitizer_syscall_post_pipe2(res, fildes, flags) \ + __sanitizer_syscall_post_impl_pipe2(res, (long)(fildes), (long)(flags)) +#define __sanitizer_syscall_pre_dup(fildes) \ + __sanitizer_syscall_pre_impl_dup((long)(fildes)) +#define __sanitizer_syscall_post_dup(res, fildes) \ + __sanitizer_syscall_post_impl_dup(res, (long)(fildes)) +#define __sanitizer_syscall_pre_dup2(oldfd, newfd) \ + __sanitizer_syscall_pre_impl_dup2((long)(oldfd), (long)(newfd)) +#define __sanitizer_syscall_post_dup2(res, oldfd, newfd) \ + __sanitizer_syscall_post_impl_dup2(res, (long)(oldfd), (long)(newfd)) +#define __sanitizer_syscall_pre_dup3(oldfd, newfd, flags) \ + __sanitizer_syscall_pre_impl_dup3((long)(oldfd), (long)(newfd), (long)(flags)) +#define __sanitizer_syscall_post_dup3(res, oldfd, newfd, flags) \ + __sanitizer_syscall_post_impl_dup3(res, (long)(oldfd), (long)(newfd), \ + (long)(flags)) +#define __sanitizer_syscall_pre_ioperm(from, num, on) \ + __sanitizer_syscall_pre_impl_ioperm((long)(from), (long)(num), (long)(on)) +#define __sanitizer_syscall_post_ioperm(res, from, num, on) \ + __sanitizer_syscall_post_impl_ioperm(res, (long)(from), (long)(num), \ + (long)(on)) +#define __sanitizer_syscall_pre_ioctl(fd, cmd, arg) \ + __sanitizer_syscall_pre_impl_ioctl((long)(fd), (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_post_ioctl(res, fd, cmd, arg) \ + __sanitizer_syscall_post_impl_ioctl(res, (long)(fd), (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_pre_flock(fd, cmd) \ + 
__sanitizer_syscall_pre_impl_flock((long)(fd), (long)(cmd)) +#define __sanitizer_syscall_post_flock(res, fd, cmd) \ + __sanitizer_syscall_post_impl_flock(res, (long)(fd), (long)(cmd)) +#define __sanitizer_syscall_pre_io_setup(nr_reqs, ctx) \ + __sanitizer_syscall_pre_impl_io_setup((long)(nr_reqs), (long)(ctx)) +#define __sanitizer_syscall_post_io_setup(res, nr_reqs, ctx) \ + __sanitizer_syscall_post_impl_io_setup(res, (long)(nr_reqs), (long)(ctx)) +#define __sanitizer_syscall_pre_io_destroy(ctx) \ + __sanitizer_syscall_pre_impl_io_destroy((long)(ctx)) +#define __sanitizer_syscall_post_io_destroy(res, ctx) \ + __sanitizer_syscall_post_impl_io_destroy(res, (long)(ctx)) +#define __sanitizer_syscall_pre_io_getevents(ctx_id, min_nr, nr, events, \ + timeout) \ + __sanitizer_syscall_pre_impl_io_getevents((long)(ctx_id), (long)(min_nr), \ + (long)(nr), (long)(events), \ + (long)(timeout)) +#define __sanitizer_syscall_post_io_getevents(res, ctx_id, min_nr, nr, events, \ + timeout) \ + __sanitizer_syscall_post_impl_io_getevents(res, (long)(ctx_id), \ + (long)(min_nr), (long)(nr), \ + (long)(events), (long)(timeout)) +#define __sanitizer_syscall_pre_io_submit(ctx_id, arg1, arg2) \ + __sanitizer_syscall_pre_impl_io_submit((long)(ctx_id), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_post_io_submit(res, ctx_id, arg1, arg2) \ + __sanitizer_syscall_post_impl_io_submit(res, (long)(ctx_id), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_io_cancel(ctx_id, iocb, result) \ + __sanitizer_syscall_pre_impl_io_cancel((long)(ctx_id), (long)(iocb), \ + (long)(result)) +#define __sanitizer_syscall_post_io_cancel(res, ctx_id, iocb, result) \ + __sanitizer_syscall_post_impl_io_cancel(res, (long)(ctx_id), (long)(iocb), \ + (long)(result)) +#define __sanitizer_syscall_pre_sendfile(out_fd, in_fd, offset, count) \ + __sanitizer_syscall_pre_impl_sendfile((long)(out_fd), (long)(in_fd), \ + (long)(offset), (long)(count)) +#define __sanitizer_syscall_post_sendfile(res, out_fd, in_fd, offset, count) \ + __sanitizer_syscall_post_impl_sendfile(res, (long)(out_fd), (long)(in_fd), \ + (long)(offset), (long)(count)) +#define __sanitizer_syscall_pre_sendfile64(out_fd, in_fd, offset, count) \ + __sanitizer_syscall_pre_impl_sendfile64((long)(out_fd), (long)(in_fd), \ + (long)(offset), (long)(count)) +#define __sanitizer_syscall_post_sendfile64(res, out_fd, in_fd, offset, count) \ + __sanitizer_syscall_post_impl_sendfile64(res, (long)(out_fd), (long)(in_fd), \ + (long)(offset), (long)(count)) +#define __sanitizer_syscall_pre_readlink(path, buf, bufsiz) \ + __sanitizer_syscall_pre_impl_readlink((long)(path), (long)(buf), \ + (long)(bufsiz)) +#define __sanitizer_syscall_post_readlink(res, path, buf, bufsiz) \ + __sanitizer_syscall_post_impl_readlink(res, (long)(path), (long)(buf), \ + (long)(bufsiz)) +#define __sanitizer_syscall_pre_creat(pathname, mode) \ + __sanitizer_syscall_pre_impl_creat((long)(pathname), (long)(mode)) +#define __sanitizer_syscall_post_creat(res, pathname, mode) \ + __sanitizer_syscall_post_impl_creat(res, (long)(pathname), (long)(mode)) +#define __sanitizer_syscall_pre_open(filename, flags, mode) \ + __sanitizer_syscall_pre_impl_open((long)(filename), (long)(flags), \ + (long)(mode)) +#define __sanitizer_syscall_post_open(res, filename, flags, mode) \ + __sanitizer_syscall_post_impl_open(res, (long)(filename), (long)(flags), \ + (long)(mode)) +#define __sanitizer_syscall_pre_close(fd) \ + __sanitizer_syscall_pre_impl_close((long)(fd)) +#define __sanitizer_syscall_post_close(res, fd) 
\ + __sanitizer_syscall_post_impl_close(res, (long)(fd)) +#define __sanitizer_syscall_pre_access(filename, mode) \ + __sanitizer_syscall_pre_impl_access((long)(filename), (long)(mode)) +#define __sanitizer_syscall_post_access(res, filename, mode) \ + __sanitizer_syscall_post_impl_access(res, (long)(filename), (long)(mode)) +#define __sanitizer_syscall_pre_vhangup() __sanitizer_syscall_pre_impl_vhangup() +#define __sanitizer_syscall_post_vhangup(res) \ + __sanitizer_syscall_post_impl_vhangup(res) +#define __sanitizer_syscall_pre_chown(filename, user, group) \ + __sanitizer_syscall_pre_impl_chown((long)(filename), (long)(user), \ + (long)(group)) +#define __sanitizer_syscall_post_chown(res, filename, user, group) \ + __sanitizer_syscall_post_impl_chown(res, (long)(filename), (long)(user), \ + (long)(group)) +#define __sanitizer_syscall_pre_lchown(filename, user, group) \ + __sanitizer_syscall_pre_impl_lchown((long)(filename), (long)(user), \ + (long)(group)) +#define __sanitizer_syscall_post_lchown(res, filename, user, group) \ + __sanitizer_syscall_post_impl_lchown(res, (long)(filename), (long)(user), \ + (long)(group)) +#define __sanitizer_syscall_pre_fchown(fd, user, group) \ + __sanitizer_syscall_pre_impl_fchown((long)(fd), (long)(user), (long)(group)) +#define __sanitizer_syscall_post_fchown(res, fd, user, group) \ + __sanitizer_syscall_post_impl_fchown(res, (long)(fd), (long)(user), \ + (long)(group)) +#define __sanitizer_syscall_pre_chown16(filename, user, group) \ + __sanitizer_syscall_pre_impl_chown16((long)(filename), (long)user, \ + (long)group) +#define __sanitizer_syscall_post_chown16(res, filename, user, group) \ + __sanitizer_syscall_post_impl_chown16(res, (long)(filename), (long)user, \ + (long)group) +#define __sanitizer_syscall_pre_lchown16(filename, user, group) \ + __sanitizer_syscall_pre_impl_lchown16((long)(filename), (long)user, \ + (long)group) +#define __sanitizer_syscall_post_lchown16(res, filename, user, group) \ + __sanitizer_syscall_post_impl_lchown16(res, (long)(filename), (long)user, \ + (long)group) +#define __sanitizer_syscall_pre_fchown16(fd, user, group) \ + __sanitizer_syscall_pre_impl_fchown16((long)(fd), (long)user, (long)group) +#define __sanitizer_syscall_post_fchown16(res, fd, user, group) \ + __sanitizer_syscall_post_impl_fchown16(res, (long)(fd), (long)user, \ + (long)group) +#define __sanitizer_syscall_pre_setregid16(rgid, egid) \ + __sanitizer_syscall_pre_impl_setregid16((long)rgid, (long)egid) +#define __sanitizer_syscall_post_setregid16(res, rgid, egid) \ + __sanitizer_syscall_post_impl_setregid16(res, (long)rgid, (long)egid) +#define __sanitizer_syscall_pre_setgid16(gid) \ + __sanitizer_syscall_pre_impl_setgid16((long)gid) +#define __sanitizer_syscall_post_setgid16(res, gid) \ + __sanitizer_syscall_post_impl_setgid16(res, (long)gid) +#define __sanitizer_syscall_pre_setreuid16(ruid, euid) \ + __sanitizer_syscall_pre_impl_setreuid16((long)ruid, (long)euid) +#define __sanitizer_syscall_post_setreuid16(res, ruid, euid) \ + __sanitizer_syscall_post_impl_setreuid16(res, (long)ruid, (long)euid) +#define __sanitizer_syscall_pre_setuid16(uid) \ + __sanitizer_syscall_pre_impl_setuid16((long)uid) +#define __sanitizer_syscall_post_setuid16(res, uid) \ + __sanitizer_syscall_post_impl_setuid16(res, (long)uid) +#define __sanitizer_syscall_pre_setresuid16(ruid, euid, suid) \ + __sanitizer_syscall_pre_impl_setresuid16((long)ruid, (long)euid, (long)suid) +#define __sanitizer_syscall_post_setresuid16(res, ruid, euid, suid) \ + 
__sanitizer_syscall_post_impl_setresuid16(res, (long)ruid, (long)euid, \ + (long)suid) +#define __sanitizer_syscall_pre_getresuid16(ruid, euid, suid) \ + __sanitizer_syscall_pre_impl_getresuid16((long)(ruid), (long)(euid), \ + (long)(suid)) +#define __sanitizer_syscall_post_getresuid16(res, ruid, euid, suid) \ + __sanitizer_syscall_post_impl_getresuid16(res, (long)(ruid), (long)(euid), \ + (long)(suid)) +#define __sanitizer_syscall_pre_setresgid16(rgid, egid, sgid) \ + __sanitizer_syscall_pre_impl_setresgid16((long)rgid, (long)egid, (long)sgid) +#define __sanitizer_syscall_post_setresgid16(res, rgid, egid, sgid) \ + __sanitizer_syscall_post_impl_setresgid16(res, (long)rgid, (long)egid, \ + (long)sgid) +#define __sanitizer_syscall_pre_getresgid16(rgid, egid, sgid) \ + __sanitizer_syscall_pre_impl_getresgid16((long)(rgid), (long)(egid), \ + (long)(sgid)) +#define __sanitizer_syscall_post_getresgid16(res, rgid, egid, sgid) \ + __sanitizer_syscall_post_impl_getresgid16(res, (long)(rgid), (long)(egid), \ + (long)(sgid)) +#define __sanitizer_syscall_pre_setfsuid16(uid) \ + __sanitizer_syscall_pre_impl_setfsuid16((long)uid) +#define __sanitizer_syscall_post_setfsuid16(res, uid) \ + __sanitizer_syscall_post_impl_setfsuid16(res, (long)uid) +#define __sanitizer_syscall_pre_setfsgid16(gid) \ + __sanitizer_syscall_pre_impl_setfsgid16((long)gid) +#define __sanitizer_syscall_post_setfsgid16(res, gid) \ + __sanitizer_syscall_post_impl_setfsgid16(res, (long)gid) +#define __sanitizer_syscall_pre_getgroups16(gidsetsize, grouplist) \ + __sanitizer_syscall_pre_impl_getgroups16((long)(gidsetsize), \ + (long)(grouplist)) +#define __sanitizer_syscall_post_getgroups16(res, gidsetsize, grouplist) \ + __sanitizer_syscall_post_impl_getgroups16(res, (long)(gidsetsize), \ + (long)(grouplist)) +#define __sanitizer_syscall_pre_setgroups16(gidsetsize, grouplist) \ + __sanitizer_syscall_pre_impl_setgroups16((long)(gidsetsize), \ + (long)(grouplist)) +#define __sanitizer_syscall_post_setgroups16(res, gidsetsize, grouplist) \ + __sanitizer_syscall_post_impl_setgroups16(res, (long)(gidsetsize), \ + (long)(grouplist)) +#define __sanitizer_syscall_pre_getuid16() \ + __sanitizer_syscall_pre_impl_getuid16() +#define __sanitizer_syscall_post_getuid16(res) \ + __sanitizer_syscall_post_impl_getuid16(res) +#define __sanitizer_syscall_pre_geteuid16() \ + __sanitizer_syscall_pre_impl_geteuid16() +#define __sanitizer_syscall_post_geteuid16(res) \ + __sanitizer_syscall_post_impl_geteuid16(res) +#define __sanitizer_syscall_pre_getgid16() \ + __sanitizer_syscall_pre_impl_getgid16() +#define __sanitizer_syscall_post_getgid16(res) \ + __sanitizer_syscall_post_impl_getgid16(res) +#define __sanitizer_syscall_pre_getegid16() \ + __sanitizer_syscall_pre_impl_getegid16() +#define __sanitizer_syscall_post_getegid16(res) \ + __sanitizer_syscall_post_impl_getegid16(res) +#define __sanitizer_syscall_pre_utime(filename, times) \ + __sanitizer_syscall_pre_impl_utime((long)(filename), (long)(times)) +#define __sanitizer_syscall_post_utime(res, filename, times) \ + __sanitizer_syscall_post_impl_utime(res, (long)(filename), (long)(times)) +#define __sanitizer_syscall_pre_utimes(filename, utimes) \ + __sanitizer_syscall_pre_impl_utimes((long)(filename), (long)(utimes)) +#define __sanitizer_syscall_post_utimes(res, filename, utimes) \ + __sanitizer_syscall_post_impl_utimes(res, (long)(filename), (long)(utimes)) +#define __sanitizer_syscall_pre_lseek(fd, offset, origin) \ + __sanitizer_syscall_pre_impl_lseek((long)(fd), (long)(offset), (long)(origin)) 
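+// Illustrative usage sketch (added for clarity; not part of the upstream
+// header): a program that issues raw syscalls can keep the sanitizer
+// runtime informed by bracketing each call with the matching pre/post
+// hooks defined above, e.g. for open(2):
+//
+//   __sanitizer_syscall_pre_open(filename, flags, mode);
+//   long res = syscall(__NR_open, filename, flags, mode);
+//   __sanitizer_syscall_post_open(res, filename, flags, mode);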
+#define __sanitizer_syscall_post_lseek(res, fd, offset, origin) \ + __sanitizer_syscall_post_impl_lseek(res, (long)(fd), (long)(offset), \ + (long)(origin)) +#define __sanitizer_syscall_pre_llseek(fd, offset_high, offset_low, result, \ + origin) \ + __sanitizer_syscall_pre_impl_llseek((long)(fd), (long)(offset_high), \ + (long)(offset_low), (long)(result), \ + (long)(origin)) +#define __sanitizer_syscall_post_llseek(res, fd, offset_high, offset_low, \ + result, origin) \ + __sanitizer_syscall_post_impl_llseek(res, (long)(fd), (long)(offset_high), \ + (long)(offset_low), (long)(result), \ + (long)(origin)) +#define __sanitizer_syscall_pre_read(fd, buf, count) \ + __sanitizer_syscall_pre_impl_read((long)(fd), (long)(buf), (long)(count)) +#define __sanitizer_syscall_post_read(res, fd, buf, count) \ + __sanitizer_syscall_post_impl_read(res, (long)(fd), (long)(buf), \ + (long)(count)) +#define __sanitizer_syscall_pre_readv(fd, vec, vlen) \ + __sanitizer_syscall_pre_impl_readv((long)(fd), (long)(vec), (long)(vlen)) +#define __sanitizer_syscall_post_readv(res, fd, vec, vlen) \ + __sanitizer_syscall_post_impl_readv(res, (long)(fd), (long)(vec), \ + (long)(vlen)) +#define __sanitizer_syscall_pre_write(fd, buf, count) \ + __sanitizer_syscall_pre_impl_write((long)(fd), (long)(buf), (long)(count)) +#define __sanitizer_syscall_post_write(res, fd, buf, count) \ + __sanitizer_syscall_post_impl_write(res, (long)(fd), (long)(buf), \ + (long)(count)) +#define __sanitizer_syscall_pre_writev(fd, vec, vlen) \ + __sanitizer_syscall_pre_impl_writev((long)(fd), (long)(vec), (long)(vlen)) +#define __sanitizer_syscall_post_writev(res, fd, vec, vlen) \ + __sanitizer_syscall_post_impl_writev(res, (long)(fd), (long)(vec), \ + (long)(vlen)) + +#ifdef _LP64 +#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos) \ + __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \ + (long)(pos)) +#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos) \ + __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \ + (long)(count), (long)(pos)) +#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos) \ + __sanitizer_syscall_pre_impl_pwrite64((long)(fd), (long)(buf), \ + (long)(count), (long)(pos)) +#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos) \ + __sanitizer_syscall_post_impl_pwrite64(res, (long)(fd), (long)(buf), \ + (long)(count), (long)(pos)) +#else +#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1) \ + __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \ + (long)(pos0), (long)(pos1)) +#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \ + __sanitizer_syscall_post_impl_pread64( \ + res, (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1)) +#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \ + __sanitizer_syscall_pre_impl_pwrite64( \ + (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1)) +#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos0, pos1) \ + __sanitizer_syscall_post_impl_pwrite64( \ + res, (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1)) +#endif + +#define __sanitizer_syscall_pre_preadv(fd, vec, vlen, pos_l, pos_h) \ + __sanitizer_syscall_pre_impl_preadv((long)(fd), (long)(vec), (long)(vlen), \ + (long)(pos_l), (long)(pos_h)) +#define __sanitizer_syscall_post_preadv(res, fd, vec, vlen, pos_l, pos_h) \ + __sanitizer_syscall_post_impl_preadv(res, (long)(fd), (long)(vec), \ + (long)(vlen), 
(long)(pos_l), \ + (long)(pos_h)) +#define __sanitizer_syscall_pre_pwritev(fd, vec, vlen, pos_l, pos_h) \ + __sanitizer_syscall_pre_impl_pwritev((long)(fd), (long)(vec), (long)(vlen), \ + (long)(pos_l), (long)(pos_h)) +#define __sanitizer_syscall_post_pwritev(res, fd, vec, vlen, pos_l, pos_h) \ + __sanitizer_syscall_post_impl_pwritev(res, (long)(fd), (long)(vec), \ + (long)(vlen), (long)(pos_l), \ + (long)(pos_h)) +#define __sanitizer_syscall_pre_getcwd(buf, size) \ + __sanitizer_syscall_pre_impl_getcwd((long)(buf), (long)(size)) +#define __sanitizer_syscall_post_getcwd(res, buf, size) \ + __sanitizer_syscall_post_impl_getcwd(res, (long)(buf), (long)(size)) +#define __sanitizer_syscall_pre_mkdir(pathname, mode) \ + __sanitizer_syscall_pre_impl_mkdir((long)(pathname), (long)(mode)) +#define __sanitizer_syscall_post_mkdir(res, pathname, mode) \ + __sanitizer_syscall_post_impl_mkdir(res, (long)(pathname), (long)(mode)) +#define __sanitizer_syscall_pre_chdir(filename) \ + __sanitizer_syscall_pre_impl_chdir((long)(filename)) +#define __sanitizer_syscall_post_chdir(res, filename) \ + __sanitizer_syscall_post_impl_chdir(res, (long)(filename)) +#define __sanitizer_syscall_pre_fchdir(fd) \ + __sanitizer_syscall_pre_impl_fchdir((long)(fd)) +#define __sanitizer_syscall_post_fchdir(res, fd) \ + __sanitizer_syscall_post_impl_fchdir(res, (long)(fd)) +#define __sanitizer_syscall_pre_rmdir(pathname) \ + __sanitizer_syscall_pre_impl_rmdir((long)(pathname)) +#define __sanitizer_syscall_post_rmdir(res, pathname) \ + __sanitizer_syscall_post_impl_rmdir(res, (long)(pathname)) +#define __sanitizer_syscall_pre_lookup_dcookie(cookie64, buf, len) \ + __sanitizer_syscall_pre_impl_lookup_dcookie((long)(cookie64), (long)(buf), \ + (long)(len)) +#define __sanitizer_syscall_post_lookup_dcookie(res, cookie64, buf, len) \ + __sanitizer_syscall_post_impl_lookup_dcookie(res, (long)(cookie64), \ + (long)(buf), (long)(len)) +#define __sanitizer_syscall_pre_quotactl(cmd, special, id, addr) \ + __sanitizer_syscall_pre_impl_quotactl((long)(cmd), (long)(special), \ + (long)(id), (long)(addr)) +#define __sanitizer_syscall_post_quotactl(res, cmd, special, id, addr) \ + __sanitizer_syscall_post_impl_quotactl(res, (long)(cmd), (long)(special), \ + (long)(id), (long)(addr)) +#define __sanitizer_syscall_pre_getdents(fd, dirent, count) \ + __sanitizer_syscall_pre_impl_getdents((long)(fd), (long)(dirent), \ + (long)(count)) +#define __sanitizer_syscall_post_getdents(res, fd, dirent, count) \ + __sanitizer_syscall_post_impl_getdents(res, (long)(fd), (long)(dirent), \ + (long)(count)) +#define __sanitizer_syscall_pre_getdents64(fd, dirent, count) \ + __sanitizer_syscall_pre_impl_getdents64((long)(fd), (long)(dirent), \ + (long)(count)) +#define __sanitizer_syscall_post_getdents64(res, fd, dirent, count) \ + __sanitizer_syscall_post_impl_getdents64(res, (long)(fd), (long)(dirent), \ + (long)(count)) +#define __sanitizer_syscall_pre_setsockopt(fd, level, optname, optval, optlen) \ + __sanitizer_syscall_pre_impl_setsockopt((long)(fd), (long)(level), \ + (long)(optname), (long)(optval), \ + (long)(optlen)) +#define __sanitizer_syscall_post_setsockopt(res, fd, level, optname, optval, \ + optlen) \ + __sanitizer_syscall_post_impl_setsockopt(res, (long)(fd), (long)(level), \ + (long)(optname), (long)(optval), \ + (long)(optlen)) +#define __sanitizer_syscall_pre_getsockopt(fd, level, optname, optval, optlen) \ + __sanitizer_syscall_pre_impl_getsockopt((long)(fd), (long)(level), \ + (long)(optname), (long)(optval), \ + (long)(optlen)) +#define 
__sanitizer_syscall_post_getsockopt(res, fd, level, optname, optval, \ + optlen) \ + __sanitizer_syscall_post_impl_getsockopt(res, (long)(fd), (long)(level), \ + (long)(optname), (long)(optval), \ + (long)(optlen)) +#define __sanitizer_syscall_pre_bind(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_bind((long)(arg0), (long)(arg1), (long)(arg2)) +#define __sanitizer_syscall_post_bind(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_bind(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_connect(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_connect((long)(arg0), (long)(arg1), (long)(arg2)) +#define __sanitizer_syscall_post_connect(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_connect(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_accept(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_accept((long)(arg0), (long)(arg1), (long)(arg2)) +#define __sanitizer_syscall_post_accept(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_accept(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_accept4(arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_pre_impl_accept4((long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3)) +#define __sanitizer_syscall_post_accept4(res, arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_post_impl_accept4(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3)) +#define __sanitizer_syscall_pre_getsockname(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_getsockname((long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_post_getsockname(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_getsockname(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_getpeername(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_getpeername((long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_post_getpeername(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_getpeername(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_send(arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_pre_impl_send((long)(arg0), (long)(arg1), (long)(arg2), \ + (long)(arg3)) +#define __sanitizer_syscall_post_send(res, arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_post_impl_send(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3)) +#define __sanitizer_syscall_pre_sendto(arg0, arg1, arg2, arg3, arg4, arg5) \ + __sanitizer_syscall_pre_impl_sendto((long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_post_sendto(res, arg0, arg1, arg2, arg3, arg4, \ + arg5) \ + __sanitizer_syscall_post_impl_sendto(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_pre_sendmsg(fd, msg, flags) \ + __sanitizer_syscall_pre_impl_sendmsg((long)(fd), (long)(msg), (long)(flags)) +#define __sanitizer_syscall_post_sendmsg(res, fd, msg, flags) \ + __sanitizer_syscall_post_impl_sendmsg(res, (long)(fd), (long)(msg), \ + (long)(flags)) +#define __sanitizer_syscall_pre_sendmmsg(fd, msg, vlen, flags) \ + __sanitizer_syscall_pre_impl_sendmmsg((long)(fd), (long)(msg), (long)(vlen), \ + (long)(flags)) +#define __sanitizer_syscall_post_sendmmsg(res, fd, msg, vlen, flags) \ + __sanitizer_syscall_post_impl_sendmmsg(res, (long)(fd), (long)(msg), \ + (long)(vlen), (long)(flags)) +#define __sanitizer_syscall_pre_recv(arg0, arg1, arg2, arg3) \ + 
__sanitizer_syscall_pre_impl_recv((long)(arg0), (long)(arg1), (long)(arg2), \ + (long)(arg3)) +#define __sanitizer_syscall_post_recv(res, arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_post_impl_recv(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3)) +#define __sanitizer_syscall_pre_recvfrom(arg0, arg1, arg2, arg3, arg4, arg5) \ + __sanitizer_syscall_pre_impl_recvfrom((long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_post_recvfrom(res, arg0, arg1, arg2, arg3, arg4, \ + arg5) \ + __sanitizer_syscall_post_impl_recvfrom(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_pre_recvmsg(fd, msg, flags) \ + __sanitizer_syscall_pre_impl_recvmsg((long)(fd), (long)(msg), (long)(flags)) +#define __sanitizer_syscall_post_recvmsg(res, fd, msg, flags) \ + __sanitizer_syscall_post_impl_recvmsg(res, (long)(fd), (long)(msg), \ + (long)(flags)) +#define __sanitizer_syscall_pre_recvmmsg(fd, msg, vlen, flags, timeout) \ + __sanitizer_syscall_pre_impl_recvmmsg((long)(fd), (long)(msg), (long)(vlen), \ + (long)(flags), (long)(timeout)) +#define __sanitizer_syscall_post_recvmmsg(res, fd, msg, vlen, flags, timeout) \ + __sanitizer_syscall_post_impl_recvmmsg(res, (long)(fd), (long)(msg), \ + (long)(vlen), (long)(flags), \ + (long)(timeout)) +#define __sanitizer_syscall_pre_socket(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_socket((long)(arg0), (long)(arg1), (long)(arg2)) +#define __sanitizer_syscall_post_socket(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_socket(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_socketpair(arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_pre_impl_socketpair((long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3)) +#define __sanitizer_syscall_post_socketpair(res, arg0, arg1, arg2, arg3) \ + __sanitizer_syscall_post_impl_socketpair(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3)) +#define __sanitizer_syscall_pre_socketcall(call, args) \ + __sanitizer_syscall_pre_impl_socketcall((long)(call), (long)(args)) +#define __sanitizer_syscall_post_socketcall(res, call, args) \ + __sanitizer_syscall_post_impl_socketcall(res, (long)(call), (long)(args)) +#define __sanitizer_syscall_pre_listen(arg0, arg1) \ + __sanitizer_syscall_pre_impl_listen((long)(arg0), (long)(arg1)) +#define __sanitizer_syscall_post_listen(res, arg0, arg1) \ + __sanitizer_syscall_post_impl_listen(res, (long)(arg0), (long)(arg1)) +#define __sanitizer_syscall_pre_poll(ufds, nfds, timeout) \ + __sanitizer_syscall_pre_impl_poll((long)(ufds), (long)(nfds), (long)(timeout)) +#define __sanitizer_syscall_post_poll(res, ufds, nfds, timeout) \ + __sanitizer_syscall_post_impl_poll(res, (long)(ufds), (long)(nfds), \ + (long)(timeout)) +#define __sanitizer_syscall_pre_select(n, inp, outp, exp, tvp) \ + __sanitizer_syscall_pre_impl_select((long)(n), (long)(inp), (long)(outp), \ + (long)(exp), (long)(tvp)) +#define __sanitizer_syscall_post_select(res, n, inp, outp, exp, tvp) \ + __sanitizer_syscall_post_impl_select(res, (long)(n), (long)(inp), \ + (long)(outp), (long)(exp), (long)(tvp)) +#define __sanitizer_syscall_pre_old_select(arg) \ + __sanitizer_syscall_pre_impl_old_select((long)(arg)) +#define __sanitizer_syscall_post_old_select(res, arg) \ + __sanitizer_syscall_post_impl_old_select(res, (long)(arg)) +#define __sanitizer_syscall_pre_epoll_create(size) \ + 
__sanitizer_syscall_pre_impl_epoll_create((long)(size)) +#define __sanitizer_syscall_post_epoll_create(res, size) \ + __sanitizer_syscall_post_impl_epoll_create(res, (long)(size)) +#define __sanitizer_syscall_pre_epoll_create1(flags) \ + __sanitizer_syscall_pre_impl_epoll_create1((long)(flags)) +#define __sanitizer_syscall_post_epoll_create1(res, flags) \ + __sanitizer_syscall_post_impl_epoll_create1(res, (long)(flags)) +#define __sanitizer_syscall_pre_epoll_ctl(epfd, op, fd, event) \ + __sanitizer_syscall_pre_impl_epoll_ctl((long)(epfd), (long)(op), (long)(fd), \ + (long)(event)) +#define __sanitizer_syscall_post_epoll_ctl(res, epfd, op, fd, event) \ + __sanitizer_syscall_post_impl_epoll_ctl(res, (long)(epfd), (long)(op), \ + (long)(fd), (long)(event)) +#define __sanitizer_syscall_pre_epoll_wait(epfd, events, maxevents, timeout) \ + __sanitizer_syscall_pre_impl_epoll_wait((long)(epfd), (long)(events), \ + (long)(maxevents), (long)(timeout)) +#define __sanitizer_syscall_post_epoll_wait(res, epfd, events, maxevents, \ + timeout) \ + __sanitizer_syscall_post_impl_epoll_wait(res, (long)(epfd), (long)(events), \ + (long)(maxevents), (long)(timeout)) +#define __sanitizer_syscall_pre_epoll_pwait(epfd, events, maxevents, timeout, \ + sigmask, sigsetsize) \ + __sanitizer_syscall_pre_impl_epoll_pwait( \ + (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \ + (long)(sigmask), (long)(sigsetsize)) +#define __sanitizer_syscall_post_epoll_pwait(res, epfd, events, maxevents, \ + timeout, sigmask, sigsetsize) \ + __sanitizer_syscall_post_impl_epoll_pwait( \ + res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \ + (long)(sigmask), (long)(sigsetsize)) +#define __sanitizer_syscall_pre_epoll_pwait2(epfd, events, maxevents, timeout, \ + sigmask, sigsetsize) \ + __sanitizer_syscall_pre_impl_epoll_pwait2( \ + (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \ + (long)(sigmask), (long)(sigsetsize)) +#define __sanitizer_syscall_post_epoll_pwait2(res, epfd, events, maxevents, \ + timeout, sigmask, sigsetsize) \ + __sanitizer_syscall_post_impl_epoll_pwait2( \ + res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \ + (long)(sigmask), (long)(sigsetsize)) +#define __sanitizer_syscall_pre_gethostname(name, len) \ + __sanitizer_syscall_pre_impl_gethostname((long)(name), (long)(len)) +#define __sanitizer_syscall_post_gethostname(res, name, len) \ + __sanitizer_syscall_post_impl_gethostname(res, (long)(name), (long)(len)) +#define __sanitizer_syscall_pre_sethostname(name, len) \ + __sanitizer_syscall_pre_impl_sethostname((long)(name), (long)(len)) +#define __sanitizer_syscall_post_sethostname(res, name, len) \ + __sanitizer_syscall_post_impl_sethostname(res, (long)(name), (long)(len)) +#define __sanitizer_syscall_pre_setdomainname(name, len) \ + __sanitizer_syscall_pre_impl_setdomainname((long)(name), (long)(len)) +#define __sanitizer_syscall_post_setdomainname(res, name, len) \ + __sanitizer_syscall_post_impl_setdomainname(res, (long)(name), (long)(len)) +#define __sanitizer_syscall_pre_newuname(name) \ + __sanitizer_syscall_pre_impl_newuname((long)(name)) +#define __sanitizer_syscall_post_newuname(res, name) \ + __sanitizer_syscall_post_impl_newuname(res, (long)(name)) +#define __sanitizer_syscall_pre_uname(arg0) \ + __sanitizer_syscall_pre_impl_uname((long)(arg0)) +#define __sanitizer_syscall_post_uname(res, arg0) \ + __sanitizer_syscall_post_impl_uname(res, (long)(arg0)) +#define __sanitizer_syscall_pre_olduname(arg0) \ + 
__sanitizer_syscall_pre_impl_olduname((long)(arg0)) +#define __sanitizer_syscall_post_olduname(res, arg0) \ + __sanitizer_syscall_post_impl_olduname(res, (long)(arg0)) +#define __sanitizer_syscall_pre_getrlimit(resource, rlim) \ + __sanitizer_syscall_pre_impl_getrlimit((long)(resource), (long)(rlim)) +#define __sanitizer_syscall_post_getrlimit(res, resource, rlim) \ + __sanitizer_syscall_post_impl_getrlimit(res, (long)(resource), (long)(rlim)) +#define __sanitizer_syscall_pre_old_getrlimit(resource, rlim) \ + __sanitizer_syscall_pre_impl_old_getrlimit((long)(resource), (long)(rlim)) +#define __sanitizer_syscall_post_old_getrlimit(res, resource, rlim) \ + __sanitizer_syscall_post_impl_old_getrlimit(res, (long)(resource), \ + (long)(rlim)) +#define __sanitizer_syscall_pre_setrlimit(resource, rlim) \ + __sanitizer_syscall_pre_impl_setrlimit((long)(resource), (long)(rlim)) +#define __sanitizer_syscall_post_setrlimit(res, resource, rlim) \ + __sanitizer_syscall_post_impl_setrlimit(res, (long)(resource), (long)(rlim)) +#define __sanitizer_syscall_pre_prlimit64(pid, resource, new_rlim, old_rlim) \ + __sanitizer_syscall_pre_impl_prlimit64((long)(pid), (long)(resource), \ + (long)(new_rlim), (long)(old_rlim)) +#define __sanitizer_syscall_post_prlimit64(res, pid, resource, new_rlim, \ + old_rlim) \ + __sanitizer_syscall_post_impl_prlimit64(res, (long)(pid), (long)(resource), \ + (long)(new_rlim), (long)(old_rlim)) +#define __sanitizer_syscall_pre_getrusage(who, ru) \ + __sanitizer_syscall_pre_impl_getrusage((long)(who), (long)(ru)) +#define __sanitizer_syscall_post_getrusage(res, who, ru) \ + __sanitizer_syscall_post_impl_getrusage(res, (long)(who), (long)(ru)) +#define __sanitizer_syscall_pre_umask(mask) \ + __sanitizer_syscall_pre_impl_umask((long)(mask)) +#define __sanitizer_syscall_post_umask(res, mask) \ + __sanitizer_syscall_post_impl_umask(res, (long)(mask)) +#define __sanitizer_syscall_pre_msgget(key, msgflg) \ + __sanitizer_syscall_pre_impl_msgget((long)(key), (long)(msgflg)) +#define __sanitizer_syscall_post_msgget(res, key, msgflg) \ + __sanitizer_syscall_post_impl_msgget(res, (long)(key), (long)(msgflg)) +#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg) \ + __sanitizer_syscall_pre_impl_msgsnd((long)(msqid), (long)(msgp), \ + (long)(msgsz), (long)(msgflg)) +#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg) \ + __sanitizer_syscall_post_impl_msgsnd(res, (long)(msqid), (long)(msgp), \ + (long)(msgsz), (long)(msgflg)) +#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) \ + __sanitizer_syscall_pre_impl_msgrcv((long)(msqid), (long)(msgp), \ + (long)(msgsz), (long)(msgtyp), \ + (long)(msgflg)) +#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp, \ + msgflg) \ + __sanitizer_syscall_post_impl_msgrcv(res, (long)(msqid), (long)(msgp), \ + (long)(msgsz), (long)(msgtyp), \ + (long)(msgflg)) +#define __sanitizer_syscall_pre_msgctl(msqid, cmd, buf) \ + __sanitizer_syscall_pre_impl_msgctl((long)(msqid), (long)(cmd), (long)(buf)) +#define __sanitizer_syscall_post_msgctl(res, msqid, cmd, buf) \ + __sanitizer_syscall_post_impl_msgctl(res, (long)(msqid), (long)(cmd), \ + (long)(buf)) +#define __sanitizer_syscall_pre_semget(key, nsems, semflg) \ + __sanitizer_syscall_pre_impl_semget((long)(key), (long)(nsems), \ + (long)(semflg)) +#define __sanitizer_syscall_post_semget(res, key, nsems, semflg) \ + __sanitizer_syscall_post_impl_semget(res, (long)(key), (long)(nsems), \ + (long)(semflg)) +#define 
__sanitizer_syscall_pre_semop(semid, sops, nsops) \ + __sanitizer_syscall_pre_impl_semop((long)(semid), (long)(sops), (long)(nsops)) +#define __sanitizer_syscall_post_semop(res, semid, sops, nsops) \ + __sanitizer_syscall_post_impl_semop(res, (long)(semid), (long)(sops), \ + (long)(nsops)) +#define __sanitizer_syscall_pre_semctl(semid, semnum, cmd, arg) \ + __sanitizer_syscall_pre_impl_semctl((long)(semid), (long)(semnum), \ + (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_post_semctl(res, semid, semnum, cmd, arg) \ + __sanitizer_syscall_post_impl_semctl(res, (long)(semid), (long)(semnum), \ + (long)(cmd), (long)(arg)) +#define __sanitizer_syscall_pre_semtimedop(semid, sops, nsops, timeout) \ + __sanitizer_syscall_pre_impl_semtimedop((long)(semid), (long)(sops), \ + (long)(nsops), (long)(timeout)) +#define __sanitizer_syscall_post_semtimedop(res, semid, sops, nsops, timeout) \ + __sanitizer_syscall_post_impl_semtimedop(res, (long)(semid), (long)(sops), \ + (long)(nsops), (long)(timeout)) +#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg) \ + __sanitizer_syscall_pre_impl_shmat((long)(shmid), (long)(shmaddr), \ + (long)(shmflg)) +#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg) \ + __sanitizer_syscall_post_impl_shmat(res, (long)(shmid), (long)(shmaddr), \ + (long)(shmflg)) +#define __sanitizer_syscall_pre_shmget(key, size, flag) \ + __sanitizer_syscall_pre_impl_shmget((long)(key), (long)(size), (long)(flag)) +#define __sanitizer_syscall_post_shmget(res, key, size, flag) \ + __sanitizer_syscall_post_impl_shmget(res, (long)(key), (long)(size), \ + (long)(flag)) +#define __sanitizer_syscall_pre_shmdt(shmaddr) \ + __sanitizer_syscall_pre_impl_shmdt((long)(shmaddr)) +#define __sanitizer_syscall_post_shmdt(res, shmaddr) \ + __sanitizer_syscall_post_impl_shmdt(res, (long)(shmaddr)) +#define __sanitizer_syscall_pre_shmctl(shmid, cmd, buf) \ + __sanitizer_syscall_pre_impl_shmctl((long)(shmid), (long)(cmd), (long)(buf)) +#define __sanitizer_syscall_post_shmctl(res, shmid, cmd, buf) \ + __sanitizer_syscall_post_impl_shmctl(res, (long)(shmid), (long)(cmd), \ + (long)(buf)) +#define __sanitizer_syscall_pre_ipc(call, first, second, third, ptr, fifth) \ + __sanitizer_syscall_pre_impl_ipc((long)(call), (long)(first), \ + (long)(second), (long)(third), (long)(ptr), \ + (long)(fifth)) +#define __sanitizer_syscall_post_ipc(res, call, first, second, third, ptr, \ + fifth) \ + __sanitizer_syscall_post_impl_ipc(res, (long)(call), (long)(first), \ + (long)(second), (long)(third), \ + (long)(ptr), (long)(fifth)) +#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr) \ + __sanitizer_syscall_pre_impl_mq_open((long)(name), (long)(oflag), \ + (long)(mode), (long)(attr)) +#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr) \ + __sanitizer_syscall_post_impl_mq_open(res, (long)(name), (long)(oflag), \ + (long)(mode), (long)(attr)) +#define __sanitizer_syscall_pre_mq_unlink(name) \ + __sanitizer_syscall_pre_impl_mq_unlink((long)(name)) +#define __sanitizer_syscall_post_mq_unlink(res, name) \ + __sanitizer_syscall_post_impl_mq_unlink(res, (long)(name)) +#define __sanitizer_syscall_pre_mq_timedsend(mqdes, msg_ptr, msg_len, \ + msg_prio, abs_timeout) \ + __sanitizer_syscall_pre_impl_mq_timedsend((long)(mqdes), (long)(msg_ptr), \ + (long)(msg_len), (long)(msg_prio), \ + (long)(abs_timeout)) +#define __sanitizer_syscall_post_mq_timedsend(res, mqdes, msg_ptr, msg_len, \ + msg_prio, abs_timeout) \ + __sanitizer_syscall_post_impl_mq_timedsend( \ + res, 
(long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \ + (long)(abs_timeout)) +#define __sanitizer_syscall_pre_mq_timedreceive(mqdes, msg_ptr, msg_len, \ + msg_prio, abs_timeout) \ + __sanitizer_syscall_pre_impl_mq_timedreceive( \ + (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \ + (long)(abs_timeout)) +#define __sanitizer_syscall_post_mq_timedreceive(res, mqdes, msg_ptr, msg_len, \ + msg_prio, abs_timeout) \ + __sanitizer_syscall_post_impl_mq_timedreceive( \ + res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \ + (long)(abs_timeout)) +#define __sanitizer_syscall_pre_mq_notify(mqdes, notification) \ + __sanitizer_syscall_pre_impl_mq_notify((long)(mqdes), (long)(notification)) +#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification) \ + __sanitizer_syscall_post_impl_mq_notify(res, (long)(mqdes), \ + (long)(notification)) +#define __sanitizer_syscall_pre_mq_getsetattr(mqdes, mqstat, omqstat) \ + __sanitizer_syscall_pre_impl_mq_getsetattr((long)(mqdes), (long)(mqstat), \ + (long)(omqstat)) +#define __sanitizer_syscall_post_mq_getsetattr(res, mqdes, mqstat, omqstat) \ + __sanitizer_syscall_post_impl_mq_getsetattr(res, (long)(mqdes), \ + (long)(mqstat), (long)(omqstat)) +#define __sanitizer_syscall_pre_pciconfig_iobase(which, bus, devfn) \ + __sanitizer_syscall_pre_impl_pciconfig_iobase((long)(which), (long)(bus), \ + (long)(devfn)) +#define __sanitizer_syscall_post_pciconfig_iobase(res, which, bus, devfn) \ + __sanitizer_syscall_post_impl_pciconfig_iobase(res, (long)(which), \ + (long)(bus), (long)(devfn)) +#define __sanitizer_syscall_pre_pciconfig_read(bus, dfn, off, len, buf) \ + __sanitizer_syscall_pre_impl_pciconfig_read( \ + (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf)) +#define __sanitizer_syscall_post_pciconfig_read(res, bus, dfn, off, len, buf) \ + __sanitizer_syscall_post_impl_pciconfig_read( \ + res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf)) +#define __sanitizer_syscall_pre_pciconfig_write(bus, dfn, off, len, buf) \ + __sanitizer_syscall_pre_impl_pciconfig_write( \ + (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf)) +#define __sanitizer_syscall_post_pciconfig_write(res, bus, dfn, off, len, buf) \ + __sanitizer_syscall_post_impl_pciconfig_write( \ + res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf)) +#define __sanitizer_syscall_pre_swapon(specialfile, swap_flags) \ + __sanitizer_syscall_pre_impl_swapon((long)(specialfile), (long)(swap_flags)) +#define __sanitizer_syscall_post_swapon(res, specialfile, swap_flags) \ + __sanitizer_syscall_post_impl_swapon(res, (long)(specialfile), \ + (long)(swap_flags)) +#define __sanitizer_syscall_pre_swapoff(specialfile) \ + __sanitizer_syscall_pre_impl_swapoff((long)(specialfile)) +#define __sanitizer_syscall_post_swapoff(res, specialfile) \ + __sanitizer_syscall_post_impl_swapoff(res, (long)(specialfile)) +#define __sanitizer_syscall_pre_sysctl(args) \ + __sanitizer_syscall_pre_impl_sysctl((long)(args)) +#define __sanitizer_syscall_post_sysctl(res, args) \ + __sanitizer_syscall_post_impl_sysctl(res, (long)(args)) +#define __sanitizer_syscall_pre_sysinfo(info) \ + __sanitizer_syscall_pre_impl_sysinfo((long)(info)) +#define __sanitizer_syscall_post_sysinfo(res, info) \ + __sanitizer_syscall_post_impl_sysinfo(res, (long)(info)) +#define __sanitizer_syscall_pre_sysfs(option, arg1, arg2) \ + __sanitizer_syscall_pre_impl_sysfs((long)(option), (long)(arg1), (long)(arg2)) +#define 
__sanitizer_syscall_post_sysfs(res, option, arg1, arg2) \ + __sanitizer_syscall_post_impl_sysfs(res, (long)(option), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_syslog(type, buf, len) \ + __sanitizer_syscall_pre_impl_syslog((long)(type), (long)(buf), (long)(len)) +#define __sanitizer_syscall_post_syslog(res, type, buf, len) \ + __sanitizer_syscall_post_impl_syslog(res, (long)(type), (long)(buf), \ + (long)(len)) +#define __sanitizer_syscall_pre_uselib(library) \ + __sanitizer_syscall_pre_impl_uselib((long)(library)) +#define __sanitizer_syscall_post_uselib(res, library) \ + __sanitizer_syscall_post_impl_uselib(res, (long)(library)) +#define __sanitizer_syscall_pre_ni_syscall() \ + __sanitizer_syscall_pre_impl_ni_syscall() +#define __sanitizer_syscall_post_ni_syscall(res) \ + __sanitizer_syscall_post_impl_ni_syscall(res) +#define __sanitizer_syscall_pre_ptrace(request, pid, addr, data) \ + __sanitizer_syscall_pre_impl_ptrace((long)(request), (long)(pid), \ + (long)(addr), (long)(data)) +#define __sanitizer_syscall_post_ptrace(res, request, pid, addr, data) \ + __sanitizer_syscall_post_impl_ptrace(res, (long)(request), (long)(pid), \ + (long)(addr), (long)(data)) +#define __sanitizer_syscall_pre_add_key(_type, _description, _payload, plen, \ + destringid) \ + __sanitizer_syscall_pre_impl_add_key((long)(_type), (long)(_description), \ + (long)(_payload), (long)(plen), \ + (long)(destringid)) +#define __sanitizer_syscall_post_add_key(res, _type, _description, _payload, \ + plen, destringid) \ + __sanitizer_syscall_post_impl_add_key( \ + res, (long)(_type), (long)(_description), (long)(_payload), \ + (long)(plen), (long)(destringid)) +#define __sanitizer_syscall_pre_request_key(_type, _description, \ + _callout_info, destringid) \ + __sanitizer_syscall_pre_impl_request_key( \ + (long)(_type), (long)(_description), (long)(_callout_info), \ + (long)(destringid)) +#define __sanitizer_syscall_post_request_key(res, _type, _description, \ + _callout_info, destringid) \ + __sanitizer_syscall_post_impl_request_key( \ + res, (long)(_type), (long)(_description), (long)(_callout_info), \ + (long)(destringid)) +#define __sanitizer_syscall_pre_keyctl(cmd, arg2, arg3, arg4, arg5) \ + __sanitizer_syscall_pre_impl_keyctl((long)(cmd), (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_post_keyctl(res, cmd, arg2, arg3, arg4, arg5) \ + __sanitizer_syscall_post_impl_keyctl(res, (long)(cmd), (long)(arg2), \ + (long)(arg3), (long)(arg4), \ + (long)(arg5)) +#define __sanitizer_syscall_pre_ioprio_set(which, who, ioprio) \ + __sanitizer_syscall_pre_impl_ioprio_set((long)(which), (long)(who), \ + (long)(ioprio)) +#define __sanitizer_syscall_post_ioprio_set(res, which, who, ioprio) \ + __sanitizer_syscall_post_impl_ioprio_set(res, (long)(which), (long)(who), \ + (long)(ioprio)) +#define __sanitizer_syscall_pre_ioprio_get(which, who) \ + __sanitizer_syscall_pre_impl_ioprio_get((long)(which), (long)(who)) +#define __sanitizer_syscall_post_ioprio_get(res, which, who) \ + __sanitizer_syscall_post_impl_ioprio_get(res, (long)(which), (long)(who)) +#define __sanitizer_syscall_pre_set_mempolicy(mode, nmask, maxnode) \ + __sanitizer_syscall_pre_impl_set_mempolicy((long)(mode), (long)(nmask), \ + (long)(maxnode)) +#define __sanitizer_syscall_post_set_mempolicy(res, mode, nmask, maxnode) \ + __sanitizer_syscall_post_impl_set_mempolicy(res, (long)(mode), \ + (long)(nmask), (long)(maxnode)) +#define __sanitizer_syscall_pre_migrate_pages(pid, maxnode, from, to) \ + 
__sanitizer_syscall_pre_impl_migrate_pages((long)(pid), (long)(maxnode), \ + (long)(from), (long)(to)) +#define __sanitizer_syscall_post_migrate_pages(res, pid, maxnode, from, to) \ + __sanitizer_syscall_post_impl_migrate_pages( \ + res, (long)(pid), (long)(maxnode), (long)(from), (long)(to)) +#define __sanitizer_syscall_pre_move_pages(pid, nr_pages, pages, nodes, \ + status, flags) \ + __sanitizer_syscall_pre_impl_move_pages((long)(pid), (long)(nr_pages), \ + (long)(pages), (long)(nodes), \ + (long)(status), (long)(flags)) +#define __sanitizer_syscall_post_move_pages(res, pid, nr_pages, pages, nodes, \ + status, flags) \ + __sanitizer_syscall_post_impl_move_pages(res, (long)(pid), (long)(nr_pages), \ + (long)(pages), (long)(nodes), \ + (long)(status), (long)(flags)) +#define __sanitizer_syscall_pre_mbind(start, len, mode, nmask, maxnode, flags) \ + __sanitizer_syscall_pre_impl_mbind((long)(start), (long)(len), (long)(mode), \ + (long)(nmask), (long)(maxnode), \ + (long)(flags)) +#define __sanitizer_syscall_post_mbind(res, start, len, mode, nmask, maxnode, \ + flags) \ + __sanitizer_syscall_post_impl_mbind(res, (long)(start), (long)(len), \ + (long)(mode), (long)(nmask), \ + (long)(maxnode), (long)(flags)) +#define __sanitizer_syscall_pre_get_mempolicy(policy, nmask, maxnode, addr, \ + flags) \ + __sanitizer_syscall_pre_impl_get_mempolicy((long)(policy), (long)(nmask), \ + (long)(maxnode), (long)(addr), \ + (long)(flags)) +#define __sanitizer_syscall_post_get_mempolicy(res, policy, nmask, maxnode, \ + addr, flags) \ + __sanitizer_syscall_post_impl_get_mempolicy(res, (long)(policy), \ + (long)(nmask), (long)(maxnode), \ + (long)(addr), (long)(flags)) +#define __sanitizer_syscall_pre_inotify_init() \ + __sanitizer_syscall_pre_impl_inotify_init() +#define __sanitizer_syscall_post_inotify_init(res) \ + __sanitizer_syscall_post_impl_inotify_init(res) +#define __sanitizer_syscall_pre_inotify_init1(flags) \ + __sanitizer_syscall_pre_impl_inotify_init1((long)(flags)) +#define __sanitizer_syscall_post_inotify_init1(res, flags) \ + __sanitizer_syscall_post_impl_inotify_init1(res, (long)(flags)) +#define __sanitizer_syscall_pre_inotify_add_watch(fd, path, mask) \ + __sanitizer_syscall_pre_impl_inotify_add_watch((long)(fd), (long)(path), \ + (long)(mask)) +#define __sanitizer_syscall_post_inotify_add_watch(res, fd, path, mask) \ + __sanitizer_syscall_post_impl_inotify_add_watch(res, (long)(fd), \ + (long)(path), (long)(mask)) +#define __sanitizer_syscall_pre_inotify_rm_watch(fd, wd) \ + __sanitizer_syscall_pre_impl_inotify_rm_watch((long)(fd), (long)(wd)) +#define __sanitizer_syscall_post_inotify_rm_watch(res, fd, wd) \ + __sanitizer_syscall_post_impl_inotify_rm_watch(res, (long)(fd), (long)(wd)) +#define __sanitizer_syscall_pre_spu_run(fd, unpc, ustatus) \ + __sanitizer_syscall_pre_impl_spu_run((long)(fd), (long)(unpc), \ + (long)(ustatus)) +#define __sanitizer_syscall_post_spu_run(res, fd, unpc, ustatus) \ + __sanitizer_syscall_post_impl_spu_run(res, (long)(fd), (long)(unpc), \ + (long)(ustatus)) +#define __sanitizer_syscall_pre_spu_create(name, flags, mode, fd) \ + __sanitizer_syscall_pre_impl_spu_create((long)(name), (long)(flags), \ + (long)(mode), (long)(fd)) +#define __sanitizer_syscall_post_spu_create(res, name, flags, mode, fd) \ + __sanitizer_syscall_post_impl_spu_create(res, (long)(name), (long)(flags), \ + (long)(mode), (long)(fd)) +#define __sanitizer_syscall_pre_mknodat(dfd, filename, mode, dev) \ + __sanitizer_syscall_pre_impl_mknodat((long)(dfd), (long)(filename), \ + (long)(mode), 
(long)(dev)) +#define __sanitizer_syscall_post_mknodat(res, dfd, filename, mode, dev) \ + __sanitizer_syscall_post_impl_mknodat(res, (long)(dfd), (long)(filename), \ + (long)(mode), (long)(dev)) +#define __sanitizer_syscall_pre_mkdirat(dfd, pathname, mode) \ + __sanitizer_syscall_pre_impl_mkdirat((long)(dfd), (long)(pathname), \ + (long)(mode)) +#define __sanitizer_syscall_post_mkdirat(res, dfd, pathname, mode) \ + __sanitizer_syscall_post_impl_mkdirat(res, (long)(dfd), (long)(pathname), \ + (long)(mode)) +#define __sanitizer_syscall_pre_unlinkat(dfd, pathname, flag) \ + __sanitizer_syscall_pre_impl_unlinkat((long)(dfd), (long)(pathname), \ + (long)(flag)) +#define __sanitizer_syscall_post_unlinkat(res, dfd, pathname, flag) \ + __sanitizer_syscall_post_impl_unlinkat(res, (long)(dfd), (long)(pathname), \ + (long)(flag)) +#define __sanitizer_syscall_pre_symlinkat(oldname, newdfd, newname) \ + __sanitizer_syscall_pre_impl_symlinkat((long)(oldname), (long)(newdfd), \ + (long)(newname)) +#define __sanitizer_syscall_post_symlinkat(res, oldname, newdfd, newname) \ + __sanitizer_syscall_post_impl_symlinkat(res, (long)(oldname), \ + (long)(newdfd), (long)(newname)) +#define __sanitizer_syscall_pre_linkat(olddfd, oldname, newdfd, newname, \ + flags) \ + __sanitizer_syscall_pre_impl_linkat((long)(olddfd), (long)(oldname), \ + (long)(newdfd), (long)(newname), \ + (long)(flags)) +#define __sanitizer_syscall_post_linkat(res, olddfd, oldname, newdfd, newname, \ + flags) \ + __sanitizer_syscall_post_impl_linkat(res, (long)(olddfd), (long)(oldname), \ + (long)(newdfd), (long)(newname), \ + (long)(flags)) +#define __sanitizer_syscall_pre_renameat(olddfd, oldname, newdfd, newname) \ + __sanitizer_syscall_pre_impl_renameat((long)(olddfd), (long)(oldname), \ + (long)(newdfd), (long)(newname)) +#define __sanitizer_syscall_post_renameat(res, olddfd, oldname, newdfd, \ + newname) \ + __sanitizer_syscall_post_impl_renameat(res, (long)(olddfd), (long)(oldname), \ + (long)(newdfd), (long)(newname)) +#define __sanitizer_syscall_pre_futimesat(dfd, filename, utimes) \ + __sanitizer_syscall_pre_impl_futimesat((long)(dfd), (long)(filename), \ + (long)(utimes)) +#define __sanitizer_syscall_post_futimesat(res, dfd, filename, utimes) \ + __sanitizer_syscall_post_impl_futimesat(res, (long)(dfd), (long)(filename), \ + (long)(utimes)) +#define __sanitizer_syscall_pre_faccessat(dfd, filename, mode) \ + __sanitizer_syscall_pre_impl_faccessat((long)(dfd), (long)(filename), \ + (long)(mode)) +#define __sanitizer_syscall_post_faccessat(res, dfd, filename, mode) \ + __sanitizer_syscall_post_impl_faccessat(res, (long)(dfd), (long)(filename), \ + (long)(mode)) +#define __sanitizer_syscall_pre_fchmodat(dfd, filename, mode) \ + __sanitizer_syscall_pre_impl_fchmodat((long)(dfd), (long)(filename), \ + (long)(mode)) +#define __sanitizer_syscall_post_fchmodat(res, dfd, filename, mode) \ + __sanitizer_syscall_post_impl_fchmodat(res, (long)(dfd), (long)(filename), \ + (long)(mode)) +#define __sanitizer_syscall_pre_fchownat(dfd, filename, user, group, flag) \ + __sanitizer_syscall_pre_impl_fchownat((long)(dfd), (long)(filename), \ + (long)(user), (long)(group), \ + (long)(flag)) +#define __sanitizer_syscall_post_fchownat(res, dfd, filename, user, group, \ + flag) \ + __sanitizer_syscall_post_impl_fchownat(res, (long)(dfd), (long)(filename), \ + (long)(user), (long)(group), \ + (long)(flag)) +#define __sanitizer_syscall_pre_openat(dfd, filename, flags, mode) \ + __sanitizer_syscall_pre_impl_openat((long)(dfd), (long)(filename), \ + 
(long)(flags), (long)(mode)) +#define __sanitizer_syscall_post_openat(res, dfd, filename, flags, mode) \ + __sanitizer_syscall_post_impl_openat(res, (long)(dfd), (long)(filename), \ + (long)(flags), (long)(mode)) +#define __sanitizer_syscall_pre_newfstatat(dfd, filename, statbuf, flag) \ + __sanitizer_syscall_pre_impl_newfstatat((long)(dfd), (long)(filename), \ + (long)(statbuf), (long)(flag)) +#define __sanitizer_syscall_post_newfstatat(res, dfd, filename, statbuf, flag) \ + __sanitizer_syscall_post_impl_newfstatat(res, (long)(dfd), (long)(filename), \ + (long)(statbuf), (long)(flag)) +#define __sanitizer_syscall_pre_fstatat64(dfd, filename, statbuf, flag) \ + __sanitizer_syscall_pre_impl_fstatat64((long)(dfd), (long)(filename), \ + (long)(statbuf), (long)(flag)) +#define __sanitizer_syscall_post_fstatat64(res, dfd, filename, statbuf, flag) \ + __sanitizer_syscall_post_impl_fstatat64(res, (long)(dfd), (long)(filename), \ + (long)(statbuf), (long)(flag)) +#define __sanitizer_syscall_pre_readlinkat(dfd, path, buf, bufsiz) \ + __sanitizer_syscall_pre_impl_readlinkat((long)(dfd), (long)(path), \ + (long)(buf), (long)(bufsiz)) +#define __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, bufsiz) \ + __sanitizer_syscall_post_impl_readlinkat(res, (long)(dfd), (long)(path), \ + (long)(buf), (long)(bufsiz)) +#define __sanitizer_syscall_pre_utimensat(dfd, filename, utimes, flags) \ + __sanitizer_syscall_pre_impl_utimensat((long)(dfd), (long)(filename), \ + (long)(utimes), (long)(flags)) +#define __sanitizer_syscall_post_utimensat(res, dfd, filename, utimes, flags) \ + __sanitizer_syscall_post_impl_utimensat(res, (long)(dfd), (long)(filename), \ + (long)(utimes), (long)(flags)) +#define __sanitizer_syscall_pre_unshare(unshare_flags) \ + __sanitizer_syscall_pre_impl_unshare((long)(unshare_flags)) +#define __sanitizer_syscall_post_unshare(res, unshare_flags) \ + __sanitizer_syscall_post_impl_unshare(res, (long)(unshare_flags)) +#define __sanitizer_syscall_pre_splice(fd_in, off_in, fd_out, off_out, len, \ + flags) \ + __sanitizer_syscall_pre_impl_splice((long)(fd_in), (long)(off_in), \ + (long)(fd_out), (long)(off_out), \ + (long)(len), (long)(flags)) +#define __sanitizer_syscall_post_splice(res, fd_in, off_in, fd_out, off_out, \ + len, flags) \ + __sanitizer_syscall_post_impl_splice(res, (long)(fd_in), (long)(off_in), \ + (long)(fd_out), (long)(off_out), \ + (long)(len), (long)(flags)) +#define __sanitizer_syscall_pre_vmsplice(fd, iov, nr_segs, flags) \ + __sanitizer_syscall_pre_impl_vmsplice((long)(fd), (long)(iov), \ + (long)(nr_segs), (long)(flags)) +#define __sanitizer_syscall_post_vmsplice(res, fd, iov, nr_segs, flags) \ + __sanitizer_syscall_post_impl_vmsplice(res, (long)(fd), (long)(iov), \ + (long)(nr_segs), (long)(flags)) +#define __sanitizer_syscall_pre_tee(fdin, fdout, len, flags) \ + __sanitizer_syscall_pre_impl_tee((long)(fdin), (long)(fdout), (long)(len), \ + (long)(flags)) +#define __sanitizer_syscall_post_tee(res, fdin, fdout, len, flags) \ + __sanitizer_syscall_post_impl_tee(res, (long)(fdin), (long)(fdout), \ + (long)(len), (long)(flags)) +#define __sanitizer_syscall_pre_get_robust_list(pid, head_ptr, len_ptr) \ + __sanitizer_syscall_pre_impl_get_robust_list((long)(pid), (long)(head_ptr), \ + (long)(len_ptr)) +#define __sanitizer_syscall_post_get_robust_list(res, pid, head_ptr, len_ptr) \ + __sanitizer_syscall_post_impl_get_robust_list( \ + res, (long)(pid), (long)(head_ptr), (long)(len_ptr)) +#define __sanitizer_syscall_pre_set_robust_list(head, len) \ + 
__sanitizer_syscall_pre_impl_set_robust_list((long)(head), (long)(len)) +#define __sanitizer_syscall_post_set_robust_list(res, head, len) \ + __sanitizer_syscall_post_impl_set_robust_list(res, (long)(head), (long)(len)) +#define __sanitizer_syscall_pre_getcpu(cpu, node, cache) \ + __sanitizer_syscall_pre_impl_getcpu((long)(cpu), (long)(node), (long)(cache)) +#define __sanitizer_syscall_post_getcpu(res, cpu, node, cache) \ + __sanitizer_syscall_post_impl_getcpu(res, (long)(cpu), (long)(node), \ + (long)(cache)) +#define __sanitizer_syscall_pre_signalfd(ufd, user_mask, sizemask) \ + __sanitizer_syscall_pre_impl_signalfd((long)(ufd), (long)(user_mask), \ + (long)(sizemask)) +#define __sanitizer_syscall_post_signalfd(res, ufd, user_mask, sizemask) \ + __sanitizer_syscall_post_impl_signalfd(res, (long)(ufd), (long)(user_mask), \ + (long)(sizemask)) +#define __sanitizer_syscall_pre_signalfd4(ufd, user_mask, sizemask, flags) \ + __sanitizer_syscall_pre_impl_signalfd4((long)(ufd), (long)(user_mask), \ + (long)(sizemask), (long)(flags)) +#define __sanitizer_syscall_post_signalfd4(res, ufd, user_mask, sizemask, \ + flags) \ + __sanitizer_syscall_post_impl_signalfd4(res, (long)(ufd), (long)(user_mask), \ + (long)(sizemask), (long)(flags)) +#define __sanitizer_syscall_pre_timerfd_create(clockid, flags) \ + __sanitizer_syscall_pre_impl_timerfd_create((long)(clockid), (long)(flags)) +#define __sanitizer_syscall_post_timerfd_create(res, clockid, flags) \ + __sanitizer_syscall_post_impl_timerfd_create(res, (long)(clockid), \ + (long)(flags)) +#define __sanitizer_syscall_pre_timerfd_settime(ufd, flags, utmr, otmr) \ + __sanitizer_syscall_pre_impl_timerfd_settime((long)(ufd), (long)(flags), \ + (long)(utmr), (long)(otmr)) +#define __sanitizer_syscall_post_timerfd_settime(res, ufd, flags, utmr, otmr) \ + __sanitizer_syscall_post_impl_timerfd_settime( \ + res, (long)(ufd), (long)(flags), (long)(utmr), (long)(otmr)) +#define __sanitizer_syscall_pre_timerfd_gettime(ufd, otmr) \ + __sanitizer_syscall_pre_impl_timerfd_gettime((long)(ufd), (long)(otmr)) +#define __sanitizer_syscall_post_timerfd_gettime(res, ufd, otmr) \ + __sanitizer_syscall_post_impl_timerfd_gettime(res, (long)(ufd), (long)(otmr)) +#define __sanitizer_syscall_pre_eventfd(count) \ + __sanitizer_syscall_pre_impl_eventfd((long)(count)) +#define __sanitizer_syscall_post_eventfd(res, count) \ + __sanitizer_syscall_post_impl_eventfd(res, (long)(count)) +#define __sanitizer_syscall_pre_eventfd2(count, flags) \ + __sanitizer_syscall_pre_impl_eventfd2((long)(count), (long)(flags)) +#define __sanitizer_syscall_post_eventfd2(res, count, flags) \ + __sanitizer_syscall_post_impl_eventfd2(res, (long)(count), (long)(flags)) +#define __sanitizer_syscall_pre_old_readdir(arg0, arg1, arg2) \ + __sanitizer_syscall_pre_impl_old_readdir((long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_post_old_readdir(res, arg0, arg1, arg2) \ + __sanitizer_syscall_post_impl_old_readdir(res, (long)(arg0), (long)(arg1), \ + (long)(arg2)) +#define __sanitizer_syscall_pre_pselect6(arg0, arg1, arg2, arg3, arg4, arg5) \ + __sanitizer_syscall_pre_impl_pselect6((long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_post_pselect6(res, arg0, arg1, arg2, arg3, arg4, \ + arg5) \ + __sanitizer_syscall_post_impl_pselect6(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4), (long)(arg5)) +#define __sanitizer_syscall_pre_ppoll(arg0, arg1, arg2, arg3, arg4) \ + 
__sanitizer_syscall_pre_impl_ppoll((long)(arg0), (long)(arg1), (long)(arg2), \ + (long)(arg3), (long)(arg4)) +#define __sanitizer_syscall_post_ppoll(res, arg0, arg1, arg2, arg3, arg4) \ + __sanitizer_syscall_post_impl_ppoll(res, (long)(arg0), (long)(arg1), \ + (long)(arg2), (long)(arg3), \ + (long)(arg4)) +#define __sanitizer_syscall_pre_syncfs(fd) \ + __sanitizer_syscall_pre_impl_syncfs((long)(fd)) +#define __sanitizer_syscall_post_syncfs(res, fd) \ + __sanitizer_syscall_post_impl_syncfs(res, (long)(fd)) +#define __sanitizer_syscall_pre_perf_event_open(attr_uptr, pid, cpu, group_fd, \ + flags) \ + __sanitizer_syscall_pre_impl_perf_event_open((long)(attr_uptr), (long)(pid), \ + (long)(cpu), (long)(group_fd), \ + (long)(flags)) +#define __sanitizer_syscall_post_perf_event_open(res, attr_uptr, pid, cpu, \ + group_fd, flags) \ + __sanitizer_syscall_post_impl_perf_event_open( \ + res, (long)(attr_uptr), (long)(pid), (long)(cpu), (long)(group_fd), \ + (long)(flags)) +#define __sanitizer_syscall_pre_mmap_pgoff(addr, len, prot, flags, fd, pgoff) \ + __sanitizer_syscall_pre_impl_mmap_pgoff((long)(addr), (long)(len), \ + (long)(prot), (long)(flags), \ + (long)(fd), (long)(pgoff)) +#define __sanitizer_syscall_post_mmap_pgoff(res, addr, len, prot, flags, fd, \ + pgoff) \ + __sanitizer_syscall_post_impl_mmap_pgoff(res, (long)(addr), (long)(len), \ + (long)(prot), (long)(flags), \ + (long)(fd), (long)(pgoff)) +#define __sanitizer_syscall_pre_old_mmap(arg) \ + __sanitizer_syscall_pre_impl_old_mmap((long)(arg)) +#define __sanitizer_syscall_post_old_mmap(res, arg) \ + __sanitizer_syscall_post_impl_old_mmap(res, (long)(arg)) +#define __sanitizer_syscall_pre_name_to_handle_at(dfd, name, handle, mnt_id, \ + flag) \ + __sanitizer_syscall_pre_impl_name_to_handle_at( \ + (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), (long)(flag)) +#define __sanitizer_syscall_post_name_to_handle_at(res, dfd, name, handle, \ + mnt_id, flag) \ + __sanitizer_syscall_post_impl_name_to_handle_at( \ + res, (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), \ + (long)(flag)) +#define __sanitizer_syscall_pre_open_by_handle_at(mountdirfd, handle, flags) \ + __sanitizer_syscall_pre_impl_open_by_handle_at( \ + (long)(mountdirfd), (long)(handle), (long)(flags)) +#define __sanitizer_syscall_post_open_by_handle_at(res, mountdirfd, handle, \ + flags) \ + __sanitizer_syscall_post_impl_open_by_handle_at( \ + res, (long)(mountdirfd), (long)(handle), (long)(flags)) +#define __sanitizer_syscall_pre_setns(fd, nstype) \ + __sanitizer_syscall_pre_impl_setns((long)(fd), (long)(nstype)) +#define __sanitizer_syscall_post_setns(res, fd, nstype) \ + __sanitizer_syscall_post_impl_setns(res, (long)(fd), (long)(nstype)) +#define __sanitizer_syscall_pre_process_vm_readv(pid, lvec, liovcnt, rvec, \ + riovcnt, flags) \ + __sanitizer_syscall_pre_impl_process_vm_readv( \ + (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \ + (long)(riovcnt), (long)(flags)) +#define __sanitizer_syscall_post_process_vm_readv(res, pid, lvec, liovcnt, \ + rvec, riovcnt, flags) \ + __sanitizer_syscall_post_impl_process_vm_readv( \ + res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \ + (long)(riovcnt), (long)(flags)) +#define __sanitizer_syscall_pre_process_vm_writev(pid, lvec, liovcnt, rvec, \ + riovcnt, flags) \ + __sanitizer_syscall_pre_impl_process_vm_writev( \ + (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \ + (long)(riovcnt), (long)(flags)) +#define __sanitizer_syscall_post_process_vm_writev(res, pid, lvec, liovcnt, \ + 
+#define __sanitizer_syscall_pre_fork() __sanitizer_syscall_pre_impl_fork()
+#define __sanitizer_syscall_post_fork(res) __sanitizer_syscall_post_impl_fork(res)
+#define __sanitizer_syscall_pre_vfork() __sanitizer_syscall_pre_impl_vfork()
+#define __sanitizer_syscall_post_vfork(res) __sanitizer_syscall_post_impl_vfork(res)
+#define __sanitizer_syscall_pre_sigaction(signum, act, oldact) __sanitizer_syscall_pre_impl_sigaction((long)signum, (long)act, (long)oldact)
+#define __sanitizer_syscall_post_sigaction(res, signum, act, oldact) __sanitizer_syscall_post_impl_sigaction(res, (long)signum, (long)act, (long)oldact)
+#define __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, sz) __sanitizer_syscall_pre_impl_rt_sigaction((long)signum, (long)act, (long)oldact, (long)sz)
+#define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) __sanitizer_syscall_post_impl_rt_sigaction(res, (long)signum, (long)act, (long)oldact, (long)sz)
+#define __sanitizer_syscall_pre_sigaltstack(ss, oss) __sanitizer_syscall_pre_impl_sigaltstack((long)ss, (long)oss)
+#define __sanitizer_syscall_post_sigaltstack(res, ss, oss) __sanitizer_syscall_post_impl_sigaltstack(res, (long)ss, (long)oss)
+
+// And now a few syscalls we don't handle yet.
+#define __sanitizer_syscall_pre_afs_syscall(...)
+#define __sanitizer_syscall_pre_arch_prctl(...)
+#define __sanitizer_syscall_pre_break(...)
+#define __sanitizer_syscall_pre_chown32(...)
+#define __sanitizer_syscall_pre_clone(...)
+#define __sanitizer_syscall_pre_create_module(...)
+#define __sanitizer_syscall_pre_epoll_ctl_old(...)
+#define __sanitizer_syscall_pre_epoll_wait_old(...)
+#define __sanitizer_syscall_pre_execve(...)
+#define __sanitizer_syscall_pre_fadvise64(...)
+#define __sanitizer_syscall_pre_fadvise64_64(...)
+#define __sanitizer_syscall_pre_fallocate(...)
+#define __sanitizer_syscall_pre_fanotify_init(...)
+#define __sanitizer_syscall_pre_fanotify_mark(...)
+#define __sanitizer_syscall_pre_fchown32(...)
+#define __sanitizer_syscall_pre_ftime(...)
+#define __sanitizer_syscall_pre_ftruncate64(...)
+#define __sanitizer_syscall_pre_futex(...)
+#define __sanitizer_syscall_pre_getegid32(...)
+#define __sanitizer_syscall_pre_geteuid32(...)
+#define __sanitizer_syscall_pre_getgid32(...)
+#define __sanitizer_syscall_pre_getgroups32(...)
+#define __sanitizer_syscall_pre_get_kernel_syms(...)
+#define __sanitizer_syscall_pre_getpmsg(...)
+#define __sanitizer_syscall_pre_getresgid32(...)
+#define __sanitizer_syscall_pre_getresuid32(...)
+#define __sanitizer_syscall_pre_get_thread_area(...)
+#define __sanitizer_syscall_pre_getuid32(...)
+#define __sanitizer_syscall_pre_gtty(...)
+#define __sanitizer_syscall_pre_idle(...)
+#define __sanitizer_syscall_pre_iopl(...)
+#define __sanitizer_syscall_pre_lchown32(...)
+#define __sanitizer_syscall_pre__llseek(...)
+#define __sanitizer_syscall_pre_lock(...)
+#define __sanitizer_syscall_pre_madvise1(...)
+#define __sanitizer_syscall_pre_mmap(...)
+#define __sanitizer_syscall_pre_mmap2(...)
+#define __sanitizer_syscall_pre_modify_ldt(...)
+#define __sanitizer_syscall_pre_mpx(...)
+#define __sanitizer_syscall_pre__newselect(...)
+#define __sanitizer_syscall_pre_nfsservctl(...)
+#define __sanitizer_syscall_pre_oldfstat(...)
+#define __sanitizer_syscall_pre_oldlstat(...)
+#define __sanitizer_syscall_pre_oldolduname(...)
+#define __sanitizer_syscall_pre_oldstat(...)
+#define __sanitizer_syscall_pre_prctl(...)
+#define __sanitizer_syscall_pre_prof(...)
+#define __sanitizer_syscall_pre_profil(...)
+#define __sanitizer_syscall_pre_putpmsg(...)
+#define __sanitizer_syscall_pre_query_module(...)
+#define __sanitizer_syscall_pre_readahead(...)
+#define __sanitizer_syscall_pre_readdir(...)
+#define __sanitizer_syscall_pre_rt_sigreturn(...)
+#define __sanitizer_syscall_pre_rt_sigsuspend(...)
+#define __sanitizer_syscall_pre_security(...)
+#define __sanitizer_syscall_pre_setfsgid32(...)
+#define __sanitizer_syscall_pre_setfsuid32(...)
+#define __sanitizer_syscall_pre_setgid32(...)
+#define __sanitizer_syscall_pre_setgroups32(...)
+#define __sanitizer_syscall_pre_setregid32(...)
+#define __sanitizer_syscall_pre_setresgid32(...)
+#define __sanitizer_syscall_pre_setresuid32(...)
+#define __sanitizer_syscall_pre_setreuid32(...)
+#define __sanitizer_syscall_pre_set_thread_area(...)
+#define __sanitizer_syscall_pre_setuid32(...)
+#define __sanitizer_syscall_pre_sigreturn(...)
+#define __sanitizer_syscall_pre_sigsuspend(...)
+#define __sanitizer_syscall_pre_stty(...)
+#define __sanitizer_syscall_pre_sync_file_range(...)
+#define __sanitizer_syscall_pre__sysctl(...)
+#define __sanitizer_syscall_pre_truncate64(...)
+#define __sanitizer_syscall_pre_tuxcall(...)
+#define __sanitizer_syscall_pre_ugetrlimit(...)
+#define __sanitizer_syscall_pre_ulimit(...)
+#define __sanitizer_syscall_pre_umount2(...)
+#define __sanitizer_syscall_pre_vm86(...)
+#define __sanitizer_syscall_pre_vm86old(...)
+#define __sanitizer_syscall_pre_vserver(...)
+
+#define __sanitizer_syscall_post_afs_syscall(res, ...)
+#define __sanitizer_syscall_post_arch_prctl(res, ...)
+#define __sanitizer_syscall_post_break(res, ...)
+#define __sanitizer_syscall_post_chown32(res, ...)
+#define __sanitizer_syscall_post_clone(res, ...)
+#define __sanitizer_syscall_post_create_module(res, ...)
+#define __sanitizer_syscall_post_epoll_ctl_old(res, ...)
+#define __sanitizer_syscall_post_epoll_wait_old(res, ...)
+#define __sanitizer_syscall_post_execve(res, ...)
+#define __sanitizer_syscall_post_fadvise64(res, ...)
+#define __sanitizer_syscall_post_fadvise64_64(res, ...)
+#define __sanitizer_syscall_post_fallocate(res, ...)
+#define __sanitizer_syscall_post_fanotify_init(res, ...)
+#define __sanitizer_syscall_post_fanotify_mark(res, ...)
+#define __sanitizer_syscall_post_fchown32(res, ...)
+#define __sanitizer_syscall_post_ftime(res, ...)
+#define __sanitizer_syscall_post_ftruncate64(res, ...)
+#define __sanitizer_syscall_post_futex(res, ...)
+#define __sanitizer_syscall_post_getegid32(res, ...)
+#define __sanitizer_syscall_post_geteuid32(res, ...)
+#define __sanitizer_syscall_post_getgid32(res, ...)
+#define __sanitizer_syscall_post_getgroups32(res, ...)
+#define __sanitizer_syscall_post_get_kernel_syms(res, ...)
+#define __sanitizer_syscall_post_getpmsg(res, ...)
+#define __sanitizer_syscall_post_getresgid32(res, ...)
+#define __sanitizer_syscall_post_getresuid32(res, ...)
+#define __sanitizer_syscall_post_get_thread_area(res, ...)
+#define __sanitizer_syscall_post_getuid32(res, ...)
+#define __sanitizer_syscall_post_gtty(res, ...)
+#define __sanitizer_syscall_post_idle(res, ...)
+#define __sanitizer_syscall_post_iopl(res, ...)
+#define __sanitizer_syscall_post_lchown32(res, ...)
+#define __sanitizer_syscall_post__llseek(res, ...)
+#define __sanitizer_syscall_post_lock(res, ...)
+#define __sanitizer_syscall_post_madvise1(res, ...)
+#define __sanitizer_syscall_post_mmap2(res, ...)
+#define __sanitizer_syscall_post_mmap(res, ...)
+#define __sanitizer_syscall_post_modify_ldt(res, ...)
+#define __sanitizer_syscall_post_mpx(res, ...)
+#define __sanitizer_syscall_post__newselect(res, ...)
+#define __sanitizer_syscall_post_nfsservctl(res, ...)
+#define __sanitizer_syscall_post_oldfstat(res, ...)
+#define __sanitizer_syscall_post_oldlstat(res, ...)
+#define __sanitizer_syscall_post_oldolduname(res, ...)
+#define __sanitizer_syscall_post_oldstat(res, ...)
+#define __sanitizer_syscall_post_prctl(res, ...)
+#define __sanitizer_syscall_post_profil(res, ...)
+#define __sanitizer_syscall_post_prof(res, ...)
+#define __sanitizer_syscall_post_putpmsg(res, ...)
+#define __sanitizer_syscall_post_query_module(res, ...)
+#define __sanitizer_syscall_post_readahead(res, ...)
+#define __sanitizer_syscall_post_readdir(res, ...)
+#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
+#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
+#define __sanitizer_syscall_post_security(res, ...)
+#define __sanitizer_syscall_post_setfsgid32(res, ...)
+#define __sanitizer_syscall_post_setfsuid32(res, ...)
+#define __sanitizer_syscall_post_setgid32(res, ...)
+#define __sanitizer_syscall_post_setgroups32(res, ...)
+#define __sanitizer_syscall_post_setregid32(res, ...)
+#define __sanitizer_syscall_post_setresgid32(res, ...)
+#define __sanitizer_syscall_post_setresuid32(res, ...)
+#define __sanitizer_syscall_post_setreuid32(res, ...)
+#define __sanitizer_syscall_post_set_thread_area(res, ...)
+#define __sanitizer_syscall_post_setuid32(res, ...)
+#define __sanitizer_syscall_post_sigreturn(res, ...)
+#define __sanitizer_syscall_post_sigsuspend(res, ...)
+#define __sanitizer_syscall_post_stty(res, ...)
+#define __sanitizer_syscall_post_sync_file_range(res, ...)
+#define __sanitizer_syscall_post__sysctl(res, ...)
+#define __sanitizer_syscall_post_truncate64(res, ...)
+#define __sanitizer_syscall_post_tuxcall(res, ...)
+#define __sanitizer_syscall_post_ugetrlimit(res, ...)
+#define __sanitizer_syscall_post_ulimit(res, ...)
+#define __sanitizer_syscall_post_umount2(res, ...)
+#define __sanitizer_syscall_post_vm86old(res, ...)
+#define __sanitizer_syscall_post_vm86(res, ...)
+#define __sanitizer_syscall_post_vserver(res, ...)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Private declarations. Do not call directly from user code. Use macros above.
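+//
+// Illustrative call pattern (a minimal sketch): a tool wraps the raw
+// syscall with the matching pre/post macros defined above, e.g. for
+// eventfd (syscall() and __NR_eventfd come from <sys/syscall.h>):
+//
+//   __sanitizer_syscall_pre_eventfd(count);
+//   long res = syscall(__NR_eventfd, count);
+//   __sanitizer_syscall_post_eventfd(res, count);
+//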
+void __sanitizer_syscall_pre_impl_time(long tloc);
+void __sanitizer_syscall_post_impl_time(long res, long tloc);
+void __sanitizer_syscall_pre_impl_stime(long tptr);
+void __sanitizer_syscall_post_impl_stime(long res, long tptr);
+void __sanitizer_syscall_pre_impl_gettimeofday(long tv, long tz);
+void __sanitizer_syscall_post_impl_gettimeofday(long res, long tv, long tz);
+void __sanitizer_syscall_pre_impl_settimeofday(long tv, long tz);
+void __sanitizer_syscall_post_impl_settimeofday(long res, long tv, long tz);
+void __sanitizer_syscall_pre_impl_adjtimex(long txc_p);
+void __sanitizer_syscall_post_impl_adjtimex(long res, long txc_p);
+void __sanitizer_syscall_pre_impl_times(long tbuf);
+void __sanitizer_syscall_post_impl_times(long res, long tbuf);
+void __sanitizer_syscall_pre_impl_gettid();
+void __sanitizer_syscall_post_impl_gettid(long res);
+void __sanitizer_syscall_pre_impl_nanosleep(long rqtp, long rmtp);
+void __sanitizer_syscall_post_impl_nanosleep(long res, long rqtp, long rmtp);
+void __sanitizer_syscall_pre_impl_alarm(long seconds);
+void __sanitizer_syscall_post_impl_alarm(long res, long seconds);
+void __sanitizer_syscall_pre_impl_getpid();
+void __sanitizer_syscall_post_impl_getpid(long res);
+void __sanitizer_syscall_pre_impl_getppid();
+void __sanitizer_syscall_post_impl_getppid(long res);
+void __sanitizer_syscall_pre_impl_getuid();
+void __sanitizer_syscall_post_impl_getuid(long res);
+void __sanitizer_syscall_pre_impl_geteuid();
+void __sanitizer_syscall_post_impl_geteuid(long res);
+void __sanitizer_syscall_pre_impl_getgid();
+void __sanitizer_syscall_post_impl_getgid(long res);
+void __sanitizer_syscall_pre_impl_getegid();
+void __sanitizer_syscall_post_impl_getegid(long res);
+void __sanitizer_syscall_pre_impl_getresuid(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_getresuid(long res, long ruid, long euid, long suid);
+void __sanitizer_syscall_pre_impl_getresgid(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_getresgid(long res, long rgid, long egid, long sgid);
+void __sanitizer_syscall_pre_impl_getpgid(long pid);
+void __sanitizer_syscall_post_impl_getpgid(long res, long pid);
+void __sanitizer_syscall_pre_impl_getpgrp();
+void __sanitizer_syscall_post_impl_getpgrp(long res);
+void __sanitizer_syscall_pre_impl_getsid(long pid);
+void __sanitizer_syscall_post_impl_getsid(long res, long pid);
+void __sanitizer_syscall_pre_impl_getgroups(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_getgroups(long res, long gidsetsize, long grouplist);
+void __sanitizer_syscall_pre_impl_setregid(long rgid, long egid);
+void __sanitizer_syscall_post_impl_setregid(long res, long rgid, long egid);
+void __sanitizer_syscall_pre_impl_setgid(long gid);
+void __sanitizer_syscall_post_impl_setgid(long res, long gid);
+void __sanitizer_syscall_pre_impl_setreuid(long ruid, long euid);
+void __sanitizer_syscall_post_impl_setreuid(long res, long ruid, long euid);
+void __sanitizer_syscall_pre_impl_setuid(long uid);
+void __sanitizer_syscall_post_impl_setuid(long res, long uid);
+void __sanitizer_syscall_pre_impl_setresuid(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_setresuid(long res, long ruid, long euid, long suid);
+void __sanitizer_syscall_pre_impl_setresgid(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_setresgid(long res, long rgid, long egid, long sgid);
+void __sanitizer_syscall_pre_impl_setfsuid(long uid);
+void __sanitizer_syscall_post_impl_setfsuid(long res, long uid);
+void __sanitizer_syscall_pre_impl_setfsgid(long gid);
+void __sanitizer_syscall_post_impl_setfsgid(long res, long gid);
+void __sanitizer_syscall_pre_impl_setpgid(long pid, long pgid);
+void __sanitizer_syscall_post_impl_setpgid(long res, long pid, long pgid);
+void __sanitizer_syscall_pre_impl_setsid();
+void __sanitizer_syscall_post_impl_setsid(long res);
+void __sanitizer_syscall_pre_impl_setgroups(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_setgroups(long res, long gidsetsize, long grouplist);
+void __sanitizer_syscall_pre_impl_acct(long name);
+void __sanitizer_syscall_post_impl_acct(long res, long name);
+void __sanitizer_syscall_pre_impl_capget(long header, long dataptr);
+void __sanitizer_syscall_post_impl_capget(long res, long header, long dataptr);
+void __sanitizer_syscall_pre_impl_capset(long header, long data);
+void __sanitizer_syscall_post_impl_capset(long res, long header, long data);
+void __sanitizer_syscall_pre_impl_personality(long personality);
+void __sanitizer_syscall_post_impl_personality(long res, long personality);
+void __sanitizer_syscall_pre_impl_sigpending(long set);
+void __sanitizer_syscall_post_impl_sigpending(long res, long set);
+void __sanitizer_syscall_pre_impl_sigprocmask(long how, long set, long oset);
+void __sanitizer_syscall_post_impl_sigprocmask(long res, long how, long set, long oset);
+void __sanitizer_syscall_pre_impl_getitimer(long which, long value);
+void __sanitizer_syscall_post_impl_getitimer(long res, long which, long value);
+void __sanitizer_syscall_pre_impl_setitimer(long which, long value, long ovalue);
+void __sanitizer_syscall_post_impl_setitimer(long res, long which, long value, long ovalue);
+void __sanitizer_syscall_pre_impl_timer_create(long which_clock, long timer_event_spec, long created_timer_id);
+void __sanitizer_syscall_post_impl_timer_create(long res, long which_clock, long timer_event_spec, long created_timer_id);
+void __sanitizer_syscall_pre_impl_timer_gettime(long timer_id, long setting);
+void __sanitizer_syscall_post_impl_timer_gettime(long res, long timer_id, long setting);
+void __sanitizer_syscall_pre_impl_timer_getoverrun(long timer_id);
+void __sanitizer_syscall_post_impl_timer_getoverrun(long res, long timer_id);
+void __sanitizer_syscall_pre_impl_timer_settime(long timer_id, long flags, long new_setting, long old_setting);
+void __sanitizer_syscall_post_impl_timer_settime(long res, long timer_id, long flags, long new_setting, long old_setting);
+void __sanitizer_syscall_pre_impl_timer_delete(long timer_id);
+void __sanitizer_syscall_post_impl_timer_delete(long res, long timer_id);
+void __sanitizer_syscall_pre_impl_clock_settime(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_settime(long res, long which_clock, long tp);
+void __sanitizer_syscall_pre_impl_clock_gettime(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_gettime(long res, long which_clock, long tp);
+void __sanitizer_syscall_pre_impl_clock_adjtime(long which_clock, long tx);
+void __sanitizer_syscall_post_impl_clock_adjtime(long res, long which_clock, long tx);
+void __sanitizer_syscall_pre_impl_clock_getres(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_getres(long res, long which_clock, long tp);
+void __sanitizer_syscall_pre_impl_clock_nanosleep(long which_clock, long flags, long rqtp, long rmtp);
+void __sanitizer_syscall_post_impl_clock_nanosleep(long res, long which_clock, long flags, long rqtp, long rmtp);
+void __sanitizer_syscall_pre_impl_nice(long increment);
+void __sanitizer_syscall_post_impl_nice(long res, long increment);
+void __sanitizer_syscall_pre_impl_sched_setscheduler(long pid, long policy, long param);
+void __sanitizer_syscall_post_impl_sched_setscheduler(long res, long pid, long policy, long param);
+void __sanitizer_syscall_pre_impl_sched_setparam(long pid, long param);
+void __sanitizer_syscall_post_impl_sched_setparam(long res, long pid, long param);
+void __sanitizer_syscall_pre_impl_sched_getscheduler(long pid);
+void __sanitizer_syscall_post_impl_sched_getscheduler(long res, long pid);
+void __sanitizer_syscall_pre_impl_sched_getparam(long pid, long param);
+void __sanitizer_syscall_post_impl_sched_getparam(long res, long pid, long param);
+void __sanitizer_syscall_pre_impl_sched_setaffinity(long pid, long len, long user_mask_ptr);
+void __sanitizer_syscall_post_impl_sched_setaffinity(long res, long pid, long len, long user_mask_ptr);
+void __sanitizer_syscall_pre_impl_sched_getaffinity(long pid, long len, long user_mask_ptr);
+void __sanitizer_syscall_post_impl_sched_getaffinity(long res, long pid, long len, long user_mask_ptr);
+void __sanitizer_syscall_pre_impl_sched_yield();
+void __sanitizer_syscall_post_impl_sched_yield(long res);
+void __sanitizer_syscall_pre_impl_sched_get_priority_max(long policy);
+void __sanitizer_syscall_post_impl_sched_get_priority_max(long res, long policy);
+void __sanitizer_syscall_pre_impl_sched_get_priority_min(long policy);
+void __sanitizer_syscall_post_impl_sched_get_priority_min(long res, long policy);
+void __sanitizer_syscall_pre_impl_sched_rr_get_interval(long pid, long interval);
+void __sanitizer_syscall_post_impl_sched_rr_get_interval(long res, long pid, long interval);
+void __sanitizer_syscall_pre_impl_setpriority(long which, long who, long niceval);
+void __sanitizer_syscall_post_impl_setpriority(long res, long which, long who, long niceval);
+void __sanitizer_syscall_pre_impl_getpriority(long which, long who);
+void __sanitizer_syscall_post_impl_getpriority(long res, long which, long who);
+void __sanitizer_syscall_pre_impl_shutdown(long arg0, long arg1);
+void __sanitizer_syscall_post_impl_shutdown(long res, long arg0, long arg1);
+void __sanitizer_syscall_pre_impl_reboot(long magic1, long magic2, long cmd, long arg);
+void __sanitizer_syscall_post_impl_reboot(long res, long magic1, long magic2, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_restart_syscall();
+void __sanitizer_syscall_post_impl_restart_syscall(long res);
+void __sanitizer_syscall_pre_impl_kexec_load(long entry, long nr_segments, long segments, long flags);
+void __sanitizer_syscall_post_impl_kexec_load(long res, long entry, long nr_segments, long segments, long flags);
+void __sanitizer_syscall_pre_impl_exit(long error_code);
+void __sanitizer_syscall_post_impl_exit(long res, long error_code);
+void __sanitizer_syscall_pre_impl_exit_group(long error_code);
+void __sanitizer_syscall_post_impl_exit_group(long res, long error_code);
+void __sanitizer_syscall_pre_impl_wait4(long pid, long stat_addr, long options, long ru);
+void __sanitizer_syscall_post_impl_wait4(long res, long pid, long stat_addr, long options, long ru);
+void __sanitizer_syscall_pre_impl_waitid(long which, long pid, long infop, long options, long ru);
+void __sanitizer_syscall_post_impl_waitid(long res, long which, long pid, long infop, long options, long ru);
+void __sanitizer_syscall_pre_impl_waitpid(long pid, long stat_addr, long options);
+void __sanitizer_syscall_post_impl_waitpid(long res, long pid, long stat_addr, long options);
+void __sanitizer_syscall_pre_impl_set_tid_address(long tidptr);
+void __sanitizer_syscall_post_impl_set_tid_address(long res, long tidptr);
+void __sanitizer_syscall_pre_impl_init_module(long umod, long len, long uargs);
+void __sanitizer_syscall_post_impl_init_module(long res, long umod, long len, long uargs);
+void __sanitizer_syscall_pre_impl_delete_module(long name_user, long flags);
+void __sanitizer_syscall_post_impl_delete_module(long res, long name_user, long flags);
+void __sanitizer_syscall_pre_impl_rt_sigprocmask(long how, long set, long oset, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigprocmask(long res, long how, long set, long oset, long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_sigpending(long set, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigpending(long res, long set, long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_sigtimedwait(long uthese, long uinfo, long uts, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigtimedwait(long res, long uthese, long uinfo, long uts, long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo(long tgid, long pid, long sig, long uinfo);
+void __sanitizer_syscall_post_impl_rt_tgsigqueueinfo(long res, long tgid, long pid, long sig, long uinfo);
+void __sanitizer_syscall_pre_impl_kill(long pid, long sig);
+void __sanitizer_syscall_post_impl_kill(long res, long pid, long sig);
+void __sanitizer_syscall_pre_impl_tgkill(long tgid, long pid, long sig);
+void __sanitizer_syscall_post_impl_tgkill(long res, long tgid, long pid, long sig);
+void __sanitizer_syscall_pre_impl_tkill(long pid, long sig);
+void __sanitizer_syscall_post_impl_tkill(long res, long pid, long sig);
+void __sanitizer_syscall_pre_impl_rt_sigqueueinfo(long pid, long sig, long uinfo);
+void __sanitizer_syscall_post_impl_rt_sigqueueinfo(long res, long pid, long sig, long uinfo);
+void __sanitizer_syscall_pre_impl_sgetmask();
+void __sanitizer_syscall_post_impl_sgetmask(long res);
+void __sanitizer_syscall_pre_impl_ssetmask(long newmask);
+void __sanitizer_syscall_post_impl_ssetmask(long res, long newmask);
+void __sanitizer_syscall_pre_impl_signal(long sig, long handler);
+void __sanitizer_syscall_post_impl_signal(long res, long sig, long handler);
+void __sanitizer_syscall_pre_impl_pause();
+void __sanitizer_syscall_post_impl_pause(long res);
+void __sanitizer_syscall_pre_impl_sync();
+void __sanitizer_syscall_post_impl_sync(long res);
+void __sanitizer_syscall_pre_impl_fsync(long fd);
+void __sanitizer_syscall_post_impl_fsync(long res, long fd);
+void __sanitizer_syscall_pre_impl_fdatasync(long fd);
+void __sanitizer_syscall_post_impl_fdatasync(long res, long fd);
+void __sanitizer_syscall_pre_impl_bdflush(long func, long data);
+void __sanitizer_syscall_post_impl_bdflush(long res, long func, long data);
+void __sanitizer_syscall_pre_impl_mount(long dev_name, long dir_name, long type, long flags, long data);
+void __sanitizer_syscall_post_impl_mount(long res, long dev_name, long dir_name, long type, long flags, long data);
+void __sanitizer_syscall_pre_impl_umount(long name, long flags);
+void __sanitizer_syscall_post_impl_umount(long res, long name, long flags);
+void __sanitizer_syscall_pre_impl_oldumount(long name);
+void __sanitizer_syscall_post_impl_oldumount(long res, long name);
+void __sanitizer_syscall_pre_impl_truncate(long path, long length);
+void __sanitizer_syscall_post_impl_truncate(long res, long path, long length);
+void __sanitizer_syscall_pre_impl_ftruncate(long fd, long length);
+void __sanitizer_syscall_post_impl_ftruncate(long res, long fd, long length);
+void __sanitizer_syscall_pre_impl_stat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_stat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_statfs(long path, long buf);
+void __sanitizer_syscall_post_impl_statfs(long res, long path, long buf);
+void __sanitizer_syscall_pre_impl_statfs64(long path, long sz, long buf);
+void __sanitizer_syscall_post_impl_statfs64(long res, long path, long sz, long buf);
+void __sanitizer_syscall_pre_impl_fstatfs(long fd, long buf);
+void __sanitizer_syscall_post_impl_fstatfs(long res, long fd, long buf);
+void __sanitizer_syscall_pre_impl_fstatfs64(long fd, long sz, long buf);
+void __sanitizer_syscall_post_impl_fstatfs64(long res, long fd, long sz, long buf);
+void __sanitizer_syscall_pre_impl_lstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_lstat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_fstat(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_fstat(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_newstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_newstat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_newlstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_newlstat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_newfstat(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_newfstat(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_ustat(long dev, long ubuf);
+void __sanitizer_syscall_post_impl_ustat(long res, long dev, long ubuf);
+void __sanitizer_syscall_pre_impl_stat64(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_stat64(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_fstat64(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_fstat64(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_lstat64(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_lstat64(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_setxattr(long path, long name, long value, long size, long flags);
+void __sanitizer_syscall_post_impl_setxattr(long res, long path, long name, long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_lsetxattr(long path, long name, long value, long size, long flags);
+void __sanitizer_syscall_post_impl_lsetxattr(long res, long path, long name, long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_fsetxattr(long fd, long name, long value, long size, long flags);
+void __sanitizer_syscall_post_impl_fsetxattr(long res, long fd, long name, long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_getxattr(long path, long name, long value, long size);
+void __sanitizer_syscall_post_impl_getxattr(long res, long path, long name, long value, long size);
+void __sanitizer_syscall_pre_impl_lgetxattr(long path, long name, long value, long size);
+void __sanitizer_syscall_post_impl_lgetxattr(long res, long path, long name, long value, long size);
+void __sanitizer_syscall_pre_impl_fgetxattr(long fd, long name, long value, long size);
+void __sanitizer_syscall_post_impl_fgetxattr(long res, long fd, long name, long value, long size);
+void __sanitizer_syscall_pre_impl_listxattr(long path, long list, long size);
+void __sanitizer_syscall_post_impl_listxattr(long res, long path, long list, long size);
+void __sanitizer_syscall_pre_impl_llistxattr(long path, long list, long size);
+void __sanitizer_syscall_post_impl_llistxattr(long res, long path, long list, long size);
+void __sanitizer_syscall_pre_impl_flistxattr(long fd, long list, long size);
+void __sanitizer_syscall_post_impl_flistxattr(long res, long fd, long list, long size);
+void __sanitizer_syscall_pre_impl_removexattr(long path, long name);
+void __sanitizer_syscall_post_impl_removexattr(long res, long path, long name);
+void __sanitizer_syscall_pre_impl_lremovexattr(long path, long name);
+void __sanitizer_syscall_post_impl_lremovexattr(long res, long path, long name);
+void __sanitizer_syscall_pre_impl_fremovexattr(long fd, long name);
+void __sanitizer_syscall_post_impl_fremovexattr(long res, long fd, long name);
+void __sanitizer_syscall_pre_impl_brk(long brk);
+void __sanitizer_syscall_post_impl_brk(long res, long brk);
+void __sanitizer_syscall_pre_impl_mprotect(long start, long len, long prot);
+void __sanitizer_syscall_post_impl_mprotect(long res, long start, long len, long prot);
+void __sanitizer_syscall_pre_impl_mremap(long addr, long old_len, long new_len, long flags, long new_addr);
+void __sanitizer_syscall_post_impl_mremap(long res, long addr, long old_len, long new_len, long flags, long new_addr);
+void __sanitizer_syscall_pre_impl_remap_file_pages(long start, long size, long prot, long pgoff, long flags);
+void __sanitizer_syscall_post_impl_remap_file_pages(long res, long start, long size, long prot, long pgoff, long flags);
+void __sanitizer_syscall_pre_impl_msync(long start, long len, long flags);
+void __sanitizer_syscall_post_impl_msync(long res, long start, long len, long flags);
+void __sanitizer_syscall_pre_impl_munmap(long addr, long len);
+void __sanitizer_syscall_post_impl_munmap(long res, long addr, long len);
+void __sanitizer_syscall_pre_impl_mlock(long start, long len);
+void __sanitizer_syscall_post_impl_mlock(long res, long start, long len);
+void __sanitizer_syscall_pre_impl_munlock(long start, long len);
+void __sanitizer_syscall_post_impl_munlock(long res, long start, long len);
+void __sanitizer_syscall_pre_impl_mlockall(long flags);
+void __sanitizer_syscall_post_impl_mlockall(long res, long flags);
+void __sanitizer_syscall_pre_impl_munlockall();
+void __sanitizer_syscall_post_impl_munlockall(long res);
+void __sanitizer_syscall_pre_impl_madvise(long start, long len, long behavior);
+void __sanitizer_syscall_post_impl_madvise(long res, long start, long len, long behavior);
+void __sanitizer_syscall_pre_impl_mincore(long start, long len, long vec);
+void __sanitizer_syscall_post_impl_mincore(long res, long start, long len, long vec);
+void __sanitizer_syscall_pre_impl_pivot_root(long new_root, long put_old);
+void __sanitizer_syscall_post_impl_pivot_root(long res, long new_root, long put_old);
+void __sanitizer_syscall_pre_impl_chroot(long filename);
+void __sanitizer_syscall_post_impl_chroot(long res, long filename);
+void __sanitizer_syscall_pre_impl_mknod(long filename, long mode, long dev);
+void __sanitizer_syscall_post_impl_mknod(long res, long filename, long mode, long dev);
+void __sanitizer_syscall_pre_impl_link(long oldname, long newname);
+void __sanitizer_syscall_post_impl_link(long res, long oldname, long newname);
+void __sanitizer_syscall_pre_impl_symlink(long old, long new_);
+void __sanitizer_syscall_post_impl_symlink(long res, long old, long new_);
+void __sanitizer_syscall_pre_impl_unlink(long pathname);
+void __sanitizer_syscall_post_impl_unlink(long res, long pathname);
+void __sanitizer_syscall_pre_impl_rename(long oldname, long newname);
+void __sanitizer_syscall_post_impl_rename(long res, long oldname, long newname);
+void __sanitizer_syscall_pre_impl_chmod(long filename, long mode);
+void __sanitizer_syscall_post_impl_chmod(long res, long filename, long mode);
+void __sanitizer_syscall_pre_impl_fchmod(long fd, long mode);
+void __sanitizer_syscall_post_impl_fchmod(long res, long fd, long mode);
+void __sanitizer_syscall_pre_impl_fcntl(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_fcntl(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_fcntl64(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_fcntl64(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_pipe(long fildes);
+void __sanitizer_syscall_post_impl_pipe(long res, long fildes);
+void __sanitizer_syscall_pre_impl_pipe2(long fildes, long flags);
+void __sanitizer_syscall_post_impl_pipe2(long res, long fildes, long flags);
+void __sanitizer_syscall_pre_impl_dup(long fildes);
+void __sanitizer_syscall_post_impl_dup(long res, long fildes);
+void __sanitizer_syscall_pre_impl_dup2(long oldfd, long newfd);
+void __sanitizer_syscall_post_impl_dup2(long res, long oldfd, long newfd);
+void __sanitizer_syscall_pre_impl_dup3(long oldfd, long newfd, long flags);
+void __sanitizer_syscall_post_impl_dup3(long res, long oldfd, long newfd, long flags);
+void __sanitizer_syscall_pre_impl_ioperm(long from, long num, long on);
+void __sanitizer_syscall_post_impl_ioperm(long res, long from, long num, long on);
+void __sanitizer_syscall_pre_impl_ioctl(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_ioctl(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_flock(long fd, long cmd);
+void __sanitizer_syscall_post_impl_flock(long res, long fd, long cmd);
+void __sanitizer_syscall_pre_impl_io_setup(long nr_reqs, long ctx);
+void __sanitizer_syscall_post_impl_io_setup(long res, long nr_reqs, long ctx);
+void __sanitizer_syscall_pre_impl_io_destroy(long ctx);
+void __sanitizer_syscall_post_impl_io_destroy(long res, long ctx);
+void __sanitizer_syscall_pre_impl_io_getevents(long ctx_id, long min_nr, long nr, long events, long timeout);
+void __sanitizer_syscall_post_impl_io_getevents(long res, long ctx_id, long min_nr, long nr, long events, long timeout);
+void __sanitizer_syscall_pre_impl_io_submit(long ctx_id, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_io_submit(long res, long ctx_id, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_io_cancel(long ctx_id, long iocb, long result);
+void __sanitizer_syscall_post_impl_io_cancel(long res, long ctx_id, long iocb, long result);
+void __sanitizer_syscall_pre_impl_sendfile(long out_fd, long in_fd, long offset, long count);
+void __sanitizer_syscall_post_impl_sendfile(long res, long out_fd, long in_fd, long offset, long count);
+void __sanitizer_syscall_pre_impl_sendfile64(long out_fd, long in_fd, long offset, long count);
+void __sanitizer_syscall_post_impl_sendfile64(long res, long out_fd, long in_fd, long offset, long count);
+void __sanitizer_syscall_pre_impl_readlink(long path, long buf, long bufsiz);
+void __sanitizer_syscall_post_impl_readlink(long res, long path, long buf, long bufsiz);
+void __sanitizer_syscall_pre_impl_creat(long pathname, long mode);
+void __sanitizer_syscall_post_impl_creat(long res, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_open(long filename, long flags, long mode);
+void __sanitizer_syscall_post_impl_open(long res, long filename, long flags, long mode);
+void __sanitizer_syscall_pre_impl_close(long fd);
+void __sanitizer_syscall_post_impl_close(long res, long fd);
+void __sanitizer_syscall_pre_impl_access(long filename, long mode);
+void __sanitizer_syscall_post_impl_access(long res, long filename, long mode);
+void __sanitizer_syscall_pre_impl_vhangup();
+void __sanitizer_syscall_post_impl_vhangup(long res);
+void __sanitizer_syscall_pre_impl_chown(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_chown(long res, long filename, long user, long group);
+void __sanitizer_syscall_pre_impl_lchown(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_lchown(long res, long filename, long user, long group);
+void __sanitizer_syscall_pre_impl_fchown(long fd, long user, long group);
+void __sanitizer_syscall_post_impl_fchown(long res, long fd, long user, long group);
+void __sanitizer_syscall_pre_impl_chown16(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_chown16(long res, long filename, long user, long group);
+void __sanitizer_syscall_pre_impl_lchown16(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_lchown16(long res, long filename, long user, long group);
+void __sanitizer_syscall_pre_impl_fchown16(long fd, long user, long group);
+void __sanitizer_syscall_post_impl_fchown16(long res, long fd, long user, long group);
+void __sanitizer_syscall_pre_impl_setregid16(long rgid, long egid);
+void __sanitizer_syscall_post_impl_setregid16(long res, long rgid, long egid);
+void __sanitizer_syscall_pre_impl_setgid16(long gid);
+void __sanitizer_syscall_post_impl_setgid16(long res, long gid);
+void __sanitizer_syscall_pre_impl_setreuid16(long ruid, long euid);
+void __sanitizer_syscall_post_impl_setreuid16(long res, long ruid, long euid);
+void __sanitizer_syscall_pre_impl_setuid16(long uid);
+void __sanitizer_syscall_post_impl_setuid16(long res, long uid);
+void __sanitizer_syscall_pre_impl_setresuid16(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_setresuid16(long res, long ruid, long euid, long suid);
+void __sanitizer_syscall_pre_impl_getresuid16(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_getresuid16(long res, long ruid, long euid, long suid);
+void __sanitizer_syscall_pre_impl_setresgid16(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_setresgid16(long res, long rgid, long egid, long sgid);
+void __sanitizer_syscall_pre_impl_getresgid16(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_getresgid16(long res, long rgid, long egid, long sgid);
+void __sanitizer_syscall_pre_impl_setfsuid16(long uid);
+void __sanitizer_syscall_post_impl_setfsuid16(long res, long uid);
+void __sanitizer_syscall_pre_impl_setfsgid16(long gid);
+void __sanitizer_syscall_post_impl_setfsgid16(long res, long gid);
+void __sanitizer_syscall_pre_impl_getgroups16(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_getgroups16(long res, long gidsetsize, long grouplist);
+void __sanitizer_syscall_pre_impl_setgroups16(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_setgroups16(long res, long gidsetsize, long grouplist);
+void __sanitizer_syscall_pre_impl_getuid16();
+void __sanitizer_syscall_post_impl_getuid16(long res);
+void __sanitizer_syscall_pre_impl_geteuid16();
+void __sanitizer_syscall_post_impl_geteuid16(long res);
+void __sanitizer_syscall_pre_impl_getgid16();
+void __sanitizer_syscall_post_impl_getgid16(long res);
+void __sanitizer_syscall_pre_impl_getegid16();
+void __sanitizer_syscall_post_impl_getegid16(long res);
+void __sanitizer_syscall_pre_impl_utime(long filename, long times);
+void __sanitizer_syscall_post_impl_utime(long res, long filename, long times);
+void __sanitizer_syscall_pre_impl_utimes(long filename, long utimes);
+void __sanitizer_syscall_post_impl_utimes(long res, long filename, long utimes);
+void __sanitizer_syscall_pre_impl_lseek(long fd, long offset, long origin);
+void __sanitizer_syscall_post_impl_lseek(long res, long fd, long offset, long origin);
+void __sanitizer_syscall_pre_impl_llseek(long fd, long offset_high, long offset_low, long result, long origin);
+void __sanitizer_syscall_post_impl_llseek(long res, long fd, long offset_high, long offset_low, long result, long origin);
+void __sanitizer_syscall_pre_impl_read(long fd, long buf, long count);
+void __sanitizer_syscall_post_impl_read(long res, long fd, long buf, long count);
+void __sanitizer_syscall_pre_impl_readv(long fd, long vec, long vlen);
+void __sanitizer_syscall_post_impl_readv(long res, long fd, long vec, long vlen);
+void __sanitizer_syscall_pre_impl_write(long fd, long buf, long count);
+void __sanitizer_syscall_post_impl_write(long res, long fd, long buf, long count);
+void __sanitizer_syscall_pre_impl_writev(long fd, long vec, long vlen);
+void __sanitizer_syscall_post_impl_writev(long res, long fd, long vec, long vlen);
+
+#ifdef _LP64
+void __sanitizer_syscall_pre_impl_pread64(long fd, long buf, long count, long pos);
+void __sanitizer_syscall_post_impl_pread64(long res, long fd, long buf, long count, long pos);
+void __sanitizer_syscall_pre_impl_pwrite64(long fd, long buf, long count, long pos);
+void __sanitizer_syscall_post_impl_pwrite64(long res, long fd, long buf, long count, long pos);
+#else
+void __sanitizer_syscall_pre_impl_pread64(long fd, long buf, long count, long pos0, long pos1);
+void __sanitizer_syscall_post_impl_pread64(long res, long fd, long buf, long count, long pos0, long pos1);
+void __sanitizer_syscall_pre_impl_pwrite64(long fd, long buf, long count, long pos0, long pos1);
+void __sanitizer_syscall_post_impl_pwrite64(long res, long fd, long buf, long count, long pos0, long pos1);
+#endif
+
+void __sanitizer_syscall_pre_impl_preadv(long fd, long vec, long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_post_impl_preadv(long res, long fd, long vec, long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_pre_impl_pwritev(long fd, long vec, long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_post_impl_pwritev(long res, long fd, long vec, long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_pre_impl_getcwd(long buf, long size);
+void __sanitizer_syscall_post_impl_getcwd(long res, long buf, long size);
+void __sanitizer_syscall_pre_impl_mkdir(long pathname, long mode);
+void __sanitizer_syscall_post_impl_mkdir(long res, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_chdir(long filename);
+void __sanitizer_syscall_post_impl_chdir(long res, long filename);
+void __sanitizer_syscall_pre_impl_fchdir(long fd);
+void __sanitizer_syscall_post_impl_fchdir(long res, long fd);
+void __sanitizer_syscall_pre_impl_rmdir(long pathname);
+void __sanitizer_syscall_post_impl_rmdir(long res, long pathname);
+void __sanitizer_syscall_pre_impl_lookup_dcookie(long cookie64, long buf, long len);
+void __sanitizer_syscall_post_impl_lookup_dcookie(long res, long cookie64, long buf, long len);
+void __sanitizer_syscall_pre_impl_quotactl(long cmd, long special, long id, long addr);
+void __sanitizer_syscall_post_impl_quotactl(long res, long cmd, long special, long id, long addr);
+void __sanitizer_syscall_pre_impl_getdents(long fd, long dirent, long count);
+void __sanitizer_syscall_post_impl_getdents(long res, long fd, long dirent, long count);
+void __sanitizer_syscall_pre_impl_getdents64(long fd, long dirent, long count);
+void __sanitizer_syscall_post_impl_getdents64(long res, long fd, long dirent, long count);
+void __sanitizer_syscall_pre_impl_setsockopt(long fd, long level, long optname, long optval, long optlen);
+void __sanitizer_syscall_post_impl_setsockopt(long res, long fd, long level, long optname, long optval, long optlen);
+void __sanitizer_syscall_pre_impl_getsockopt(long fd, long level, long optname, long optval, long optlen);
+void __sanitizer_syscall_post_impl_getsockopt(long res, long fd, long level, long optname, long optval, long optlen);
+void __sanitizer_syscall_pre_impl_bind(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_bind(long res, long arg0, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_connect(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_connect(long res, long arg0, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_accept(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_accept(long res, long arg0, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_accept4(long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_post_impl_accept4(long res, long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_getsockname(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_getsockname(long res, long arg0, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_getpeername(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_getpeername(long res, long arg0, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_send(long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_post_impl_send(long res, long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_sendto(long arg0, long arg1, long arg2, long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_sendto(long res, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5);
+void __sanitizer_syscall_pre_impl_sendmsg(long fd, long msg, long flags);
+void __sanitizer_syscall_post_impl_sendmsg(long res, long fd, long msg, long flags);
+void __sanitizer_syscall_pre_impl_sendmmsg(long fd, long msg, long vlen, long flags);
+void __sanitizer_syscall_post_impl_sendmmsg(long res, long fd, long msg, long vlen, long flags);
+void __sanitizer_syscall_pre_impl_recv(long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_post_impl_recv(long res, long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_recvfrom(long arg0, long arg1, long arg2, long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_recvfrom(long res, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5);
+void __sanitizer_syscall_pre_impl_recvmsg(long fd, long msg, long flags);
+void __sanitizer_syscall_post_impl_recvmsg(long res, long fd, long msg, long flags);
+void __sanitizer_syscall_pre_impl_recvmmsg(long fd, long msg, long vlen, long flags, long timeout);
+void __sanitizer_syscall_post_impl_recvmmsg(long res, long fd, long msg, long vlen, long flags, long timeout);
+void __sanitizer_syscall_pre_impl_socket(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_socket(long res, long arg0, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_socketpair(long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_post_impl_socketpair(long res, long arg0, long arg1, long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_socketcall(long call, long args);
+void __sanitizer_syscall_post_impl_socketcall(long res, long call, long args);
+void __sanitizer_syscall_pre_impl_listen(long arg0, long arg1);
+void __sanitizer_syscall_post_impl_listen(long res, long arg0, long arg1);
+void __sanitizer_syscall_pre_impl_poll(long ufds, long nfds, long timeout);
+void __sanitizer_syscall_post_impl_poll(long res, long ufds, long nfds, long timeout);
+void __sanitizer_syscall_pre_impl_select(long n, long inp, long outp, long exp, long tvp);
+void __sanitizer_syscall_post_impl_select(long res, long n, long inp, long outp, long exp, long tvp);
+void __sanitizer_syscall_pre_impl_old_select(long arg);
+void __sanitizer_syscall_post_impl_old_select(long res, long arg);
+void __sanitizer_syscall_pre_impl_epoll_create(long size);
+void __sanitizer_syscall_post_impl_epoll_create(long res, long size);
+void __sanitizer_syscall_pre_impl_epoll_create1(long flags);
+void __sanitizer_syscall_post_impl_epoll_create1(long res, long flags);
+void __sanitizer_syscall_pre_impl_epoll_ctl(long epfd, long op, long fd, long event);
+void __sanitizer_syscall_post_impl_epoll_ctl(long res, long epfd, long op, long fd, long event);
+void __sanitizer_syscall_pre_impl_epoll_wait(long epfd, long events, long maxevents, long timeout);
+void __sanitizer_syscall_post_impl_epoll_wait(long res, long epfd, long events, long maxevents, long timeout);
+void __sanitizer_syscall_pre_impl_epoll_pwait(long epfd, long events, long maxevents, long timeout, long sigmask, long sigsetsize);
+void __sanitizer_syscall_post_impl_epoll_pwait(long res, long epfd, long events, long maxevents, long timeout, long sigmask, long sigsetsize);
+void __sanitizer_syscall_pre_impl_epoll_pwait2(long epfd, long events, long maxevents, long timeout, long sigmask, long sigsetsize);
+void __sanitizer_syscall_post_impl_epoll_pwait2(long res, long epfd, long events, long maxevents, long timeout, long sigmask, long sigsetsize);
+void __sanitizer_syscall_pre_impl_gethostname(long name, long len);
+void __sanitizer_syscall_post_impl_gethostname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_sethostname(long name, long len);
+void __sanitizer_syscall_post_impl_sethostname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_setdomainname(long name, long len);
+void __sanitizer_syscall_post_impl_setdomainname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_newuname(long name);
+void __sanitizer_syscall_post_impl_newuname(long res, long name);
+void __sanitizer_syscall_pre_impl_uname(long arg0);
+void __sanitizer_syscall_post_impl_uname(long res, long arg0);
+void __sanitizer_syscall_pre_impl_olduname(long arg0);
+void __sanitizer_syscall_post_impl_olduname(long res, long arg0);
+void __sanitizer_syscall_pre_impl_getrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_getrlimit(long res, long resource, long rlim);
+void __sanitizer_syscall_pre_impl_old_getrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_old_getrlimit(long res, long resource, long rlim);
+void __sanitizer_syscall_pre_impl_setrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_setrlimit(long res, long resource, long rlim);
+void __sanitizer_syscall_pre_impl_prlimit64(long pid, long resource, long new_rlim, long old_rlim);
+void __sanitizer_syscall_post_impl_prlimit64(long res, long pid, long resource, long new_rlim, long old_rlim);
+void __sanitizer_syscall_pre_impl_getrusage(long who, long ru);
+void __sanitizer_syscall_post_impl_getrusage(long res, long who, long ru);
+void __sanitizer_syscall_pre_impl_umask(long mask);
+void __sanitizer_syscall_post_impl_umask(long res, long mask);
+void __sanitizer_syscall_pre_impl_msgget(long key, long msgflg);
+void __sanitizer_syscall_post_impl_msgget(long res, long key, long msgflg);
+void __sanitizer_syscall_pre_impl_msgsnd(long msqid, long msgp, long msgsz, long msgflg);
+void __sanitizer_syscall_post_impl_msgsnd(long res, long msqid, long msgp, long msgsz, long msgflg);
+void __sanitizer_syscall_pre_impl_msgrcv(long msqid, long msgp, long msgsz, long msgtyp, long msgflg);
+void __sanitizer_syscall_post_impl_msgrcv(long res, long msqid, long msgp, long msgsz, long msgtyp, long msgflg);
+void __sanitizer_syscall_pre_impl_msgctl(long msqid, long cmd, long buf);
+void __sanitizer_syscall_post_impl_msgctl(long res, long msqid, long cmd, long buf);
+void __sanitizer_syscall_pre_impl_semget(long key, long nsems, long semflg);
+void __sanitizer_syscall_post_impl_semget(long res, long key, long nsems, long semflg);
+void __sanitizer_syscall_pre_impl_semop(long semid, long sops, long nsops);
+void __sanitizer_syscall_post_impl_semop(long res, long semid, long sops, long nsops);
+void __sanitizer_syscall_pre_impl_semctl(long semid, long semnum, long cmd, long arg);
+void __sanitizer_syscall_post_impl_semctl(long res, long semid, long semnum, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_semtimedop(long semid, long sops, long nsops, long timeout);
+void __sanitizer_syscall_post_impl_semtimedop(long res, long semid, long sops, long nsops, long timeout);
+void __sanitizer_syscall_pre_impl_shmat(long shmid, long shmaddr, long shmflg);
+void __sanitizer_syscall_post_impl_shmat(long res, long shmid, long shmaddr, long shmflg);
+void __sanitizer_syscall_pre_impl_shmget(long key, long size, long flag);
+void __sanitizer_syscall_post_impl_shmget(long res, long key, long size, long flag);
+void __sanitizer_syscall_pre_impl_shmdt(long shmaddr);
+void __sanitizer_syscall_post_impl_shmdt(long res, long shmaddr);
+void __sanitizer_syscall_pre_impl_shmctl(long shmid, long cmd, long buf);
+void __sanitizer_syscall_post_impl_shmctl(long res, long shmid, long cmd, long buf);
+void __sanitizer_syscall_pre_impl_ipc(long call, long first, long second, long third, long ptr, long fifth);
+void __sanitizer_syscall_post_impl_ipc(long res, long call, long first, long second, long third, long ptr, long fifth);
+void __sanitizer_syscall_pre_impl_mq_open(long name, long oflag, long mode, long attr);
+void __sanitizer_syscall_post_impl_mq_open(long res, long name, long oflag, long mode, long attr);
+void __sanitizer_syscall_pre_impl_mq_unlink(long name);
+void __sanitizer_syscall_post_impl_mq_unlink(long res, long name);
+void __sanitizer_syscall_pre_impl_mq_timedsend(long mqdes, long msg_ptr, long msg_len, long msg_prio, long abs_timeout);
+void __sanitizer_syscall_post_impl_mq_timedsend(long res, long mqdes, long msg_ptr, long msg_len, long msg_prio, long abs_timeout);
+void __sanitizer_syscall_pre_impl_mq_timedreceive(long mqdes, long msg_ptr, long msg_len, long msg_prio, long abs_timeout);
+void __sanitizer_syscall_post_impl_mq_timedreceive(long res, long mqdes, long msg_ptr, long msg_len, long msg_prio, long abs_timeout);
+void __sanitizer_syscall_pre_impl_mq_notify(long mqdes, long notification);
+void __sanitizer_syscall_post_impl_mq_notify(long res, long mqdes, long notification);
+void __sanitizer_syscall_pre_impl_mq_getsetattr(long mqdes, long mqstat, long omqstat);
+void __sanitizer_syscall_post_impl_mq_getsetattr(long res, long mqdes, long mqstat, long omqstat);
+void __sanitizer_syscall_pre_impl_pciconfig_iobase(long which, long bus, long devfn);
+void __sanitizer_syscall_post_impl_pciconfig_iobase(long res, long which, long bus, long devfn);
+void __sanitizer_syscall_pre_impl_pciconfig_read(long bus, long dfn, long off, long len, long buf);
+void __sanitizer_syscall_post_impl_pciconfig_read(long res, long bus, long dfn, long off, long len, long buf);
+void __sanitizer_syscall_pre_impl_pciconfig_write(long bus, long dfn, long off, long len, long buf);
+void __sanitizer_syscall_post_impl_pciconfig_write(long res, long bus, long dfn, long off, long len, long buf);
+void __sanitizer_syscall_pre_impl_swapon(long specialfile, long swap_flags);
+void __sanitizer_syscall_post_impl_swapon(long res, long specialfile, long swap_flags);
+void __sanitizer_syscall_pre_impl_swapoff(long specialfile);
+void __sanitizer_syscall_post_impl_swapoff(long res, long specialfile);
+void __sanitizer_syscall_pre_impl_sysctl(long args);
+void __sanitizer_syscall_post_impl_sysctl(long res, long args);
+void __sanitizer_syscall_pre_impl_sysinfo(long info);
+void __sanitizer_syscall_post_impl_sysinfo(long res, long info);
+void __sanitizer_syscall_pre_impl_sysfs(long option, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_sysfs(long res, long option, long arg1, long arg2);
+void __sanitizer_syscall_pre_impl_syslog(long type, long buf, long len);
+void __sanitizer_syscall_post_impl_syslog(long res, long type, long buf, long len);
+void __sanitizer_syscall_pre_impl_uselib(long library);
+void __sanitizer_syscall_post_impl_uselib(long res, long library);
+void __sanitizer_syscall_pre_impl_ni_syscall();
+void __sanitizer_syscall_post_impl_ni_syscall(long res);
+void __sanitizer_syscall_pre_impl_ptrace(long request, long pid, long addr, long data);
+void __sanitizer_syscall_post_impl_ptrace(long res, long request, long pid, long addr, long data);
+void __sanitizer_syscall_pre_impl_add_key(long _type, long _description, long _payload, long plen, long destringid);
+void __sanitizer_syscall_post_impl_add_key(long res, long _type, long _description, long _payload, long plen, long destringid);
+void __sanitizer_syscall_pre_impl_request_key(long _type, long _description, long _callout_info, long destringid);
+void __sanitizer_syscall_post_impl_request_key(long res, long _type, long _description, long _callout_info, long destringid);
+void __sanitizer_syscall_pre_impl_keyctl(long cmd, long arg2, long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_keyctl(long res, long cmd, long arg2, long arg3, long arg4, long arg5);
+void __sanitizer_syscall_pre_impl_ioprio_set(long which, long who, long ioprio);
+void __sanitizer_syscall_post_impl_ioprio_set(long res, long which, long who, long ioprio);
+void __sanitizer_syscall_pre_impl_ioprio_get(long which, long who);
+void __sanitizer_syscall_post_impl_ioprio_get(long res, long which, long who);
+void __sanitizer_syscall_pre_impl_set_mempolicy(long mode, long nmask, long maxnode);
+void __sanitizer_syscall_post_impl_set_mempolicy(long res, long mode, long nmask, long maxnode);
+void __sanitizer_syscall_pre_impl_migrate_pages(long pid, long maxnode, long from, long to);
+void __sanitizer_syscall_post_impl_migrate_pages(long res, long pid, long maxnode, long from, long to);
+void __sanitizer_syscall_pre_impl_move_pages(long pid, long nr_pages, long pages, long nodes, long status, long flags);
+void __sanitizer_syscall_post_impl_move_pages(long res, long pid, long nr_pages, long pages, long nodes, long status, long flags);
+void __sanitizer_syscall_pre_impl_mbind(long start, long len, long mode, long nmask, long maxnode, long flags);
+void __sanitizer_syscall_post_impl_mbind(long res, long start, long len, long mode, long nmask, long maxnode, long flags);
+void __sanitizer_syscall_pre_impl_get_mempolicy(long policy, long nmask, long maxnode, long addr, long flags);
+void __sanitizer_syscall_post_impl_get_mempolicy(long res, long policy, long nmask, long maxnode, long addr, long flags);
+void __sanitizer_syscall_pre_impl_inotify_init();
+void __sanitizer_syscall_post_impl_inotify_init(long res);
+void __sanitizer_syscall_pre_impl_inotify_init1(long flags);
+void __sanitizer_syscall_post_impl_inotify_init1(long res, long flags);
+void __sanitizer_syscall_pre_impl_inotify_add_watch(long fd, long path, long mask);
+void __sanitizer_syscall_post_impl_inotify_add_watch(long res, long fd, long path, long mask);
+void __sanitizer_syscall_pre_impl_inotify_rm_watch(long fd, long wd);
+void __sanitizer_syscall_post_impl_inotify_rm_watch(long res, long fd, long wd);
+void __sanitizer_syscall_pre_impl_spu_run(long fd, long unpc, long ustatus);
+void __sanitizer_syscall_post_impl_spu_run(long res, long fd, long unpc, long ustatus);
+void __sanitizer_syscall_pre_impl_spu_create(long name, long flags, long mode, long fd);
+void __sanitizer_syscall_post_impl_spu_create(long res, long name, long flags, long mode, long fd);
+void __sanitizer_syscall_pre_impl_mknodat(long dfd, long filename, long mode, long dev);
+void __sanitizer_syscall_post_impl_mknodat(long res, long dfd, long filename, long mode, long dev);
+void __sanitizer_syscall_pre_impl_mkdirat(long dfd, long pathname, long mode);
+void __sanitizer_syscall_post_impl_mkdirat(long res, long dfd, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_unlinkat(long dfd, long pathname, long flag);
+void __sanitizer_syscall_post_impl_unlinkat(long res, long dfd, long pathname, long flag);
+void __sanitizer_syscall_pre_impl_symlinkat(long oldname, long newdfd, long newname);
+void __sanitizer_syscall_post_impl_symlinkat(long res, long oldname, long newdfd, long newname);
+void __sanitizer_syscall_pre_impl_linkat(long olddfd, long oldname, long newdfd, long newname, long flags);
+void __sanitizer_syscall_post_impl_linkat(long res, long olddfd, long oldname, long newdfd, long newname, long flags);
+void __sanitizer_syscall_pre_impl_renameat(long olddfd, long oldname, long newdfd, long newname);
__sanitizer_syscall_pre_impl_renameat(long olddfd, long oldname, + long newdfd, long newname); +void __sanitizer_syscall_post_impl_renameat(long res, long olddfd, long oldname, + long newdfd, long newname); +void __sanitizer_syscall_pre_impl_futimesat(long dfd, long filename, + long utimes); +void __sanitizer_syscall_post_impl_futimesat(long res, long dfd, long filename, + long utimes); +void __sanitizer_syscall_pre_impl_faccessat(long dfd, long filename, long mode); +void __sanitizer_syscall_post_impl_faccessat(long res, long dfd, long filename, + long mode); +void __sanitizer_syscall_pre_impl_fchmodat(long dfd, long filename, long mode); +void __sanitizer_syscall_post_impl_fchmodat(long res, long dfd, long filename, + long mode); +void __sanitizer_syscall_pre_impl_fchownat(long dfd, long filename, long user, + long group, long flag); +void __sanitizer_syscall_post_impl_fchownat(long res, long dfd, long filename, + long user, long group, long flag); +void __sanitizer_syscall_pre_impl_openat(long dfd, long filename, long flags, + long mode); +void __sanitizer_syscall_post_impl_openat(long res, long dfd, long filename, + long flags, long mode); +void __sanitizer_syscall_pre_impl_newfstatat(long dfd, long filename, + long statbuf, long flag); +void __sanitizer_syscall_post_impl_newfstatat(long res, long dfd, long filename, + long statbuf, long flag); +void __sanitizer_syscall_pre_impl_fstatat64(long dfd, long filename, + long statbuf, long flag); +void __sanitizer_syscall_post_impl_fstatat64(long res, long dfd, long filename, + long statbuf, long flag); +void __sanitizer_syscall_pre_impl_readlinkat(long dfd, long path, long buf, + long bufsiz); +void __sanitizer_syscall_post_impl_readlinkat(long res, long dfd, long path, + long buf, long bufsiz); +void __sanitizer_syscall_pre_impl_utimensat(long dfd, long filename, + long utimes, long flags); +void __sanitizer_syscall_post_impl_utimensat(long res, long dfd, long filename, + long utimes, long flags); +void __sanitizer_syscall_pre_impl_unshare(long unshare_flags); +void __sanitizer_syscall_post_impl_unshare(long res, long unshare_flags); +void __sanitizer_syscall_pre_impl_splice(long fd_in, long off_in, long fd_out, + long off_out, long len, long flags); +void __sanitizer_syscall_post_impl_splice(long res, long fd_in, long off_in, + long fd_out, long off_out, long len, + long flags); +void __sanitizer_syscall_pre_impl_vmsplice(long fd, long iov, long nr_segs, + long flags); +void __sanitizer_syscall_post_impl_vmsplice(long res, long fd, long iov, + long nr_segs, long flags); +void __sanitizer_syscall_pre_impl_tee(long fdin, long fdout, long len, + long flags); +void __sanitizer_syscall_post_impl_tee(long res, long fdin, long fdout, + long len, long flags); +void __sanitizer_syscall_pre_impl_get_robust_list(long pid, long head_ptr, + long len_ptr); +void __sanitizer_syscall_post_impl_get_robust_list(long res, long pid, + long head_ptr, long len_ptr); +void __sanitizer_syscall_pre_impl_set_robust_list(long head, long len); +void __sanitizer_syscall_post_impl_set_robust_list(long res, long head, + long len); +void __sanitizer_syscall_pre_impl_getcpu(long cpu, long node, long cache); +void __sanitizer_syscall_post_impl_getcpu(long res, long cpu, long node, + long cache); +void __sanitizer_syscall_pre_impl_signalfd(long ufd, long user_mask, + long sizemask); +void __sanitizer_syscall_post_impl_signalfd(long res, long ufd, long user_mask, + long sizemask); +void __sanitizer_syscall_pre_impl_signalfd4(long ufd, long user_mask, + long sizemask, long 
flags); +void __sanitizer_syscall_post_impl_signalfd4(long res, long ufd, long user_mask, + long sizemask, long flags); +void __sanitizer_syscall_pre_impl_timerfd_create(long clockid, long flags); +void __sanitizer_syscall_post_impl_timerfd_create(long res, long clockid, + long flags); +void __sanitizer_syscall_pre_impl_timerfd_settime(long ufd, long flags, + long utmr, long otmr); +void __sanitizer_syscall_post_impl_timerfd_settime(long res, long ufd, + long flags, long utmr, + long otmr); +void __sanitizer_syscall_pre_impl_timerfd_gettime(long ufd, long otmr); +void __sanitizer_syscall_post_impl_timerfd_gettime(long res, long ufd, + long otmr); +void __sanitizer_syscall_pre_impl_eventfd(long count); +void __sanitizer_syscall_post_impl_eventfd(long res, long count); +void __sanitizer_syscall_pre_impl_eventfd2(long count, long flags); +void __sanitizer_syscall_post_impl_eventfd2(long res, long count, long flags); +void __sanitizer_syscall_pre_impl_old_readdir(long arg0, long arg1, long arg2); +void __sanitizer_syscall_post_impl_old_readdir(long res, long arg0, long arg1, + long arg2); +void __sanitizer_syscall_pre_impl_pselect6(long arg0, long arg1, long arg2, + long arg3, long arg4, long arg5); +void __sanitizer_syscall_post_impl_pselect6(long res, long arg0, long arg1, + long arg2, long arg3, long arg4, + long arg5); +void __sanitizer_syscall_pre_impl_ppoll(long arg0, long arg1, long arg2, + long arg3, long arg4); +void __sanitizer_syscall_post_impl_ppoll(long res, long arg0, long arg1, + long arg2, long arg3, long arg4); +void __sanitizer_syscall_pre_impl_fanotify_init(long flags, long event_f_flags); +void __sanitizer_syscall_post_impl_fanotify_init(long res, long flags, + long event_f_flags); +void __sanitizer_syscall_pre_impl_fanotify_mark(long fanotify_fd, long flags, + long mask, long fd, + long pathname); +void __sanitizer_syscall_post_impl_fanotify_mark(long res, long fanotify_fd, + long flags, long mask, long fd, + long pathname); +void __sanitizer_syscall_pre_impl_syncfs(long fd); +void __sanitizer_syscall_post_impl_syncfs(long res, long fd); +void __sanitizer_syscall_pre_impl_perf_event_open(long attr_uptr, long pid, + long cpu, long group_fd, + long flags); +void __sanitizer_syscall_post_impl_perf_event_open(long res, long attr_uptr, + long pid, long cpu, + long group_fd, long flags); +void __sanitizer_syscall_pre_impl_mmap_pgoff(long addr, long len, long prot, + long flags, long fd, long pgoff); +void __sanitizer_syscall_post_impl_mmap_pgoff(long res, long addr, long len, + long prot, long flags, long fd, + long pgoff); +void __sanitizer_syscall_pre_impl_old_mmap(long arg); +void __sanitizer_syscall_post_impl_old_mmap(long res, long arg); +void __sanitizer_syscall_pre_impl_name_to_handle_at(long dfd, long name, + long handle, long mnt_id, + long flag); +void __sanitizer_syscall_post_impl_name_to_handle_at(long res, long dfd, + long name, long handle, + long mnt_id, long flag); +void __sanitizer_syscall_pre_impl_open_by_handle_at(long mountdirfd, + long handle, long flags); +void __sanitizer_syscall_post_impl_open_by_handle_at(long res, long mountdirfd, + long handle, long flags); +void __sanitizer_syscall_pre_impl_setns(long fd, long nstype); +void __sanitizer_syscall_post_impl_setns(long res, long fd, long nstype); +void __sanitizer_syscall_pre_impl_process_vm_readv(long pid, long lvec, + long liovcnt, long rvec, + long riovcnt, long flags); +void __sanitizer_syscall_post_impl_process_vm_readv(long res, long pid, + long lvec, long liovcnt, + long rvec, long riovcnt, + long 
flags);
+void __sanitizer_syscall_pre_impl_process_vm_writev(long pid, long lvec,
+                                                    long liovcnt, long rvec,
+                                                    long riovcnt, long flags);
+void __sanitizer_syscall_post_impl_process_vm_writev(long res, long pid,
+                                                     long lvec, long liovcnt,
+                                                     long rvec, long riovcnt,
+                                                     long flags);
+void __sanitizer_syscall_pre_impl_fork();
+void __sanitizer_syscall_post_impl_fork(long res);
+void __sanitizer_syscall_pre_impl_vfork();
+void __sanitizer_syscall_post_impl_vfork(long res);
+void __sanitizer_syscall_pre_impl_sigaction(long signum, long act, long oldact);
+void __sanitizer_syscall_post_impl_sigaction(long res, long signum, long act,
+                                             long oldact);
+void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act,
+                                               long oldact, long sz);
+void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
+                                                long oldact, long sz);
+void __sanitizer_syscall_pre_impl_sigaltstack(long ss, long oss);
+void __sanitizer_syscall_post_impl_sigaltstack(long res, long ss, long oss);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_LINUX_SYSCALL_HOOKS_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/lsan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/lsan_interface.h
new file mode 100644
index 0000000..2bb9926
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/lsan_interface.h
@@ -0,0 +1,89 @@
+//===-- sanitizer/lsan_interface.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LSAN_INTERFACE_H
+#define SANITIZER_LSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  // Allocations made between calls to __lsan_disable() and __lsan_enable() will
+  // be treated as non-leaks. Disable/enable pairs may be nested.
+  void __lsan_disable(void);
+  void __lsan_enable(void);
+
+  // The heap object into which p points will be treated as a non-leak.
+  void __lsan_ignore_object(const void *p);
+
+  // Memory regions registered through this interface will be treated as sources
+  // of live pointers during leak checking. Useful if you store pointers in
+  // mapped memory.
+  // Points of note:
+  // - __lsan_unregister_root_region() must be called with the same pointer and
+  //   size that have earlier been passed to __lsan_register_root_region()
+  // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+  //   if you map memory within a larger region that you have mprotect'ed, you
+  //   can register the entire large region.
+  // - the implementation is not optimized for performance. This interface is
+  //   intended to be used for a small number of relatively static regions.
+  void __lsan_register_root_region(const void *p, size_t size);
+  void __lsan_unregister_root_region(const void *p, size_t size);
+
+  // Check for leaks now. This function behaves identically to the default
+  // end-of-process leak check. In particular, it will terminate the process if
+  // leaks are found and the exitcode runtime flag is non-zero.
+  // Subsequent calls to this function will have no effect and end-of-process
+  // leak check will not run. Effectively, end-of-process leak check is moved to
+  // the time of first invocation of this function.
+  // By calling this function early during process shutdown, you can instruct
+  // LSan to ignore shutdown-only leaks which happen later on.
+  void __lsan_do_leak_check(void);
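// A minimal usage sketch for the LSan calls declared above (illustrative
// only; not part of the lsan_interface.h file added by this diff). It assumes
// a build with -fsanitize=address or -fsanitize=leak; the g_cache name and
// the sizes are made up for the example.
#include <sanitizer/lsan_interface.h>
#include <stdlib.h>

static char *g_cache;                     // intentionally long-lived buffer

static void init_cache(void) {
  __lsan_disable();                       // allocations here are non-leaks
  g_cache = (char *)malloc(4096);
  __lsan_enable();                        // disable/enable pairs may nest

  char *tmp = (char *)malloc(16);
  __lsan_ignore_object(tmp);              // exempt a single heap object instead

  // Let LSan treat the cache as a source of live pointers during leak checks.
  __lsan_register_root_region(g_cache, 4096);
}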
+
+  // Check for leaks now. Returns zero if no leaks have been found or if leak
+  // detection is disabled, non-zero otherwise.
+  // This function may be called repeatedly, e.g. to periodically check a
+  // long-running process. It prints a leak report if appropriate, but does not
+  // terminate the process. It does not affect the behavior of
+  // __lsan_do_leak_check() or the end-of-process leak check, and is not
+  // affected by them.
+  int __lsan_do_recoverable_leak_check(void);
+
+  // The user may optionally provide this function to disallow leak checking
+  // for the program it is linked into (if the return value is non-zero). This
+  // function must be defined as returning a constant value; any behavior beyond
+  // that is unsupported.
+  // To avoid dead stripping, you may need to define this function with
+  // __attribute__((used))
+  int __lsan_is_turned_off(void);
+
+  // This function may be optionally provided by the user and should return
+  // a string containing LSan runtime options. See lsan_flags.inc for details.
+  const char *__lsan_default_options(void);
+
+  // This function may be optionally provided by the user and should return
+  // a string containing LSan suppressions.
+  const char *__lsan_default_suppressions(void);
+#ifdef __cplusplus
+} // extern "C"
+
+namespace __lsan {
+class ScopedDisabler {
+ public:
+  ScopedDisabler() { __lsan_disable(); }
+  ~ScopedDisabler() { __lsan_enable(); }
+};
+} // namespace __lsan
+#endif
+
+#endif // SANITIZER_LSAN_INTERFACE_H
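// A short C++ sketch of the RAII helper defined at the end of the header
// above (illustrative only; not part of this diff). Within the disabler's
// scope, allocations are treated as non-leaks, mirroring a manual
// __lsan_disable()/__lsan_enable() pair; the function name is hypothetical.
#include <sanitizer/lsan_interface.h>
#include <cstdlib>

void make_intentional_singleton() {
  __lsan::ScopedDisabler no_leak_reports;  // constructor calls __lsan_disable()
  void *singleton = std::malloc(256);      // never freed, but not reported
  (void)singleton;
}                                          // destructor calls __lsan_enable()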
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/msan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/msan_interface.h
new file mode 100644
index 0000000..eeb39fb
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/msan_interface.h
@@ -0,0 +1,124 @@
+//===-- msan_interface.h --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_INTERFACE_H
+#define MSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  /* Set raw origin for the memory range. */
+  void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
+
+  /* Get raw origin for an address. */
+  uint32_t __msan_get_origin(const volatile void *a);
+
+  /* Test that this_id is a descendant of prev_id (or they are simply equal).
+   * "descendant" here means they are part of the same chain, created with
+   * __msan_chain_origin. */
+  int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
+
+  /* Returns non-zero if tracking origins. */
+  int __msan_get_track_origins(void);
+
+  /* Returns the origin id of the latest UMR in the calling thread. */
+  uint32_t __msan_get_umr_origin(void);
+
+  /* Make memory region fully initialized (without changing its contents). */
+  void __msan_unpoison(const volatile void *a, size_t size);
+
+  /* Make a null-terminated string fully initialized (without changing its
+     contents). */
+  void __msan_unpoison_string(const volatile char *a);
+
+  /* Make first n parameters of the next function call fully initialized. */
+  void __msan_unpoison_param(size_t n);
+
+  /* Make memory region fully uninitialized (without changing its contents).
+     This is a legacy interface that does not update origin information. Use
+     __msan_allocated_memory() instead. */
+  void __msan_poison(const volatile void *a, size_t size);
+
+  /* Make memory region partially uninitialized (without changing its contents).
+   */
+  void __msan_partial_poison(const volatile void *data, void *shadow,
+                             size_t size);
+
+  /* Returns the offset of the first (at least partially) poisoned byte in the
+     memory range, or -1 if the whole range is good. */
+  intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+
+  /* Checks that memory range is fully initialized, and reports an error if it
+   * is not. */
+  void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
+
+  /* For testing:
+     __msan_set_expect_umr(1);
+     ... some buggy code ...
+     __msan_set_expect_umr(0);
+     The last line will verify that a UMR happened. */
+  void __msan_set_expect_umr(int expect_umr);
+
+  /* Change the value of keep_going flag. Non-zero value means don't terminate
+     program execution when an error is detected. This will not affect errors in
+     modules that were compiled without the corresponding compiler flag. */
+  void __msan_set_keep_going(int keep_going);
+
+  /* Print shadow and origin for the memory range to stderr in a human-readable
+     format. */
+  void __msan_print_shadow(const volatile void *x, size_t size);
+
+  /* Print shadow for the memory range to stderr in a minimalistic
+     human-readable format. */
+  void __msan_dump_shadow(const volatile void *x, size_t size);
+
+  /* Returns true if running under a dynamic tool (DynamoRio-based). */
+  int __msan_has_dynamic_component(void);
+
+  /* Tell MSan about newly allocated memory (e.g., a custom allocator).
+     Memory will be marked uninitialized, with origin at the call site. */
+  void __msan_allocated_memory(const volatile void* data, size_t size);
+
+  /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
+  void __sanitizer_dtor_callback(const volatile void* data, size_t size);
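// A usage sketch for the MSan calls declared above (illustrative only; not
// part of the msan_interface.h file added by this diff). Assumes a build with
// -fsanitize=memory; the three hook names are hypothetical.
#include <sanitizer/msan_interface.h>
#include <stddef.h>

void on_custom_alloc(void *p, size_t n) {
  // New memory from a custom allocator starts out uninitialized.
  __msan_allocated_memory(p, n);
}

void on_device_fill(void *p, size_t n) {
  // Memory written by code MSan cannot see (e.g. DMA) can be marked
  // initialized without touching its contents.
  __msan_unpoison(p, n);
}

void before_handoff(const void *p, size_t n) {
  // Report an uninitialized-memory error now rather than at first use.
  __msan_check_mem_is_initialized(p, n);
}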
+
+  /* This function may be optionally provided by the user and should return
+     a string containing MSan runtime options. See msan_flags.h for details. */
+  const char* __msan_default_options(void);
+
+  /* Deprecated. Call __sanitizer_set_death_callback instead. */
+  void __msan_set_death_callback(void (*callback)(void));
+
+  /* Update shadow for the application copy of size bytes from src to dst.
+     Src and dst are application addresses. This function does not copy the
+     actual application memory, it only updates shadow and origin for such
+     copy. Source and destination regions can overlap. */
+  void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
+                          size_t size);
+
+  /* Disables uninitialized memory checks in interceptors. */
+  void __msan_scoped_disable_interceptor_checks(void);
+
+  /* Re-enables uninitialized memory checks in interceptors after a previous
+     call to __msan_scoped_disable_interceptor_checks. */
+  void __msan_scoped_enable_interceptor_checks(void);
+
+  void __msan_start_switch_fiber(const void *bottom, size_t size);
+  void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/netbsd_syscall_hooks.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/netbsd_syscall_hooks.h
new file mode 100644
index 0000000..f661152
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/netbsd_syscall_hooks.h
@@ -0,0 +1,5005 @@
+//===-- netbsd_syscall_hooks.h --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of public sanitizer interface.
+//
+// System call handlers.
+//
+// Interface methods declared in this header implement pre- and post- syscall
+// actions for the active sanitizer.
+// Usage:
+//   __sanitizer_syscall_pre_getfoo(...args...);
+//   long long res = syscall(SYS_getfoo, ...args...);
+//   __sanitizer_syscall_post_getfoo(res, ...args...);
+//
+// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
+//
+// Generated with: generate_netbsd_syscalls.awk
+// Generated date: 2020-09-10
+// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
+#define SANITIZER_NETBSD_SYSCALL_HOOKS_H
+
+#define __sanitizer_syscall_pre_syscall(code, arg0, arg1, arg2, arg3, arg4, \
+                                        arg5, arg6, arg7) \
+  __sanitizer_syscall_pre_impl_syscall( \
+      (long long)(code), (long long)(arg0), (long long)(arg1), \
+      (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+      (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_post_syscall(res, code, arg0, arg1, arg2, arg3, \
+                                         arg4, arg5, arg6, arg7) \
+  __sanitizer_syscall_post_impl_syscall( \
+      res, (long long)(code), (long long)(arg0), (long long)(arg1), \
+      (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+      (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_pre_exit(rval) \
+  __sanitizer_syscall_pre_impl_exit((long long)(rval))
+#define __sanitizer_syscall_post_exit(res, rval) \
+  __sanitizer_syscall_post_impl_exit(res, (long long)(rval))
+#define __sanitizer_syscall_pre_fork() __sanitizer_syscall_pre_impl_fork()
+#define __sanitizer_syscall_post_fork(res) \
+  __sanitizer_syscall_post_impl_fork(res)
+#define __sanitizer_syscall_pre_read(fd, buf, nbyte) \
+  __sanitizer_syscall_pre_impl_read((long long)(fd), (long long)(buf), \
+                                    (long long)(nbyte))
+#define __sanitizer_syscall_post_read(res, fd, buf, nbyte) \
+  __sanitizer_syscall_post_impl_read(res, (long long)(fd), (long long)(buf), \
+                                     (long long)(nbyte))
+#define __sanitizer_syscall_pre_write(fd, buf, nbyte) \
+  __sanitizer_syscall_pre_impl_write((long long)(fd), (long long)(buf), \
+                                     (long long)(nbyte))
+#define __sanitizer_syscall_post_write(res, fd, buf, nbyte) \
+  __sanitizer_syscall_post_impl_write(res, (long long)(fd), (long long)(buf), \
+                                      (long long)(nbyte))
+#define __sanitizer_syscall_pre_open(path, flags, mode) \
__sanitizer_syscall_pre_impl_open((long long)(path), (long long)(flags), \ + (long long)(mode)) +#define __sanitizer_syscall_post_open(res, path, flags, mode) \ + __sanitizer_syscall_post_impl_open(res, (long long)(path), \ + (long long)(flags), (long long)(mode)) +#define __sanitizer_syscall_pre_close(fd) \ + __sanitizer_syscall_pre_impl_close((long long)(fd)) +#define __sanitizer_syscall_post_close(res, fd) \ + __sanitizer_syscall_post_impl_close(res, (long long)(fd)) +#define __sanitizer_syscall_pre_compat_50_wait4(pid, status, options, rusage) \ + __sanitizer_syscall_pre_impl_compat_50_wait4( \ + (long long)(pid), (long long)(status), (long long)(options), \ + (long long)(rusage)) +#define __sanitizer_syscall_post_compat_50_wait4(res, pid, status, options, \ + rusage) \ + __sanitizer_syscall_post_impl_compat_50_wait4( \ + res, (long long)(pid), (long long)(status), (long long)(options), \ + (long long)(rusage)) +#define __sanitizer_syscall_pre_compat_43_ocreat(path, mode) \ + __sanitizer_syscall_pre_impl_compat_43_ocreat((long long)(path), \ + (long long)(mode)) +#define __sanitizer_syscall_post_compat_43_ocreat(res, path, mode) \ + __sanitizer_syscall_post_impl_compat_43_ocreat(res, (long long)(path), \ + (long long)(mode)) +#define __sanitizer_syscall_pre_link(path, link) \ + __sanitizer_syscall_pre_impl_link((long long)(path), (long long)(link)) +#define __sanitizer_syscall_post_link(res, path, link) \ + __sanitizer_syscall_post_impl_link(res, (long long)(path), (long long)(link)) +#define __sanitizer_syscall_pre_unlink(path) \ + __sanitizer_syscall_pre_impl_unlink((long long)(path)) +#define __sanitizer_syscall_post_unlink(res, path) \ + __sanitizer_syscall_post_impl_unlink(res, (long long)(path)) +/* syscall 11 has been skipped */ +#define __sanitizer_syscall_pre_chdir(path) \ + __sanitizer_syscall_pre_impl_chdir((long long)(path)) +#define __sanitizer_syscall_post_chdir(res, path) \ + __sanitizer_syscall_post_impl_chdir(res, (long long)(path)) +#define __sanitizer_syscall_pre_fchdir(fd) \ + __sanitizer_syscall_pre_impl_fchdir((long long)(fd)) +#define __sanitizer_syscall_post_fchdir(res, fd) \ + __sanitizer_syscall_post_impl_fchdir(res, (long long)(fd)) +#define __sanitizer_syscall_pre_compat_50_mknod(path, mode, dev) \ + __sanitizer_syscall_pre_impl_compat_50_mknod( \ + (long long)(path), (long long)(mode), (long long)(dev)) +#define __sanitizer_syscall_post_compat_50_mknod(res, path, mode, dev) \ + __sanitizer_syscall_post_impl_compat_50_mknod( \ + res, (long long)(path), (long long)(mode), (long long)(dev)) +#define __sanitizer_syscall_pre_chmod(path, mode) \ + __sanitizer_syscall_pre_impl_chmod((long long)(path), (long long)(mode)) +#define __sanitizer_syscall_post_chmod(res, path, mode) \ + __sanitizer_syscall_post_impl_chmod(res, (long long)(path), (long long)(mode)) +#define __sanitizer_syscall_pre_chown(path, uid, gid) \ + __sanitizer_syscall_pre_impl_chown((long long)(path), (long long)(uid), \ + (long long)(gid)) +#define __sanitizer_syscall_post_chown(res, path, uid, gid) \ + __sanitizer_syscall_post_impl_chown(res, (long long)(path), \ + (long long)(uid), (long long)(gid)) +#define __sanitizer_syscall_pre_break(nsize) \ + __sanitizer_syscall_pre_impl_break((long long)(nsize)) +#define __sanitizer_syscall_post_break(res, nsize) \ + __sanitizer_syscall_post_impl_break(res, (long long)(nsize)) +#define __sanitizer_syscall_pre_compat_20_getfsstat(buf, bufsize, flags) \ + __sanitizer_syscall_pre_impl_compat_20_getfsstat( \ + (long long)(buf), (long long)(bufsize), (long 
long)(flags)) +#define __sanitizer_syscall_post_compat_20_getfsstat(res, buf, bufsize, flags) \ + __sanitizer_syscall_post_impl_compat_20_getfsstat( \ + res, (long long)(buf), (long long)(bufsize), (long long)(flags)) +#define __sanitizer_syscall_pre_compat_43_olseek(fd, offset, whence) \ + __sanitizer_syscall_pre_impl_compat_43_olseek( \ + (long long)(fd), (long long)(offset), (long long)(whence)) +#define __sanitizer_syscall_post_compat_43_olseek(res, fd, offset, whence) \ + __sanitizer_syscall_post_impl_compat_43_olseek( \ + res, (long long)(fd), (long long)(offset), (long long)(whence)) +#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid() +#define __sanitizer_syscall_post_getpid(res) \ + __sanitizer_syscall_post_impl_getpid(res) +#define __sanitizer_syscall_pre_compat_40_mount(type, path, flags, data) \ + __sanitizer_syscall_pre_impl_compat_40_mount( \ + (long long)(type), (long long)(path), (long long)(flags), \ + (long long)(data)) +#define __sanitizer_syscall_post_compat_40_mount(res, type, path, flags, data) \ + __sanitizer_syscall_post_impl_compat_40_mount( \ + res, (long long)(type), (long long)(path), (long long)(flags), \ + (long long)(data)) +#define __sanitizer_syscall_pre_unmount(path, flags) \ + __sanitizer_syscall_pre_impl_unmount((long long)(path), (long long)(flags)) +#define __sanitizer_syscall_post_unmount(res, path, flags) \ + __sanitizer_syscall_post_impl_unmount(res, (long long)(path), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_setuid(uid) \ + __sanitizer_syscall_pre_impl_setuid((long long)(uid)) +#define __sanitizer_syscall_post_setuid(res, uid) \ + __sanitizer_syscall_post_impl_setuid(res, (long long)(uid)) +#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid() +#define __sanitizer_syscall_post_getuid(res) \ + __sanitizer_syscall_post_impl_getuid(res) +#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid() +#define __sanitizer_syscall_post_geteuid(res) \ + __sanitizer_syscall_post_impl_geteuid(res) +#define __sanitizer_syscall_pre_ptrace(req, pid, addr, data) \ + __sanitizer_syscall_pre_impl_ptrace((long long)(req), (long long)(pid), \ + (long long)(addr), (long long)(data)) +#define __sanitizer_syscall_post_ptrace(res, req, pid, addr, data) \ + __sanitizer_syscall_post_impl_ptrace(res, (long long)(req), \ + (long long)(pid), (long long)(addr), \ + (long long)(data)) +#define __sanitizer_syscall_pre_recvmsg(s, msg, flags) \ + __sanitizer_syscall_pre_impl_recvmsg((long long)(s), (long long)(msg), \ + (long long)(flags)) +#define __sanitizer_syscall_post_recvmsg(res, s, msg, flags) \ + __sanitizer_syscall_post_impl_recvmsg(res, (long long)(s), (long long)(msg), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_sendmsg(s, msg, flags) \ + __sanitizer_syscall_pre_impl_sendmsg((long long)(s), (long long)(msg), \ + (long long)(flags)) +#define __sanitizer_syscall_post_sendmsg(res, s, msg, flags) \ + __sanitizer_syscall_post_impl_sendmsg(res, (long long)(s), (long long)(msg), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_recvfrom(s, buf, len, flags, from, \ + fromlenaddr) \ + __sanitizer_syscall_pre_impl_recvfrom( \ + (long long)(s), (long long)(buf), (long long)(len), (long long)(flags), \ + (long long)(from), (long long)(fromlenaddr)) +#define __sanitizer_syscall_post_recvfrom(res, s, buf, len, flags, from, \ + fromlenaddr) \ + __sanitizer_syscall_post_impl_recvfrom( \ + res, (long long)(s), (long long)(buf), (long long)(len), \ + (long long)(flags), (long 
long)(from), (long long)(fromlenaddr)) +#define __sanitizer_syscall_pre_accept(s, name, anamelen) \ + __sanitizer_syscall_pre_impl_accept((long long)(s), (long long)(name), \ + (long long)(anamelen)) +#define __sanitizer_syscall_post_accept(res, s, name, anamelen) \ + __sanitizer_syscall_post_impl_accept(res, (long long)(s), (long long)(name), \ + (long long)(anamelen)) +#define __sanitizer_syscall_pre_getpeername(fdes, asa, alen) \ + __sanitizer_syscall_pre_impl_getpeername( \ + (long long)(fdes), (long long)(asa), (long long)(alen)) +#define __sanitizer_syscall_post_getpeername(res, fdes, asa, alen) \ + __sanitizer_syscall_post_impl_getpeername( \ + res, (long long)(fdes), (long long)(asa), (long long)(alen)) +#define __sanitizer_syscall_pre_getsockname(fdes, asa, alen) \ + __sanitizer_syscall_pre_impl_getsockname( \ + (long long)(fdes), (long long)(asa), (long long)(alen)) +#define __sanitizer_syscall_post_getsockname(res, fdes, asa, alen) \ + __sanitizer_syscall_post_impl_getsockname( \ + res, (long long)(fdes), (long long)(asa), (long long)(alen)) +#define __sanitizer_syscall_pre_access(path, flags) \ + __sanitizer_syscall_pre_impl_access((long long)(path), (long long)(flags)) +#define __sanitizer_syscall_post_access(res, path, flags) \ + __sanitizer_syscall_post_impl_access(res, (long long)(path), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_chflags(path, flags) \ + __sanitizer_syscall_pre_impl_chflags((long long)(path), (long long)(flags)) +#define __sanitizer_syscall_post_chflags(res, path, flags) \ + __sanitizer_syscall_post_impl_chflags(res, (long long)(path), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_fchflags(fd, flags) \ + __sanitizer_syscall_pre_impl_fchflags((long long)(fd), (long long)(flags)) +#define __sanitizer_syscall_post_fchflags(res, fd, flags) \ + __sanitizer_syscall_post_impl_fchflags(res, (long long)(fd), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync() +#define __sanitizer_syscall_post_sync(res) \ + __sanitizer_syscall_post_impl_sync(res) +#define __sanitizer_syscall_pre_kill(pid, signum) \ + __sanitizer_syscall_pre_impl_kill((long long)(pid), (long long)(signum)) +#define __sanitizer_syscall_post_kill(res, pid, signum) \ + __sanitizer_syscall_post_impl_kill(res, (long long)(pid), (long long)(signum)) +#define __sanitizer_syscall_pre_compat_43_stat43(path, ub) \ + __sanitizer_syscall_pre_impl_compat_43_stat43((long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_post_compat_43_stat43(res, path, ub) \ + __sanitizer_syscall_post_impl_compat_43_stat43(res, (long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid() +#define __sanitizer_syscall_post_getppid(res) \ + __sanitizer_syscall_post_impl_getppid(res) +#define __sanitizer_syscall_pre_compat_43_lstat43(path, ub) \ + __sanitizer_syscall_pre_impl_compat_43_lstat43((long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_post_compat_43_lstat43(res, path, ub) \ + __sanitizer_syscall_post_impl_compat_43_lstat43(res, (long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_pre_dup(fd) \ + __sanitizer_syscall_pre_impl_dup((long long)(fd)) +#define __sanitizer_syscall_post_dup(res, fd) \ + __sanitizer_syscall_post_impl_dup(res, (long long)(fd)) +#define __sanitizer_syscall_pre_pipe() __sanitizer_syscall_pre_impl_pipe() +#define __sanitizer_syscall_post_pipe(res) \ + __sanitizer_syscall_post_impl_pipe(res) +#define 
__sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid() +#define __sanitizer_syscall_post_getegid(res) \ + __sanitizer_syscall_post_impl_getegid(res) +#define __sanitizer_syscall_pre_profil(samples, size, offset, scale) \ + __sanitizer_syscall_pre_impl_profil((long long)(samples), (long long)(size), \ + (long long)(offset), (long long)(scale)) +#define __sanitizer_syscall_post_profil(res, samples, size, offset, scale) \ + __sanitizer_syscall_post_impl_profil(res, (long long)(samples), \ + (long long)(size), (long long)(offset), \ + (long long)(scale)) +#define __sanitizer_syscall_pre_ktrace(fname, ops, facs, pid) \ + __sanitizer_syscall_pre_impl_ktrace((long long)(fname), (long long)(ops), \ + (long long)(facs), (long long)(pid)) +#define __sanitizer_syscall_post_ktrace(res, fname, ops, facs, pid) \ + __sanitizer_syscall_post_impl_ktrace(res, (long long)(fname), \ + (long long)(ops), (long long)(facs), \ + (long long)(pid)) +#define __sanitizer_syscall_pre_compat_13_sigaction13(signum, nsa, osa) \ + __sanitizer_syscall_pre_impl_compat_13_sigaction13( \ + (long long)(signum), (long long)(nsa), (long long)(osa)) +#define __sanitizer_syscall_post_compat_13_sigaction13(res, signum, nsa, osa) \ + __sanitizer_syscall_post_impl_compat_13_sigaction13( \ + res, (long long)(signum), (long long)(nsa), (long long)(osa)) +#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid() +#define __sanitizer_syscall_post_getgid(res) \ + __sanitizer_syscall_post_impl_getgid(res) +#define __sanitizer_syscall_pre_compat_13_sigprocmask13(how, mask) \ + __sanitizer_syscall_pre_impl_compat_13_sigprocmask13((long long)(how), \ + (long long)(mask)) +#define __sanitizer_syscall_post_compat_13_sigprocmask13(res, how, mask) \ + __sanitizer_syscall_post_impl_compat_13_sigprocmask13(res, (long long)(how), \ + (long long)(mask)) +#define __sanitizer_syscall_pre___getlogin(namebuf, namelen) \ + __sanitizer_syscall_pre_impl___getlogin((long long)(namebuf), \ + (long long)(namelen)) +#define __sanitizer_syscall_post___getlogin(res, namebuf, namelen) \ + __sanitizer_syscall_post_impl___getlogin(res, (long long)(namebuf), \ + (long long)(namelen)) +#define __sanitizer_syscall_pre___setlogin(namebuf) \ + __sanitizer_syscall_pre_impl___setlogin((long long)(namebuf)) +#define __sanitizer_syscall_post___setlogin(res, namebuf) \ + __sanitizer_syscall_post_impl___setlogin(res, (long long)(namebuf)) +#define __sanitizer_syscall_pre_acct(path) \ + __sanitizer_syscall_pre_impl_acct((long long)(path)) +#define __sanitizer_syscall_post_acct(res, path) \ + __sanitizer_syscall_post_impl_acct(res, (long long)(path)) +#define __sanitizer_syscall_pre_compat_13_sigpending13() \ + __sanitizer_syscall_pre_impl_compat_13_sigpending13() +#define __sanitizer_syscall_post_compat_13_sigpending13(res) \ + __sanitizer_syscall_post_impl_compat_13_sigpending13(res) +#define __sanitizer_syscall_pre_compat_13_sigaltstack13(nss, oss) \ + __sanitizer_syscall_pre_impl_compat_13_sigaltstack13((long long)(nss), \ + (long long)(oss)) +#define __sanitizer_syscall_post_compat_13_sigaltstack13(res, nss, oss) \ + __sanitizer_syscall_post_impl_compat_13_sigaltstack13(res, (long long)(nss), \ + (long long)(oss)) +#define __sanitizer_syscall_pre_ioctl(fd, com, data) \ + __sanitizer_syscall_pre_impl_ioctl((long long)(fd), (long long)(com), \ + (long long)(data)) +#define __sanitizer_syscall_post_ioctl(res, fd, com, data) \ + __sanitizer_syscall_post_impl_ioctl(res, (long long)(fd), (long long)(com), \ + (long long)(data)) +#define 
__sanitizer_syscall_pre_compat_12_oreboot(opt) \ + __sanitizer_syscall_pre_impl_compat_12_oreboot((long long)(opt)) +#define __sanitizer_syscall_post_compat_12_oreboot(res, opt) \ + __sanitizer_syscall_post_impl_compat_12_oreboot(res, (long long)(opt)) +#define __sanitizer_syscall_pre_revoke(path) \ + __sanitizer_syscall_pre_impl_revoke((long long)(path)) +#define __sanitizer_syscall_post_revoke(res, path) \ + __sanitizer_syscall_post_impl_revoke(res, (long long)(path)) +#define __sanitizer_syscall_pre_symlink(path, link) \ + __sanitizer_syscall_pre_impl_symlink((long long)(path), (long long)(link)) +#define __sanitizer_syscall_post_symlink(res, path, link) \ + __sanitizer_syscall_post_impl_symlink(res, (long long)(path), \ + (long long)(link)) +#define __sanitizer_syscall_pre_readlink(path, buf, count) \ + __sanitizer_syscall_pre_impl_readlink((long long)(path), (long long)(buf), \ + (long long)(count)) +#define __sanitizer_syscall_post_readlink(res, path, buf, count) \ + __sanitizer_syscall_post_impl_readlink(res, (long long)(path), \ + (long long)(buf), (long long)(count)) +#define __sanitizer_syscall_pre_execve(path, argp, envp) \ + __sanitizer_syscall_pre_impl_execve((long long)(path), (long long)(argp), \ + (long long)(envp)) +#define __sanitizer_syscall_post_execve(res, path, argp, envp) \ + __sanitizer_syscall_post_impl_execve(res, (long long)(path), \ + (long long)(argp), (long long)(envp)) +#define __sanitizer_syscall_pre_umask(newmask) \ + __sanitizer_syscall_pre_impl_umask((long long)(newmask)) +#define __sanitizer_syscall_post_umask(res, newmask) \ + __sanitizer_syscall_post_impl_umask(res, (long long)(newmask)) +#define __sanitizer_syscall_pre_chroot(path) \ + __sanitizer_syscall_pre_impl_chroot((long long)(path)) +#define __sanitizer_syscall_post_chroot(res, path) \ + __sanitizer_syscall_post_impl_chroot(res, (long long)(path)) +#define __sanitizer_syscall_pre_compat_43_fstat43(fd, sb) \ + __sanitizer_syscall_pre_impl_compat_43_fstat43((long long)(fd), \ + (long long)(sb)) +#define __sanitizer_syscall_post_compat_43_fstat43(res, fd, sb) \ + __sanitizer_syscall_post_impl_compat_43_fstat43(res, (long long)(fd), \ + (long long)(sb)) +#define __sanitizer_syscall_pre_compat_43_ogetkerninfo(op, where, size, arg) \ + __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo( \ + (long long)(op), (long long)(where), (long long)(size), \ + (long long)(arg)) +#define __sanitizer_syscall_post_compat_43_ogetkerninfo(res, op, where, size, \ + arg) \ + __sanitizer_syscall_post_impl_compat_43_ogetkerninfo( \ + res, (long long)(op), (long long)(where), (long long)(size), \ + (long long)(arg)) +#define __sanitizer_syscall_pre_compat_43_ogetpagesize() \ + __sanitizer_syscall_pre_impl_compat_43_ogetpagesize() +#define __sanitizer_syscall_post_compat_43_ogetpagesize(res) \ + __sanitizer_syscall_post_impl_compat_43_ogetpagesize(res) +#define __sanitizer_syscall_pre_compat_12_msync(addr, len) \ + __sanitizer_syscall_pre_impl_compat_12_msync((long long)(addr), \ + (long long)(len)) +#define __sanitizer_syscall_post_compat_12_msync(res, addr, len) \ + __sanitizer_syscall_post_impl_compat_12_msync(res, (long long)(addr), \ + (long long)(len)) +#define __sanitizer_syscall_pre_vfork() __sanitizer_syscall_pre_impl_vfork() +#define __sanitizer_syscall_post_vfork(res) \ + __sanitizer_syscall_post_impl_vfork(res) +/* syscall 67 has been skipped */ +/* syscall 68 has been skipped */ +/* syscall 69 has been skipped */ +/* syscall 70 has been skipped */ +#define __sanitizer_syscall_pre_compat_43_ommap(addr, len, 
prot, flags, fd, \ + pos) \ + __sanitizer_syscall_pre_impl_compat_43_ommap( \ + (long long)(addr), (long long)(len), (long long)(prot), \ + (long long)(flags), (long long)(fd), (long long)(pos)) +#define __sanitizer_syscall_post_compat_43_ommap(res, addr, len, prot, flags, \ + fd, pos) \ + __sanitizer_syscall_post_impl_compat_43_ommap( \ + res, (long long)(addr), (long long)(len), (long long)(prot), \ + (long long)(flags), (long long)(fd), (long long)(pos)) +#define __sanitizer_syscall_pre_vadvise(anom) \ + __sanitizer_syscall_pre_impl_vadvise((long long)(anom)) +#define __sanitizer_syscall_post_vadvise(res, anom) \ + __sanitizer_syscall_post_impl_vadvise(res, (long long)(anom)) +#define __sanitizer_syscall_pre_munmap(addr, len) \ + __sanitizer_syscall_pre_impl_munmap((long long)(addr), (long long)(len)) +#define __sanitizer_syscall_post_munmap(res, addr, len) \ + __sanitizer_syscall_post_impl_munmap(res, (long long)(addr), (long long)(len)) +#define __sanitizer_syscall_pre_mprotect(addr, len, prot) \ + __sanitizer_syscall_pre_impl_mprotect((long long)(addr), (long long)(len), \ + (long long)(prot)) +#define __sanitizer_syscall_post_mprotect(res, addr, len, prot) \ + __sanitizer_syscall_post_impl_mprotect(res, (long long)(addr), \ + (long long)(len), (long long)(prot)) +#define __sanitizer_syscall_pre_madvise(addr, len, behav) \ + __sanitizer_syscall_pre_impl_madvise((long long)(addr), (long long)(len), \ + (long long)(behav)) +#define __sanitizer_syscall_post_madvise(res, addr, len, behav) \ + __sanitizer_syscall_post_impl_madvise(res, (long long)(addr), \ + (long long)(len), (long long)(behav)) +/* syscall 76 has been skipped */ +/* syscall 77 has been skipped */ +#define __sanitizer_syscall_pre_mincore(addr, len, vec) \ + __sanitizer_syscall_pre_impl_mincore((long long)(addr), (long long)(len), \ + (long long)(vec)) +#define __sanitizer_syscall_post_mincore(res, addr, len, vec) \ + __sanitizer_syscall_post_impl_mincore(res, (long long)(addr), \ + (long long)(len), (long long)(vec)) +#define __sanitizer_syscall_pre_getgroups(gidsetsize, gidset) \ + __sanitizer_syscall_pre_impl_getgroups((long long)(gidsetsize), \ + (long long)(gidset)) +#define __sanitizer_syscall_post_getgroups(res, gidsetsize, gidset) \ + __sanitizer_syscall_post_impl_getgroups(res, (long long)(gidsetsize), \ + (long long)(gidset)) +#define __sanitizer_syscall_pre_setgroups(gidsetsize, gidset) \ + __sanitizer_syscall_pre_impl_setgroups((long long)(gidsetsize), \ + (long long)(gidset)) +#define __sanitizer_syscall_post_setgroups(res, gidsetsize, gidset) \ + __sanitizer_syscall_post_impl_setgroups(res, (long long)(gidsetsize), \ + (long long)(gidset)) +#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp() +#define __sanitizer_syscall_post_getpgrp(res) \ + __sanitizer_syscall_post_impl_getpgrp(res) +#define __sanitizer_syscall_pre_setpgid(pid, pgid) \ + __sanitizer_syscall_pre_impl_setpgid((long long)(pid), (long long)(pgid)) +#define __sanitizer_syscall_post_setpgid(res, pid, pgid) \ + __sanitizer_syscall_post_impl_setpgid(res, (long long)(pid), \ + (long long)(pgid)) +#define __sanitizer_syscall_pre_compat_50_setitimer(which, itv, oitv) \ + __sanitizer_syscall_pre_impl_compat_50_setitimer( \ + (long long)(which), (long long)(itv), (long long)(oitv)) +#define __sanitizer_syscall_post_compat_50_setitimer(res, which, itv, oitv) \ + __sanitizer_syscall_post_impl_compat_50_setitimer( \ + res, (long long)(which), (long long)(itv), (long long)(oitv)) +#define __sanitizer_syscall_pre_compat_43_owait() 
\ + __sanitizer_syscall_pre_impl_compat_43_owait() +#define __sanitizer_syscall_post_compat_43_owait(res) \ + __sanitizer_syscall_post_impl_compat_43_owait(res) +#define __sanitizer_syscall_pre_compat_12_oswapon(name) \ + __sanitizer_syscall_pre_impl_compat_12_oswapon((long long)(name)) +#define __sanitizer_syscall_post_compat_12_oswapon(res, name) \ + __sanitizer_syscall_post_impl_compat_12_oswapon(res, (long long)(name)) +#define __sanitizer_syscall_pre_compat_50_getitimer(which, itv) \ + __sanitizer_syscall_pre_impl_compat_50_getitimer((long long)(which), \ + (long long)(itv)) +#define __sanitizer_syscall_post_compat_50_getitimer(res, which, itv) \ + __sanitizer_syscall_post_impl_compat_50_getitimer(res, (long long)(which), \ + (long long)(itv)) +#define __sanitizer_syscall_pre_compat_43_ogethostname(hostname, len) \ + __sanitizer_syscall_pre_impl_compat_43_ogethostname((long long)(hostname), \ + (long long)(len)) +#define __sanitizer_syscall_post_compat_43_ogethostname(res, hostname, len) \ + __sanitizer_syscall_post_impl_compat_43_ogethostname( \ + res, (long long)(hostname), (long long)(len)) +#define __sanitizer_syscall_pre_compat_43_osethostname(hostname, len) \ + __sanitizer_syscall_pre_impl_compat_43_osethostname((long long)(hostname), \ + (long long)(len)) +#define __sanitizer_syscall_post_compat_43_osethostname(res, hostname, len) \ + __sanitizer_syscall_post_impl_compat_43_osethostname( \ + res, (long long)(hostname), (long long)(len)) +#define __sanitizer_syscall_pre_compat_43_ogetdtablesize() \ + __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize() +#define __sanitizer_syscall_post_compat_43_ogetdtablesize(res) \ + __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(res) +#define __sanitizer_syscall_pre_dup2(from, to) \ + __sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to)) +#define __sanitizer_syscall_post_dup2(res, from, to) \ + __sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to)) +#define __sanitizer_syscall_pre_getrandom(buf, buflen, flags) \ + __sanitizer_syscall_pre_impl_getrandom( \ + (long long)(buf), (long long)(buflen), (long long)(flags)) +#define __sanitizer_syscall_post_getrandom(res, buf, buflen, flags) \ + __sanitizer_syscall_post_impl_getrandom( \ + res, (long long)(buf), (long long)(buflen), (long long)(flags)) +#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \ + __sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd), \ + (long long)(arg)) +#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg) \ + __sanitizer_syscall_post_impl_fcntl(res, (long long)(fd), (long long)(cmd), \ + (long long)(arg)) +#define __sanitizer_syscall_pre_compat_50_select(nd, in, ou, ex, tv) \ + __sanitizer_syscall_pre_impl_compat_50_select( \ + (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \ + (long long)(tv)) +#define __sanitizer_syscall_post_compat_50_select(res, nd, in, ou, ex, tv) \ + __sanitizer_syscall_post_impl_compat_50_select( \ + res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \ + (long long)(tv)) +/* syscall 94 has been skipped */ +#define __sanitizer_syscall_pre_fsync(fd) \ + __sanitizer_syscall_pre_impl_fsync((long long)(fd)) +#define __sanitizer_syscall_post_fsync(res, fd) \ + __sanitizer_syscall_post_impl_fsync(res, (long long)(fd)) +#define __sanitizer_syscall_pre_setpriority(which, who, prio) \ + __sanitizer_syscall_pre_impl_setpriority( \ + (long long)(which), (long long)(who), (long long)(prio)) +#define __sanitizer_syscall_post_setpriority(res, which, 
who, prio) \ + __sanitizer_syscall_post_impl_setpriority( \ + res, (long long)(which), (long long)(who), (long long)(prio)) +#define __sanitizer_syscall_pre_compat_30_socket(domain, type, protocol) \ + __sanitizer_syscall_pre_impl_compat_30_socket( \ + (long long)(domain), (long long)(type), (long long)(protocol)) +#define __sanitizer_syscall_post_compat_30_socket(res, domain, type, protocol) \ + __sanitizer_syscall_post_impl_compat_30_socket( \ + res, (long long)(domain), (long long)(type), (long long)(protocol)) +#define __sanitizer_syscall_pre_connect(s, name, namelen) \ + __sanitizer_syscall_pre_impl_connect((long long)(s), (long long)(name), \ + (long long)(namelen)) +#define __sanitizer_syscall_post_connect(res, s, name, namelen) \ + __sanitizer_syscall_post_impl_connect( \ + res, (long long)(s), (long long)(name), (long long)(namelen)) +#define __sanitizer_syscall_pre_compat_43_oaccept(s, name, anamelen) \ + __sanitizer_syscall_pre_impl_compat_43_oaccept( \ + (long long)(s), (long long)(name), (long long)(anamelen)) +#define __sanitizer_syscall_post_compat_43_oaccept(res, s, name, anamelen) \ + __sanitizer_syscall_post_impl_compat_43_oaccept( \ + res, (long long)(s), (long long)(name), (long long)(anamelen)) +#define __sanitizer_syscall_pre_getpriority(which, who) \ + __sanitizer_syscall_pre_impl_getpriority((long long)(which), (long long)(who)) +#define __sanitizer_syscall_post_getpriority(res, which, who) \ + __sanitizer_syscall_post_impl_getpriority(res, (long long)(which), \ + (long long)(who)) +#define __sanitizer_syscall_pre_compat_43_osend(s, buf, len, flags) \ + __sanitizer_syscall_pre_impl_compat_43_osend( \ + (long long)(s), (long long)(buf), (long long)(len), (long long)(flags)) +#define __sanitizer_syscall_post_compat_43_osend(res, s, buf, len, flags) \ + __sanitizer_syscall_post_impl_compat_43_osend( \ + res, (long long)(s), (long long)(buf), (long long)(len), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_compat_43_orecv(s, buf, len, flags) \ + __sanitizer_syscall_pre_impl_compat_43_orecv( \ + (long long)(s), (long long)(buf), (long long)(len), (long long)(flags)) +#define __sanitizer_syscall_post_compat_43_orecv(res, s, buf, len, flags) \ + __sanitizer_syscall_post_impl_compat_43_orecv( \ + res, (long long)(s), (long long)(buf), (long long)(len), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_compat_13_sigreturn13(sigcntxp) \ + __sanitizer_syscall_pre_impl_compat_13_sigreturn13((long long)(sigcntxp)) +#define __sanitizer_syscall_post_compat_13_sigreturn13(res, sigcntxp) \ + __sanitizer_syscall_post_impl_compat_13_sigreturn13(res, \ + (long long)(sigcntxp)) +#define __sanitizer_syscall_pre_bind(s, name, namelen) \ + __sanitizer_syscall_pre_impl_bind((long long)(s), (long long)(name), \ + (long long)(namelen)) +#define __sanitizer_syscall_post_bind(res, s, name, namelen) \ + __sanitizer_syscall_post_impl_bind(res, (long long)(s), (long long)(name), \ + (long long)(namelen)) +#define __sanitizer_syscall_pre_setsockopt(s, level, name, val, valsize) \ + __sanitizer_syscall_pre_impl_setsockopt((long long)(s), (long long)(level), \ + (long long)(name), (long long)(val), \ + (long long)(valsize)) +#define __sanitizer_syscall_post_setsockopt(res, s, level, name, val, valsize) \ + __sanitizer_syscall_post_impl_setsockopt( \ + res, (long long)(s), (long long)(level), (long long)(name), \ + (long long)(val), (long long)(valsize)) +#define __sanitizer_syscall_pre_listen(s, backlog) \ + __sanitizer_syscall_pre_impl_listen((long long)(s), (long long)(backlog)) 
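// A usage sketch for these hook macros, following the pattern documented in
// the header comment of netbsd_syscall_hooks.h above: bracket the raw syscall
// with its pre/post hooks so the active sanitizer can validate the buffers
// involved. Illustrative only and NetBSD-specific; checked_read is a
// hypothetical wrapper, not part of this diff.
#include <sys/syscall.h>
#include <unistd.h>
#include <sanitizer/netbsd_syscall_hooks.h>

long long checked_read(int fd, void *buf, size_t nbyte) {
  __sanitizer_syscall_pre_read(fd, buf, nbyte);        // pre-syscall action
  long long res = syscall(SYS_read, fd, buf, nbyte);   // the actual syscall
  __sanitizer_syscall_post_read(res, fd, buf, nbyte);  // post-syscall action
  return res;
}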
+#define __sanitizer_syscall_post_listen(res, s, backlog) \ + __sanitizer_syscall_post_impl_listen(res, (long long)(s), \ + (long long)(backlog)) +/* syscall 107 has been skipped */ +#define __sanitizer_syscall_pre_compat_43_osigvec(signum, nsv, osv) \ + __sanitizer_syscall_pre_impl_compat_43_osigvec( \ + (long long)(signum), (long long)(nsv), (long long)(osv)) +#define __sanitizer_syscall_post_compat_43_osigvec(res, signum, nsv, osv) \ + __sanitizer_syscall_post_impl_compat_43_osigvec( \ + res, (long long)(signum), (long long)(nsv), (long long)(osv)) +#define __sanitizer_syscall_pre_compat_43_osigblock(mask) \ + __sanitizer_syscall_pre_impl_compat_43_osigblock((long long)(mask)) +#define __sanitizer_syscall_post_compat_43_osigblock(res, mask) \ + __sanitizer_syscall_post_impl_compat_43_osigblock(res, (long long)(mask)) +#define __sanitizer_syscall_pre_compat_43_osigsetmask(mask) \ + __sanitizer_syscall_pre_impl_compat_43_osigsetmask((long long)(mask)) +#define __sanitizer_syscall_post_compat_43_osigsetmask(res, mask) \ + __sanitizer_syscall_post_impl_compat_43_osigsetmask(res, (long long)(mask)) +#define __sanitizer_syscall_pre_compat_13_sigsuspend13(mask) \ + __sanitizer_syscall_pre_impl_compat_13_sigsuspend13((long long)(mask)) +#define __sanitizer_syscall_post_compat_13_sigsuspend13(res, mask) \ + __sanitizer_syscall_post_impl_compat_13_sigsuspend13(res, (long long)(mask)) +#define __sanitizer_syscall_pre_compat_43_osigstack(nss, oss) \ + __sanitizer_syscall_pre_impl_compat_43_osigstack((long long)(nss), \ + (long long)(oss)) +#define __sanitizer_syscall_post_compat_43_osigstack(res, nss, oss) \ + __sanitizer_syscall_post_impl_compat_43_osigstack(res, (long long)(nss), \ + (long long)(oss)) +#define __sanitizer_syscall_pre_compat_43_orecvmsg(s, msg, flags) \ + __sanitizer_syscall_pre_impl_compat_43_orecvmsg( \ + (long long)(s), (long long)(msg), (long long)(flags)) +#define __sanitizer_syscall_post_compat_43_orecvmsg(res, s, msg, flags) \ + __sanitizer_syscall_post_impl_compat_43_orecvmsg( \ + res, (long long)(s), (long long)(msg), (long long)(flags)) +#define __sanitizer_syscall_pre_compat_43_osendmsg(s, msg, flags) \ + __sanitizer_syscall_pre_impl_compat_43_osendmsg( \ + (long long)(s), (long long)(msg), (long long)(flags)) +#define __sanitizer_syscall_post_compat_43_osendmsg(res, s, msg, flags) \ + __sanitizer_syscall_post_impl_compat_43_osendmsg( \ + res, (long long)(s), (long long)(msg), (long long)(flags)) +/* syscall 115 has been skipped */ +#define __sanitizer_syscall_pre_compat_50_gettimeofday(tp, tzp) \ + __sanitizer_syscall_pre_impl_compat_50_gettimeofday((long long)(tp), \ + (long long)(tzp)) +#define __sanitizer_syscall_post_compat_50_gettimeofday(res, tp, tzp) \ + __sanitizer_syscall_post_impl_compat_50_gettimeofday(res, (long long)(tp), \ + (long long)(tzp)) +#define __sanitizer_syscall_pre_compat_50_getrusage(who, rusage) \ + __sanitizer_syscall_pre_impl_compat_50_getrusage((long long)(who), \ + (long long)(rusage)) +#define __sanitizer_syscall_post_compat_50_getrusage(res, who, rusage) \ + __sanitizer_syscall_post_impl_compat_50_getrusage(res, (long long)(who), \ + (long long)(rusage)) +#define __sanitizer_syscall_pre_getsockopt(s, level, name, val, avalsize) \ + __sanitizer_syscall_pre_impl_getsockopt((long long)(s), (long long)(level), \ + (long long)(name), (long long)(val), \ + (long long)(avalsize)) +#define __sanitizer_syscall_post_getsockopt(res, s, level, name, val, \ + avalsize) \ + __sanitizer_syscall_post_impl_getsockopt( \ + res, (long long)(s), (long 
long)(level), (long long)(name), \
+      (long long)(val), (long long)(avalsize))
+/* syscall 119 has been skipped */
+#define __sanitizer_syscall_pre_readv(fd, iovp, iovcnt) \
+  __sanitizer_syscall_pre_impl_readv((long long)(fd), (long long)(iovp), \
+      (long long)(iovcnt))
+#define __sanitizer_syscall_post_readv(res, fd, iovp, iovcnt) \
+  __sanitizer_syscall_post_impl_readv(res, (long long)(fd), (long long)(iovp), \
+      (long long)(iovcnt))
+#define __sanitizer_syscall_pre_writev(fd, iovp, iovcnt) \
+  __sanitizer_syscall_pre_impl_writev((long long)(fd), (long long)(iovp), \
+      (long long)(iovcnt))
+#define __sanitizer_syscall_post_writev(res, fd, iovp, iovcnt) \
+  __sanitizer_syscall_post_impl_writev(res, (long long)(fd), \
+      (long long)(iovp), (long long)(iovcnt))
+#define __sanitizer_syscall_pre_compat_50_settimeofday(tv, tzp) \
+  __sanitizer_syscall_pre_impl_compat_50_settimeofday((long long)(tv), \
+      (long long)(tzp))
+#define __sanitizer_syscall_post_compat_50_settimeofday(res, tv, tzp) \
+  __sanitizer_syscall_post_impl_compat_50_settimeofday(res, (long long)(tv), \
+      (long long)(tzp))
+#define __sanitizer_syscall_pre_fchown(fd, uid, gid) \
+  __sanitizer_syscall_pre_impl_fchown((long long)(fd), (long long)(uid), \
+      (long long)(gid))
+#define __sanitizer_syscall_post_fchown(res, fd, uid, gid) \
+  __sanitizer_syscall_post_impl_fchown(res, (long long)(fd), (long long)(uid), \
+      (long long)(gid))
+#define __sanitizer_syscall_pre_fchmod(fd, mode) \
+  __sanitizer_syscall_pre_impl_fchmod((long long)(fd), (long long)(mode))
+#define __sanitizer_syscall_post_fchmod(res, fd, mode) \
+  __sanitizer_syscall_post_impl_fchmod(res, (long long)(fd), (long long)(mode))
+#define __sanitizer_syscall_pre_compat_43_orecvfrom(s, buf, len, flags, from, \
+      fromlenaddr) \
+  __sanitizer_syscall_pre_impl_compat_43_orecvfrom( \
+      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags), \
+      (long long)(from), (long long)(fromlenaddr))
+#define __sanitizer_syscall_post_compat_43_orecvfrom(res, s, buf, len, flags, \
+      from, fromlenaddr) \
+  __sanitizer_syscall_post_impl_compat_43_orecvfrom( \
+      res, (long long)(s), (long long)(buf), (long long)(len), \
+      (long long)(flags), (long long)(from), (long long)(fromlenaddr))
+#define __sanitizer_syscall_pre_setreuid(ruid, euid) \
+  __sanitizer_syscall_pre_impl_setreuid((long long)(ruid), (long long)(euid))
+#define __sanitizer_syscall_post_setreuid(res, ruid, euid) \
+  __sanitizer_syscall_post_impl_setreuid(res, (long long)(ruid), \
+      (long long)(euid))
+#define __sanitizer_syscall_pre_setregid(rgid, egid) \
+  __sanitizer_syscall_pre_impl_setregid((long long)(rgid), (long long)(egid))
+#define __sanitizer_syscall_post_setregid(res, rgid, egid) \
+  __sanitizer_syscall_post_impl_setregid(res, (long long)(rgid), \
+      (long long)(egid))
+#define __sanitizer_syscall_pre_rename(from, to) \
+  __sanitizer_syscall_pre_impl_rename((long long)(from), (long long)(to))
+#define __sanitizer_syscall_post_rename(res, from, to) \
+  __sanitizer_syscall_post_impl_rename(res, (long long)(from), (long long)(to))
+#define __sanitizer_syscall_pre_compat_43_otruncate(path, length) \
+  __sanitizer_syscall_pre_impl_compat_43_otruncate((long long)(path), \
+      (long long)(length))
+#define __sanitizer_syscall_post_compat_43_otruncate(res, path, length) \
+  __sanitizer_syscall_post_impl_compat_43_otruncate(res, (long long)(path), \
+      (long long)(length))
+#define __sanitizer_syscall_pre_compat_43_oftruncate(fd, length) \
+  __sanitizer_syscall_pre_impl_compat_43_oftruncate((long long)(fd), \
+      (long long)(length))
+#define __sanitizer_syscall_post_compat_43_oftruncate(res, fd, length) \
+  __sanitizer_syscall_post_impl_compat_43_oftruncate(res, (long long)(fd), \
+      (long long)(length))
+#define __sanitizer_syscall_pre_flock(fd, how) \
+  __sanitizer_syscall_pre_impl_flock((long long)(fd), (long long)(how))
+#define __sanitizer_syscall_post_flock(res, fd, how) \
+  __sanitizer_syscall_post_impl_flock(res, (long long)(fd), (long long)(how))
+#define __sanitizer_syscall_pre_mkfifo(path, mode) \
+  __sanitizer_syscall_pre_impl_mkfifo((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_mkfifo(res, path, mode) \
+  __sanitizer_syscall_post_impl_mkfifo(res, (long long)(path), \
+      (long long)(mode))
+#define __sanitizer_syscall_pre_sendto(s, buf, len, flags, to, tolen) \
+  __sanitizer_syscall_pre_impl_sendto((long long)(s), (long long)(buf), \
+      (long long)(len), (long long)(flags), \
+      (long long)(to), (long long)(tolen))
+#define __sanitizer_syscall_post_sendto(res, s, buf, len, flags, to, tolen) \
+  __sanitizer_syscall_post_impl_sendto(res, (long long)(s), (long long)(buf), \
+      (long long)(len), (long long)(flags), \
+      (long long)(to), (long long)(tolen))
+#define __sanitizer_syscall_pre_shutdown(s, how) \
+  __sanitizer_syscall_pre_impl_shutdown((long long)(s), (long long)(how))
+#define __sanitizer_syscall_post_shutdown(res, s, how) \
+  __sanitizer_syscall_post_impl_shutdown(res, (long long)(s), (long long)(how))
+#define __sanitizer_syscall_pre_socketpair(domain, type, protocol, rsv) \
+  __sanitizer_syscall_pre_impl_socketpair( \
+      (long long)(domain), (long long)(type), (long long)(protocol), \
+      (long long)(rsv))
+#define __sanitizer_syscall_post_socketpair(res, domain, type, protocol, rsv) \
+  __sanitizer_syscall_post_impl_socketpair( \
+      res, (long long)(domain), (long long)(type), (long long)(protocol), \
+      (long long)(rsv))
+#define __sanitizer_syscall_pre_mkdir(path, mode) \
+  __sanitizer_syscall_pre_impl_mkdir((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_mkdir(res, path, mode) \
+  __sanitizer_syscall_post_impl_mkdir(res, (long long)(path), (long long)(mode))
+#define __sanitizer_syscall_pre_rmdir(path) \
+  __sanitizer_syscall_pre_impl_rmdir((long long)(path))
+#define __sanitizer_syscall_post_rmdir(res, path) \
+  __sanitizer_syscall_post_impl_rmdir(res, (long long)(path))
+#define __sanitizer_syscall_pre_compat_50_utimes(path, tptr) \
+  __sanitizer_syscall_pre_impl_compat_50_utimes((long long)(path), \
+      (long long)(tptr))
+#define __sanitizer_syscall_post_compat_50_utimes(res, path, tptr) \
+  __sanitizer_syscall_post_impl_compat_50_utimes(res, (long long)(path), \
+      (long long)(tptr))
+/* syscall 139 has been skipped */
+#define __sanitizer_syscall_pre_compat_50_adjtime(delta, olddelta) \
+  __sanitizer_syscall_pre_impl_compat_50_adjtime((long long)(delta), \
+      (long long)(olddelta))
+#define __sanitizer_syscall_post_compat_50_adjtime(res, delta, olddelta) \
+  __sanitizer_syscall_post_impl_compat_50_adjtime(res, (long long)(delta), \
+      (long long)(olddelta))
+#define __sanitizer_syscall_pre_compat_43_ogetpeername(fdes, asa, alen) \
+  __sanitizer_syscall_pre_impl_compat_43_ogetpeername( \
+      (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_post_compat_43_ogetpeername(res, fdes, asa, alen) \
+  __sanitizer_syscall_post_impl_compat_43_ogetpeername( \
+      res, (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_pre_compat_43_ogethostid() \
+  __sanitizer_syscall_pre_impl_compat_43_ogethostid()
+#define __sanitizer_syscall_post_compat_43_ogethostid(res) \
+  __sanitizer_syscall_post_impl_compat_43_ogethostid(res)
+#define __sanitizer_syscall_pre_compat_43_osethostid(hostid) \
+  __sanitizer_syscall_pre_impl_compat_43_osethostid((long long)(hostid))
+#define __sanitizer_syscall_post_compat_43_osethostid(res, hostid) \
+  __sanitizer_syscall_post_impl_compat_43_osethostid(res, (long long)(hostid))
+#define __sanitizer_syscall_pre_compat_43_ogetrlimit(which, rlp) \
+  __sanitizer_syscall_pre_impl_compat_43_ogetrlimit((long long)(which), \
+      (long long)(rlp))
+#define __sanitizer_syscall_post_compat_43_ogetrlimit(res, which, rlp) \
+  __sanitizer_syscall_post_impl_compat_43_ogetrlimit(res, (long long)(which), \
+      (long long)(rlp))
+#define __sanitizer_syscall_pre_compat_43_osetrlimit(which, rlp) \
+  __sanitizer_syscall_pre_impl_compat_43_osetrlimit((long long)(which), \
+      (long long)(rlp))
+#define __sanitizer_syscall_post_compat_43_osetrlimit(res, which, rlp) \
+  __sanitizer_syscall_post_impl_compat_43_osetrlimit(res, (long long)(which), \
+      (long long)(rlp))
+#define __sanitizer_syscall_pre_compat_43_okillpg(pgid, signum) \
+  __sanitizer_syscall_pre_impl_compat_43_okillpg((long long)(pgid), \
+      (long long)(signum))
+#define __sanitizer_syscall_post_compat_43_okillpg(res, pgid, signum) \
+  __sanitizer_syscall_post_impl_compat_43_okillpg(res, (long long)(pgid), \
+      (long long)(signum))
+#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
+#define __sanitizer_syscall_post_setsid(res) \
+  __sanitizer_syscall_post_impl_setsid(res)
+#define __sanitizer_syscall_pre_compat_50_quotactl(path, cmd, uid, arg) \
+  __sanitizer_syscall_pre_impl_compat_50_quotactl( \
+      (long long)(path), (long long)(cmd), (long long)(uid), (long long)(arg))
+#define __sanitizer_syscall_post_compat_50_quotactl(res, path, cmd, uid, arg) \
+  __sanitizer_syscall_post_impl_compat_50_quotactl( \
+      res, (long long)(path), (long long)(cmd), (long long)(uid), \
+      (long long)(arg))
+#define __sanitizer_syscall_pre_compat_43_oquota() \
+  __sanitizer_syscall_pre_impl_compat_43_oquota()
+#define __sanitizer_syscall_post_compat_43_oquota(res) \
+  __sanitizer_syscall_post_impl_compat_43_oquota(res)
+#define __sanitizer_syscall_pre_compat_43_ogetsockname(fdec, asa, alen) \
+  __sanitizer_syscall_pre_impl_compat_43_ogetsockname( \
+      (long long)(fdec), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_post_compat_43_ogetsockname(res, fdec, asa, alen) \
+  __sanitizer_syscall_post_impl_compat_43_ogetsockname( \
+      res, (long long)(fdec), (long long)(asa), (long long)(alen))
+/* syscall 151 has been skipped */
+/* syscall 152 has been skipped */
+/* syscall 153 has been skipped */
+/* syscall 154 has been skipped */
+#define __sanitizer_syscall_pre_nfssvc(flag, argp) \
+  __sanitizer_syscall_pre_impl_nfssvc((long long)(flag), (long long)(argp))
+#define __sanitizer_syscall_post_nfssvc(res, flag, argp) \
+  __sanitizer_syscall_post_impl_nfssvc(res, (long long)(flag), \
+      (long long)(argp))
+#define __sanitizer_syscall_pre_compat_43_ogetdirentries(fd, buf, count, \
+      basep) \
+  __sanitizer_syscall_pre_impl_compat_43_ogetdirentries( \
+      (long long)(fd), (long long)(buf), (long long)(count), \
+      (long long)(basep))
+#define __sanitizer_syscall_post_compat_43_ogetdirentries(res, fd, buf, count, \
+      basep) \
+  __sanitizer_syscall_post_impl_compat_43_ogetdirentries( \
+      res, (long long)(fd), (long long)(buf), (long long)(count), \
+      (long long)(basep))
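
(Editor's aside, not part of the patch: the generated macros above always come in pre/post pairs; a tool intercepting syscalls invokes the pre hook with the raw arguments before entering the kernel and the post hook with the result as the first parameter. A minimal sketch of the intended usage, assuming a program built against these headers and linked with a sanitizer runtime that provides the __sanitizer_syscall_*_impl_* functions; the wrapper name my_readv is hypothetical.)

#include <sanitizer/netbsd_syscall_hooks.h>
#include <sys/uio.h>
#include <unistd.h>

/* Hypothetical wrapper around readv(2): announce the call to the
   sanitizer before the syscall, then report the result afterwards. */
static ssize_t my_readv(int fd, const struct iovec *iov, int iovcnt) {
  __sanitizer_syscall_pre_readv(fd, iov, iovcnt);       /* arguments only */
  ssize_t res = readv(fd, iov, iovcnt);                 /* the real syscall */
  __sanitizer_syscall_post_readv(res, fd, iov, iovcnt); /* result comes first */
  return res;
}
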
+#define __sanitizer_syscall_pre_compat_20_statfs(path, buf) \
+  __sanitizer_syscall_pre_impl_compat_20_statfs((long long)(path), \
+      (long long)(buf))
+#define __sanitizer_syscall_post_compat_20_statfs(res, path, buf) \
+  __sanitizer_syscall_post_impl_compat_20_statfs(res, (long long)(path), \
+      (long long)(buf))
+#define __sanitizer_syscall_pre_compat_20_fstatfs(fd, buf) \
+  __sanitizer_syscall_pre_impl_compat_20_fstatfs((long long)(fd), \
+      (long long)(buf))
+#define __sanitizer_syscall_post_compat_20_fstatfs(res, fd, buf) \
+  __sanitizer_syscall_post_impl_compat_20_fstatfs(res, (long long)(fd), \
+      (long long)(buf))
+/* syscall 159 has been skipped */
+/* syscall 160 has been skipped */
+#define __sanitizer_syscall_pre_compat_30_getfh(fname, fhp) \
+  __sanitizer_syscall_pre_impl_compat_30_getfh((long long)(fname), \
+      (long long)(fhp))
+#define __sanitizer_syscall_post_compat_30_getfh(res, fname, fhp) \
+  __sanitizer_syscall_post_impl_compat_30_getfh(res, (long long)(fname), \
+      (long long)(fhp))
+#define __sanitizer_syscall_pre_compat_09_ogetdomainname(domainname, len) \
+  __sanitizer_syscall_pre_impl_compat_09_ogetdomainname( \
+      (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_post_compat_09_ogetdomainname(res, domainname, \
+      len) \
+  __sanitizer_syscall_post_impl_compat_09_ogetdomainname( \
+      res, (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_pre_compat_09_osetdomainname(domainname, len) \
+  __sanitizer_syscall_pre_impl_compat_09_osetdomainname( \
+      (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_post_compat_09_osetdomainname(res, domainname, \
+      len) \
+  __sanitizer_syscall_post_impl_compat_09_osetdomainname( \
+      res, (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_pre_compat_09_ouname(name) \
+  __sanitizer_syscall_pre_impl_compat_09_ouname((long long)(name))
+#define __sanitizer_syscall_post_compat_09_ouname(res, name) \
+  __sanitizer_syscall_post_impl_compat_09_ouname(res, (long long)(name))
+#define __sanitizer_syscall_pre_sysarch(op, parms) \
+  __sanitizer_syscall_pre_impl_sysarch((long long)(op), (long long)(parms))
+#define __sanitizer_syscall_post_sysarch(res, op, parms) \
+  __sanitizer_syscall_post_impl_sysarch(res, (long long)(op), \
+      (long long)(parms))
+#define __sanitizer_syscall_pre___futex(uaddr, op, val, timeout, uaddr2, val2, \
+      val3) \
+  __sanitizer_syscall_pre_impl___futex((long long)(uaddr), (long long)(op), \
+      (long long)(val), (long long)(timeout), \
+      (long long)(uaddr2), (long long)(val2), \
+      (long long)(val3))
+#define __sanitizer_syscall_post___futex(res, uaddr, op, val, timeout, uaddr2, \
+      val2, val3) \
+  __sanitizer_syscall_post_impl___futex( \
+      res, (long long)(uaddr), (long long)(op), (long long)(val), \
+      (long long)(timeout), (long long)(uaddr2), (long long)(val2), \
+      (long long)(val3))
+#define __sanitizer_syscall_pre___futex_set_robust_list(head, len) \
+  __sanitizer_syscall_pre_impl___futex_set_robust_list((long long)(head), \
+      (long long)(len))
+#define __sanitizer_syscall_post___futex_set_robust_list(res, head, len) \
+  __sanitizer_syscall_post_impl___futex_set_robust_list( \
+      res, (long long)(head), (long long)(len))
+#define __sanitizer_syscall_pre___futex_get_robust_list(lwpid, headp, lenp) \
+  __sanitizer_syscall_pre_impl___futex_get_robust_list( \
+      (long long)(lwpid), (long long)(headp), (long long)(lenp))
+#define __sanitizer_syscall_post___futex_get_robust_list(res, lwpid, headp, \
+      lenp) \
+  __sanitizer_syscall_post_impl___futex_get_robust_list( \
+      res, (long long)(lwpid), (long long)(headp), (long long)(lenp))
+#if !defined(_LP64)
+#define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5) \
+  __sanitizer_syscall_pre_impl_compat_10_osemsys( \
+      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4), \
+      (long long)(a5))
+#define __sanitizer_syscall_post_compat_10_osemsys(res, which, a2, a3, a4, a5) \
+  __sanitizer_syscall_post_impl_compat_10_osemsys( \
+      res, (long long)(which), (long long)(a2), (long long)(a3), \
+      (long long)(a4), (long long)(a5))
+#else
+/* syscall 169 has been skipped */
+#endif
+#if !defined(_LP64)
+#define __sanitizer_syscall_pre_compat_10_omsgsys(which, a2, a3, a4, a5, a6) \
+  __sanitizer_syscall_pre_impl_compat_10_omsgsys( \
+      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4), \
+      (long long)(a5), (long long)(a6))
+#define __sanitizer_syscall_post_compat_10_omsgsys(res, which, a2, a3, a4, a5, \
+      a6) \
+  __sanitizer_syscall_post_impl_compat_10_omsgsys( \
+      res, (long long)(which), (long long)(a2), (long long)(a3), \
+      (long long)(a4), (long long)(a5), (long long)(a6))
+#else
+/* syscall 170 has been skipped */
+#endif
+#if !defined(_LP64)
+#define __sanitizer_syscall_pre_compat_10_oshmsys(which, a2, a3, a4) \
+  __sanitizer_syscall_pre_impl_compat_10_oshmsys( \
+      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4))
+#define __sanitizer_syscall_post_compat_10_oshmsys(res, which, a2, a3, a4) \
+  __sanitizer_syscall_post_impl_compat_10_oshmsys( \
+      res, (long long)(which), (long long)(a2), (long long)(a3), \
+      (long long)(a4))
+#else
+/* syscall 171 has been skipped */
+#endif
+/* syscall 172 has been skipped */
+#define __sanitizer_syscall_pre_pread(fd, buf, nbyte, PAD, offset) \
+  __sanitizer_syscall_pre_impl_pread((long long)(fd), (long long)(buf), \
+      (long long)(nbyte), (long long)(PAD), \
+      (long long)(offset))
+#define __sanitizer_syscall_post_pread(res, fd, buf, nbyte, PAD, offset) \
+  __sanitizer_syscall_post_impl_pread(res, (long long)(fd), (long long)(buf), \
+      (long long)(nbyte), (long long)(PAD), \
+      (long long)(offset))
+#define __sanitizer_syscall_pre_pwrite(fd, buf, nbyte, PAD, offset) \
+  __sanitizer_syscall_pre_impl_pwrite((long long)(fd), (long long)(buf), \
+      (long long)(nbyte), (long long)(PAD), \
+      (long long)(offset))
+#define __sanitizer_syscall_post_pwrite(res, fd, buf, nbyte, PAD, offset) \
+  __sanitizer_syscall_post_impl_pwrite(res, (long long)(fd), (long long)(buf), \
+      (long long)(nbyte), (long long)(PAD), \
+      (long long)(offset))
+#define __sanitizer_syscall_pre_compat_30_ntp_gettime(ntvp) \
+  __sanitizer_syscall_pre_impl_compat_30_ntp_gettime((long long)(ntvp))
+#define __sanitizer_syscall_post_compat_30_ntp_gettime(res, ntvp) \
+  __sanitizer_syscall_post_impl_compat_30_ntp_gettime(res, (long long)(ntvp))
+#if defined(NTP) || !defined(_KERNEL_OPT)
+#define __sanitizer_syscall_pre_ntp_adjtime(tp) \
+  __sanitizer_syscall_pre_impl_ntp_adjtime((long long)(tp))
+#define __sanitizer_syscall_post_ntp_adjtime(res, tp) \
+  __sanitizer_syscall_post_impl_ntp_adjtime(res, (long long)(tp))
+#else
+/* syscall 176 has been skipped */
+#endif
+/* syscall 177 has been skipped */
+/* syscall 178 has been skipped */
+/* syscall 179 has been skipped */
+/* syscall 180 has been skipped */
+#define __sanitizer_syscall_pre_setgid(gid) \
+  __sanitizer_syscall_pre_impl_setgid((long long)(gid))
+#define __sanitizer_syscall_post_setgid(res, gid) \
+  __sanitizer_syscall_post_impl_setgid(res, (long long)(gid))
+#define __sanitizer_syscall_pre_setegid(egid) \
+  __sanitizer_syscall_pre_impl_setegid((long long)(egid))
+#define __sanitizer_syscall_post_setegid(res, egid) \
+  __sanitizer_syscall_post_impl_setegid(res, (long long)(egid))
+#define __sanitizer_syscall_pre_seteuid(euid) \
+  __sanitizer_syscall_pre_impl_seteuid((long long)(euid))
+#define __sanitizer_syscall_post_seteuid(res, euid) \
+  __sanitizer_syscall_post_impl_seteuid(res, (long long)(euid))
+#define __sanitizer_syscall_pre_lfs_bmapv(fsidp, blkiov, blkcnt) \
+  __sanitizer_syscall_pre_impl_lfs_bmapv( \
+      (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_post_lfs_bmapv(res, fsidp, blkiov, blkcnt) \
+  __sanitizer_syscall_post_impl_lfs_bmapv( \
+      res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_pre_lfs_markv(fsidp, blkiov, blkcnt) \
+  __sanitizer_syscall_pre_impl_lfs_markv( \
+      (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_post_lfs_markv(res, fsidp, blkiov, blkcnt) \
+  __sanitizer_syscall_post_impl_lfs_markv( \
+      res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_pre_lfs_segclean(fsidp, segment) \
+  __sanitizer_syscall_pre_impl_lfs_segclean((long long)(fsidp), \
+      (long long)(segment))
+#define __sanitizer_syscall_post_lfs_segclean(res, fsidp, segment) \
+  __sanitizer_syscall_post_impl_lfs_segclean(res, (long long)(fsidp), \
+      (long long)(segment))
+#define __sanitizer_syscall_pre_compat_50_lfs_segwait(fsidp, tv) \
+  __sanitizer_syscall_pre_impl_compat_50_lfs_segwait((long long)(fsidp), \
+      (long long)(tv))
+#define __sanitizer_syscall_post_compat_50_lfs_segwait(res, fsidp, tv) \
+  __sanitizer_syscall_post_impl_compat_50_lfs_segwait(res, (long long)(fsidp), \
+      (long long)(tv))
+#define __sanitizer_syscall_pre_compat_12_stat12(path, ub) \
+  __sanitizer_syscall_pre_impl_compat_12_stat12((long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_post_compat_12_stat12(res, path, ub) \
+  __sanitizer_syscall_post_impl_compat_12_stat12(res, (long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_pre_compat_12_fstat12(fd, sb) \
+  __sanitizer_syscall_pre_impl_compat_12_fstat12((long long)(fd), \
+      (long long)(sb))
+#define __sanitizer_syscall_post_compat_12_fstat12(res, fd, sb) \
+  __sanitizer_syscall_post_impl_compat_12_fstat12(res, (long long)(fd), \
+      (long long)(sb))
+#define __sanitizer_syscall_pre_compat_12_lstat12(path, ub) \
+  __sanitizer_syscall_pre_impl_compat_12_lstat12((long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_post_compat_12_lstat12(res, path, ub) \
+  __sanitizer_syscall_post_impl_compat_12_lstat12(res, (long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_pre_pathconf(path, name) \
+  __sanitizer_syscall_pre_impl_pathconf((long long)(path), (long long)(name))
+#define __sanitizer_syscall_post_pathconf(res, path, name) \
+  __sanitizer_syscall_post_impl_pathconf(res, (long long)(path), \
+      (long long)(name))
+#define __sanitizer_syscall_pre_fpathconf(fd, name) \
+  __sanitizer_syscall_pre_impl_fpathconf((long long)(fd), (long long)(name))
+#define __sanitizer_syscall_post_fpathconf(res, fd, name) \
+  __sanitizer_syscall_post_impl_fpathconf(res, (long long)(fd), \
+      (long long)(name))
+#define __sanitizer_syscall_pre_getsockopt2(s, level, name, val, avalsize) \
+  __sanitizer_syscall_pre_impl_getsockopt2( \
+      (long long)(s), (long long)(level), (long long)(name), (long long)(val), \
+      (long long)(avalsize))
+#define __sanitizer_syscall_post_getsockopt2(res, s, level, name, val, \
+      avalsize) \
+  __sanitizer_syscall_post_impl_getsockopt2( \
+      res, (long long)(s), (long long)(level), (long long)(name), \
+      (long long)(val), (long long)(avalsize))
+#define __sanitizer_syscall_pre_getrlimit(which, rlp) \
+  __sanitizer_syscall_pre_impl_getrlimit((long long)(which), (long long)(rlp))
+#define __sanitizer_syscall_post_getrlimit(res, which, rlp) \
+  __sanitizer_syscall_post_impl_getrlimit(res, (long long)(which), \
+      (long long)(rlp))
+#define __sanitizer_syscall_pre_setrlimit(which, rlp) \
+  __sanitizer_syscall_pre_impl_setrlimit((long long)(which), (long long)(rlp))
+#define __sanitizer_syscall_post_setrlimit(res, which, rlp) \
+  __sanitizer_syscall_post_impl_setrlimit(res, (long long)(which), \
+      (long long)(rlp))
+#define __sanitizer_syscall_pre_compat_12_getdirentries(fd, buf, count, basep) \
+  __sanitizer_syscall_pre_impl_compat_12_getdirentries( \
+      (long long)(fd), (long long)(buf), (long long)(count), \
+      (long long)(basep))
+#define __sanitizer_syscall_post_compat_12_getdirentries(res, fd, buf, count, \
+      basep) \
+  __sanitizer_syscall_post_impl_compat_12_getdirentries( \
+      res, (long long)(fd), (long long)(buf), (long long)(count), \
+      (long long)(basep))
+#define __sanitizer_syscall_pre_mmap(addr, len, prot, flags, fd, PAD, pos) \
+  __sanitizer_syscall_pre_impl_mmap( \
+      (long long)(addr), (long long)(len), (long long)(prot), \
+      (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
+#define __sanitizer_syscall_post_mmap(res, addr, len, prot, flags, fd, PAD, \
+      pos) \
+  __sanitizer_syscall_post_impl_mmap( \
+      res, (long long)(addr), (long long)(len), (long long)(prot), \
+      (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
+#define __sanitizer_syscall_pre___syscall(code, arg0, arg1, arg2, arg3, arg4, \
+      arg5, arg6, arg7) \
+  __sanitizer_syscall_pre_impl___syscall( \
+      (long long)(code), (long long)(arg0), (long long)(arg1), \
+      (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+      (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_post___syscall(res, code, arg0, arg1, arg2, arg3, \
+      arg4, arg5, arg6, arg7) \
+  __sanitizer_syscall_post_impl___syscall( \
+      res, (long long)(code), (long long)(arg0), (long long)(arg1), \
+      (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+      (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_pre_lseek(fd, PAD, offset, whence) \
+  __sanitizer_syscall_pre_impl_lseek((long long)(fd), (long long)(PAD), \
+      (long long)(offset), (long long)(whence))
+#define __sanitizer_syscall_post_lseek(res, fd, PAD, offset, whence) \
+  __sanitizer_syscall_post_impl_lseek(res, (long long)(fd), (long long)(PAD), \
+      (long long)(offset), \
+      (long long)(whence))
+#define __sanitizer_syscall_pre_truncate(path, PAD, length) \
+  __sanitizer_syscall_pre_impl_truncate((long long)(path), (long long)(PAD), \
+      (long long)(length))
+#define __sanitizer_syscall_post_truncate(res, path, PAD, length) \
+  __sanitizer_syscall_post_impl_truncate( \
+      res, (long long)(path), (long long)(PAD), (long long)(length))
+#define __sanitizer_syscall_pre_ftruncate(fd, PAD, length) \
+  __sanitizer_syscall_pre_impl_ftruncate((long long)(fd), (long long)(PAD), \
+      (long long)(length))
+#define __sanitizer_syscall_post_ftruncate(res, fd, PAD, length) \
+  __sanitizer_syscall_post_impl_ftruncate( \
+      res, (long long)(fd), (long long)(PAD), (long long)(length))
+#define __sanitizer_syscall_pre___sysctl(name, namelen, oldv, oldlenp, newv, \
+      newlen) \
+  __sanitizer_syscall_pre_impl___sysctl( \
+      (long long)(name), (long long)(namelen), (long long)(oldv), \
+      (long long)(oldlenp), (long long)(newv), (long long)(newlen))
+#define __sanitizer_syscall_post___sysctl(res, name, namelen, oldv, oldlenp, \
+      newv, newlen) \
+  __sanitizer_syscall_post_impl___sysctl( \
+      res, (long long)(name), (long long)(namelen), (long long)(oldv), \
+      (long long)(oldlenp), (long long)(newv), (long long)(newlen))
+#define __sanitizer_syscall_pre_mlock(addr, len) \
+  __sanitizer_syscall_pre_impl_mlock((long long)(addr), (long long)(len))
+#define __sanitizer_syscall_post_mlock(res, addr, len) \
+  __sanitizer_syscall_post_impl_mlock(res, (long long)(addr), (long long)(len))
+#define __sanitizer_syscall_pre_munlock(addr, len) \
+  __sanitizer_syscall_pre_impl_munlock((long long)(addr), (long long)(len))
+#define __sanitizer_syscall_post_munlock(res, addr, len) \
+  __sanitizer_syscall_post_impl_munlock(res, (long long)(addr), \
+      (long long)(len))
+#define __sanitizer_syscall_pre_undelete(path) \
+  __sanitizer_syscall_pre_impl_undelete((long long)(path))
+#define __sanitizer_syscall_post_undelete(res, path) \
+  __sanitizer_syscall_post_impl_undelete(res, (long long)(path))
+#define __sanitizer_syscall_pre_compat_50_futimes(fd, tptr) \
+  __sanitizer_syscall_pre_impl_compat_50_futimes((long long)(fd), \
+      (long long)(tptr))
+#define __sanitizer_syscall_post_compat_50_futimes(res, fd, tptr) \
+  __sanitizer_syscall_post_impl_compat_50_futimes(res, (long long)(fd), \
+      (long long)(tptr))
+#define __sanitizer_syscall_pre_getpgid(pid) \
+  __sanitizer_syscall_pre_impl_getpgid((long long)(pid))
+#define __sanitizer_syscall_post_getpgid(res, pid) \
+  __sanitizer_syscall_post_impl_getpgid(res, (long long)(pid))
+#define __sanitizer_syscall_pre_reboot(opt, bootstr) \
+  __sanitizer_syscall_pre_impl_reboot((long long)(opt), (long long)(bootstr))
+#define __sanitizer_syscall_post_reboot(res, opt, bootstr) \
+  __sanitizer_syscall_post_impl_reboot(res, (long long)(opt), \
+      (long long)(bootstr))
+#define __sanitizer_syscall_pre_poll(fds, nfds, timeout) \
+  __sanitizer_syscall_pre_impl_poll((long long)(fds), (long long)(nfds), \
+      (long long)(timeout))
+#define __sanitizer_syscall_post_poll(res, fds, nfds, timeout) \
+  __sanitizer_syscall_post_impl_poll(res, (long long)(fds), (long long)(nfds), \
+      (long long)(timeout))
+#define __sanitizer_syscall_pre_afssys(id, a1, a2, a3, a4, a5, a6) \
+  __sanitizer_syscall_pre_impl_afssys( \
+      (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3), \
+      (long long)(a4), (long long)(a5), (long long)(a6))
+#define __sanitizer_syscall_post_afssys(res, id, a1, a2, a3, a4, a5, a6) \
+  __sanitizer_syscall_post_impl_afssys( \
+      res, (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3), \
+      (long long)(a4), (long long)(a5), (long long)(a6))
+/* syscall 211 has been skipped */
+/* syscall 212 has been skipped */
+/* syscall 213 has been skipped */
+/* syscall 214 has been skipped */
+/* syscall 215 has been skipped */
+/* syscall 216 has been skipped */
+/* syscall 217 has been skipped */
+/* syscall 218 has been skipped */
+/* syscall 219 has been skipped */
+#define __sanitizer_syscall_pre_compat_14___semctl(semid, semnum, cmd, arg) \
+  __sanitizer_syscall_pre_impl_compat_14___semctl( \
+      (long long)(semid), (long long)(semnum), (long long)(cmd), \
+      (long long)(arg))
+#define __sanitizer_syscall_post_compat_14___semctl(res, semid, semnum, cmd, \
+      arg) \
+  __sanitizer_syscall_post_impl_compat_14___semctl( \
+      res, (long long)(semid), (long long)(semnum), (long long)(cmd), \
+      (long long)(arg))
+#define __sanitizer_syscall_pre_semget(key, nsems, semflg) \
+  __sanitizer_syscall_pre_impl_semget((long long)(key), (long long)(nsems), \
+      (long long)(semflg))
+#define __sanitizer_syscall_post_semget(res, key, nsems, semflg) \
+  __sanitizer_syscall_post_impl_semget( \
+      res, (long long)(key), (long long)(nsems), (long long)(semflg))
+#define __sanitizer_syscall_pre_semop(semid, sops, nsops) \
+  __sanitizer_syscall_pre_impl_semop((long long)(semid), (long long)(sops), \
+      (long long)(nsops))
+#define __sanitizer_syscall_post_semop(res, semid, sops, nsops) \
+  __sanitizer_syscall_post_impl_semop(res, (long long)(semid), \
+      (long long)(sops), (long long)(nsops))
+#define __sanitizer_syscall_pre_semconfig(flag) \
+  __sanitizer_syscall_pre_impl_semconfig((long long)(flag))
+#define __sanitizer_syscall_post_semconfig(res, flag) \
+  __sanitizer_syscall_post_impl_semconfig(res, (long long)(flag))
+#define __sanitizer_syscall_pre_compat_14_msgctl(msqid, cmd, buf) \
+  __sanitizer_syscall_pre_impl_compat_14_msgctl( \
+      (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_14_msgctl(res, msqid, cmd, buf) \
+  __sanitizer_syscall_post_impl_compat_14_msgctl( \
+      res, (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_msgget(key, msgflg) \
+  __sanitizer_syscall_pre_impl_msgget((long long)(key), (long long)(msgflg))
+#define __sanitizer_syscall_post_msgget(res, key, msgflg) \
+  __sanitizer_syscall_post_impl_msgget(res, (long long)(key), \
+      (long long)(msgflg))
+#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg) \
+  __sanitizer_syscall_pre_impl_msgsnd((long long)(msqid), (long long)(msgp), \
+      (long long)(msgsz), (long long)(msgflg))
+#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg) \
+  __sanitizer_syscall_post_impl_msgsnd(res, (long long)(msqid), \
+      (long long)(msgp), (long long)(msgsz), \
+      (long long)(msgflg))
+#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) \
+  __sanitizer_syscall_pre_impl_msgrcv((long long)(msqid), (long long)(msgp), \
+      (long long)(msgsz), (long long)(msgtyp), \
+      (long long)(msgflg))
+#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp, \
+      msgflg) \
+  __sanitizer_syscall_post_impl_msgrcv( \
+      res, (long long)(msqid), (long long)(msgp), (long long)(msgsz), \
+      (long long)(msgtyp), (long long)(msgflg))
+#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg) \
+  __sanitizer_syscall_pre_impl_shmat((long long)(shmid), (long long)(shmaddr), \
+      (long long)(shmflg))
+#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg) \
+  __sanitizer_syscall_post_impl_shmat( \
+      res, (long long)(shmid), (long long)(shmaddr), (long long)(shmflg))
+#define __sanitizer_syscall_pre_compat_14_shmctl(shmid, cmd, buf) \
+  __sanitizer_syscall_pre_impl_compat_14_shmctl( \
+      (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_14_shmctl(res, shmid, cmd, buf) \
+  __sanitizer_syscall_post_impl_compat_14_shmctl( \
+      res, (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_shmdt(shmaddr) \
+  __sanitizer_syscall_pre_impl_shmdt((long long)(shmaddr))
+#define __sanitizer_syscall_post_shmdt(res, shmaddr) \
+  __sanitizer_syscall_post_impl_shmdt(res, (long long)(shmaddr))
+#define __sanitizer_syscall_pre_shmget(key, size, shmflg) \
+  __sanitizer_syscall_pre_impl_shmget((long long)(key), (long long)(size), \
+      (long long)(shmflg))
+#define __sanitizer_syscall_post_shmget(res, key, size, shmflg) \
+  __sanitizer_syscall_post_impl_shmget(res, (long long)(key), \
+      (long long)(size), (long long)(shmflg))
+#define __sanitizer_syscall_pre_compat_50_clock_gettime(clock_id, tp) \
+  __sanitizer_syscall_pre_impl_compat_50_clock_gettime((long long)(clock_id), \
+      (long long)(tp))
+#define __sanitizer_syscall_post_compat_50_clock_gettime(res, clock_id, tp) \
+  __sanitizer_syscall_post_impl_compat_50_clock_gettime( \
+      res, (long long)(clock_id), (long long)(tp))
+#define __sanitizer_syscall_pre_compat_50_clock_settime(clock_id, tp) \
+  __sanitizer_syscall_pre_impl_compat_50_clock_settime((long long)(clock_id), \
+      (long long)(tp))
+#define __sanitizer_syscall_post_compat_50_clock_settime(res, clock_id, tp) \
+  __sanitizer_syscall_post_impl_compat_50_clock_settime( \
+      res, (long long)(clock_id), (long long)(tp))
+#define __sanitizer_syscall_pre_compat_50_clock_getres(clock_id, tp) \
+  __sanitizer_syscall_pre_impl_compat_50_clock_getres((long long)(clock_id), \
+      (long long)(tp))
+#define __sanitizer_syscall_post_compat_50_clock_getres(res, clock_id, tp) \
+  __sanitizer_syscall_post_impl_compat_50_clock_getres( \
+      res, (long long)(clock_id), (long long)(tp))
+#define __sanitizer_syscall_pre_timer_create(clock_id, evp, timerid) \
+  __sanitizer_syscall_pre_impl_timer_create( \
+      (long long)(clock_id), (long long)(evp), (long long)(timerid))
+#define __sanitizer_syscall_post_timer_create(res, clock_id, evp, timerid) \
+  __sanitizer_syscall_post_impl_timer_create( \
+      res, (long long)(clock_id), (long long)(evp), (long long)(timerid))
+#define __sanitizer_syscall_pre_timer_delete(timerid) \
+  __sanitizer_syscall_pre_impl_timer_delete((long long)(timerid))
+#define __sanitizer_syscall_post_timer_delete(res, timerid) \
+  __sanitizer_syscall_post_impl_timer_delete(res, (long long)(timerid))
+#define __sanitizer_syscall_pre_compat_50_timer_settime(timerid, flags, value, \
+      ovalue) \
+  __sanitizer_syscall_pre_impl_compat_50_timer_settime( \
+      (long long)(timerid), (long long)(flags), (long long)(value), \
+      (long long)(ovalue))
+#define __sanitizer_syscall_post_compat_50_timer_settime(res, timerid, flags, \
+      value, ovalue) \
+  __sanitizer_syscall_post_impl_compat_50_timer_settime( \
+      res, (long long)(timerid), (long long)(flags), (long long)(value), \
+      (long long)(ovalue))
+#define __sanitizer_syscall_pre_compat_50_timer_gettime(timerid, value) \
+  __sanitizer_syscall_pre_impl_compat_50_timer_gettime((long long)(timerid), \
+      (long long)(value))
+#define __sanitizer_syscall_post_compat_50_timer_gettime(res, timerid, value) \
+  __sanitizer_syscall_post_impl_compat_50_timer_gettime( \
+      res, (long long)(timerid), (long long)(value))
+#define __sanitizer_syscall_pre_timer_getoverrun(timerid) \
+  __sanitizer_syscall_pre_impl_timer_getoverrun((long long)(timerid))
+#define __sanitizer_syscall_post_timer_getoverrun(res, timerid) \
+  __sanitizer_syscall_post_impl_timer_getoverrun(res, (long long)(timerid))
+#define __sanitizer_syscall_pre_compat_50_nanosleep(rqtp, rmtp) \
+  __sanitizer_syscall_pre_impl_compat_50_nanosleep((long long)(rqtp), \
+      (long long)(rmtp))
+#define __sanitizer_syscall_post_compat_50_nanosleep(res, rqtp, rmtp) \
+  __sanitizer_syscall_post_impl_compat_50_nanosleep(res, (long long)(rqtp), \
+      (long long)(rmtp))
+#define __sanitizer_syscall_pre_fdatasync(fd) \
+  __sanitizer_syscall_pre_impl_fdatasync((long long)(fd))
+#define __sanitizer_syscall_post_fdatasync(res, fd) \
+  __sanitizer_syscall_post_impl_fdatasync(res, (long long)(fd))
+#define __sanitizer_syscall_pre_mlockall(flags) \
+  __sanitizer_syscall_pre_impl_mlockall((long long)(flags))
+#define __sanitizer_syscall_post_mlockall(res, flags) \
+  __sanitizer_syscall_post_impl_mlockall(res, (long long)(flags))
+#define __sanitizer_syscall_pre_munlockall() \
+  __sanitizer_syscall_pre_impl_munlockall()
+#define __sanitizer_syscall_post_munlockall(res) \
+  __sanitizer_syscall_post_impl_munlockall(res)
+#define __sanitizer_syscall_pre_compat_50___sigtimedwait(set, info, timeout) \
+  __sanitizer_syscall_pre_impl_compat_50___sigtimedwait( \
+      (long long)(set), (long long)(info), (long long)(timeout))
+#define __sanitizer_syscall_post_compat_50___sigtimedwait(res, set, info, \
+      timeout) \
+  __sanitizer_syscall_post_impl_compat_50___sigtimedwait( \
+      res, (long long)(set), (long long)(info), (long long)(timeout))
+#define __sanitizer_syscall_pre_sigqueueinfo(pid, info) \
+  __sanitizer_syscall_pre_impl_sigqueueinfo((long long)(pid), (long long)(info))
+#define __sanitizer_syscall_post_sigqueueinfo(res, pid, info) \
+  __sanitizer_syscall_post_impl_sigqueueinfo(res, (long long)(pid), \
+      (long long)(info))
+#define __sanitizer_syscall_pre_modctl(cmd, arg) \
+  __sanitizer_syscall_pre_impl_modctl((long long)(cmd), (long long)(arg))
+#define __sanitizer_syscall_post_modctl(res, cmd, arg) \
+  __sanitizer_syscall_post_impl_modctl(res, (long long)(cmd), (long long)(arg))
+#define __sanitizer_syscall_pre__ksem_init(value, idp) \
+  __sanitizer_syscall_pre_impl__ksem_init((long long)(value), (long long)(idp))
+#define __sanitizer_syscall_post__ksem_init(res, value, idp) \
+  __sanitizer_syscall_post_impl__ksem_init(res, (long long)(value), \
+      (long long)(idp))
+#define __sanitizer_syscall_pre__ksem_open(name, oflag, mode, value, idp) \
+  __sanitizer_syscall_pre_impl__ksem_open( \
+      (long long)(name), (long long)(oflag), (long long)(mode), \
+      (long long)(value), (long long)(idp))
+#define __sanitizer_syscall_post__ksem_open(res, name, oflag, mode, value, \
+      idp) \
+  __sanitizer_syscall_post_impl__ksem_open( \
+      res, (long long)(name), (long long)(oflag), (long long)(mode), \
+      (long long)(value), (long long)(idp))
+#define __sanitizer_syscall_pre__ksem_unlink(name) \
+  __sanitizer_syscall_pre_impl__ksem_unlink((long long)(name))
+#define __sanitizer_syscall_post__ksem_unlink(res, name) \
+  __sanitizer_syscall_post_impl__ksem_unlink(res, (long long)(name))
+#define __sanitizer_syscall_pre__ksem_close(id) \
+  __sanitizer_syscall_pre_impl__ksem_close((long long)(id))
+#define __sanitizer_syscall_post__ksem_close(res, id) \
+  __sanitizer_syscall_post_impl__ksem_close(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_post(id) \
+  __sanitizer_syscall_pre_impl__ksem_post((long long)(id))
+#define __sanitizer_syscall_post__ksem_post(res, id) \
+  __sanitizer_syscall_post_impl__ksem_post(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_wait(id) \
+  __sanitizer_syscall_pre_impl__ksem_wait((long long)(id))
+#define __sanitizer_syscall_post__ksem_wait(res, id) \
+  __sanitizer_syscall_post_impl__ksem_wait(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_trywait(id) \
+  __sanitizer_syscall_pre_impl__ksem_trywait((long long)(id))
+#define __sanitizer_syscall_post__ksem_trywait(res, id) \
+  __sanitizer_syscall_post_impl__ksem_trywait(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_getvalue(id, value) \
+  __sanitizer_syscall_pre_impl__ksem_getvalue((long long)(id), \
+      (long long)(value))
+#define __sanitizer_syscall_post__ksem_getvalue(res, id, value) \
+  __sanitizer_syscall_post_impl__ksem_getvalue(res, (long long)(id), \
+      (long long)(value))
+#define __sanitizer_syscall_pre__ksem_destroy(id) \
+  __sanitizer_syscall_pre_impl__ksem_destroy((long long)(id))
+#define __sanitizer_syscall_post__ksem_destroy(res, id) \
+  __sanitizer_syscall_post_impl__ksem_destroy(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_timedwait(id, abstime) \
+  __sanitizer_syscall_pre_impl__ksem_timedwait((long long)(id), \
+      (long long)(abstime))
+#define __sanitizer_syscall_post__ksem_timedwait(res, id, abstime) \
+  __sanitizer_syscall_post_impl__ksem_timedwait(res, (long long)(id), \
+      (long long)(abstime))
+#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr) \
+  __sanitizer_syscall_pre_impl_mq_open((long long)(name), (long long)(oflag), \
+      (long long)(mode), (long long)(attr))
+#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr) \
+  __sanitizer_syscall_post_impl_mq_open(res, (long long)(name), \
+      (long long)(oflag), (long long)(mode), \
+      (long long)(attr))
+#define __sanitizer_syscall_pre_mq_close(mqdes) \
+  __sanitizer_syscall_pre_impl_mq_close((long long)(mqdes))
+#define __sanitizer_syscall_post_mq_close(res, mqdes) \
+  __sanitizer_syscall_post_impl_mq_close(res, (long long)(mqdes))
+#define __sanitizer_syscall_pre_mq_unlink(name) \
+  __sanitizer_syscall_pre_impl_mq_unlink((long long)(name))
+#define __sanitizer_syscall_post_mq_unlink(res, name) \
+  __sanitizer_syscall_post_impl_mq_unlink(res, (long long)(name))
+#define __sanitizer_syscall_pre_mq_getattr(mqdes, mqstat) \
+  __sanitizer_syscall_pre_impl_mq_getattr((long long)(mqdes), \
+      (long long)(mqstat))
+#define __sanitizer_syscall_post_mq_getattr(res, mqdes, mqstat) \
+  __sanitizer_syscall_post_impl_mq_getattr(res, (long long)(mqdes), \
+      (long long)(mqstat))
+#define __sanitizer_syscall_pre_mq_setattr(mqdes, mqstat, omqstat) \
+  __sanitizer_syscall_pre_impl_mq_setattr( \
+      (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
+#define __sanitizer_syscall_post_mq_setattr(res, mqdes, mqstat, omqstat) \
+  __sanitizer_syscall_post_impl_mq_setattr( \
+      res, (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
+#define __sanitizer_syscall_pre_mq_notify(mqdes, notification) \
+  __sanitizer_syscall_pre_impl_mq_notify((long long)(mqdes), \
+      (long long)(notification))
+#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification) \
+  __sanitizer_syscall_post_impl_mq_notify(res, (long long)(mqdes), \
+      (long long)(notification))
+#define __sanitizer_syscall_pre_mq_send(mqdes, msg_ptr, msg_len, msg_prio) \
+  __sanitizer_syscall_pre_impl_mq_send( \
+      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio))
+#define __sanitizer_syscall_post_mq_send(res, mqdes, msg_ptr, msg_len, \
+      msg_prio) \
+  __sanitizer_syscall_post_impl_mq_send( \
+      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio))
+#define __sanitizer_syscall_pre_mq_receive(mqdes, msg_ptr, msg_len, msg_prio) \
+  __sanitizer_syscall_pre_impl_mq_receive( \
+      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio))
+#define __sanitizer_syscall_post_mq_receive(res, mqdes, msg_ptr, msg_len, \
+      msg_prio) \
+  __sanitizer_syscall_post_impl_mq_receive( \
+      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio))
+#define __sanitizer_syscall_pre_compat_50_mq_timedsend( \
+      mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+  __sanitizer_syscall_pre_impl_compat_50_mq_timedsend( \
+      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_post_compat_50_mq_timedsend( \
+      res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+  __sanitizer_syscall_post_impl_compat_50_mq_timedsend( \
+      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_pre_compat_50_mq_timedreceive( \
+      mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+  __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive( \
+      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_post_compat_50_mq_timedreceive( \
+      res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+  __sanitizer_syscall_post_impl_compat_50_mq_timedreceive( \
+      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+      (long long)(msg_prio), (long long)(abs_timeout))
+/* syscall 267 has been skipped */
+/* syscall 268 has been skipped */
+/* syscall 269 has been skipped */
+#define __sanitizer_syscall_pre___posix_rename(from, to) \
+  __sanitizer_syscall_pre_impl___posix_rename((long long)(from), \
+      (long long)(to))
+#define __sanitizer_syscall_post___posix_rename(res, from, to) \
+  __sanitizer_syscall_post_impl___posix_rename(res, (long long)(from), \
+      (long long)(to))
+#define __sanitizer_syscall_pre_swapctl(cmd, arg, misc) \
+  __sanitizer_syscall_pre_impl_swapctl((long long)(cmd), (long long)(arg), \
+      (long long)(misc))
+#define __sanitizer_syscall_post_swapctl(res, cmd, arg, misc) \
+  __sanitizer_syscall_post_impl_swapctl(res, (long long)(cmd), \
+      (long long)(arg), (long long)(misc))
+#define __sanitizer_syscall_pre_compat_30_getdents(fd, buf, count) \
+  __sanitizer_syscall_pre_impl_compat_30_getdents( \
+      (long long)(fd), (long long)(buf), (long long)(count))
+#define __sanitizer_syscall_post_compat_30_getdents(res, fd, buf, count) \
+  __sanitizer_syscall_post_impl_compat_30_getdents( \
+      res, (long long)(fd), (long long)(buf), (long long)(count))
+#define __sanitizer_syscall_pre_minherit(addr, len, inherit) \
+  __sanitizer_syscall_pre_impl_minherit((long long)(addr), (long long)(len), \
+      (long long)(inherit))
+#define __sanitizer_syscall_post_minherit(res, addr, len, inherit) \
+  __sanitizer_syscall_post_impl_minherit( \
+      res, (long long)(addr), (long long)(len), (long long)(inherit))
+#define __sanitizer_syscall_pre_lchmod(path, mode) \
+  __sanitizer_syscall_pre_impl_lchmod((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_lchmod(res, path, mode) \
+  __sanitizer_syscall_post_impl_lchmod(res, (long long)(path), \
+      (long long)(mode))
+#define __sanitizer_syscall_pre_lchown(path, uid, gid) \
+  __sanitizer_syscall_pre_impl_lchown((long long)(path), (long long)(uid), \
+      (long long)(gid))
+#define __sanitizer_syscall_post_lchown(res, path, uid, gid) \
+  __sanitizer_syscall_post_impl_lchown(res, (long long)(path), \
+      (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre_compat_50_lutimes(path, tptr) \
+  __sanitizer_syscall_pre_impl_compat_50_lutimes((long long)(path), \
+      (long long)(tptr))
+#define __sanitizer_syscall_post_compat_50_lutimes(res, path, tptr) \
+  __sanitizer_syscall_post_impl_compat_50_lutimes(res, (long long)(path), \
+      (long long)(tptr))
+#define __sanitizer_syscall_pre___msync13(addr, len, flags) \
+  __sanitizer_syscall_pre_impl___msync13((long long)(addr), (long long)(len), \
+      (long long)(flags))
+#define __sanitizer_syscall_post___msync13(res, addr, len, flags) \
+  __sanitizer_syscall_post_impl___msync13( \
+      res, (long long)(addr), (long long)(len), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_30___stat13(path, ub) \
+  __sanitizer_syscall_pre_impl_compat_30___stat13((long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_post_compat_30___stat13(res, path, ub) \
+  __sanitizer_syscall_post_impl_compat_30___stat13(res, (long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_pre_compat_30___fstat13(fd, sb) \
+  __sanitizer_syscall_pre_impl_compat_30___fstat13((long long)(fd), \
+      (long long)(sb))
+#define __sanitizer_syscall_post_compat_30___fstat13(res, fd, sb) \
+  __sanitizer_syscall_post_impl_compat_30___fstat13(res, (long long)(fd), \
+      (long long)(sb))
+#define __sanitizer_syscall_pre_compat_30___lstat13(path, ub) \
+  __sanitizer_syscall_pre_impl_compat_30___lstat13((long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_post_compat_30___lstat13(res, path, ub) \
+  __sanitizer_syscall_post_impl_compat_30___lstat13(res, (long long)(path), \
+      (long long)(ub))
+#define __sanitizer_syscall_pre___sigaltstack14(nss, oss) \
+  __sanitizer_syscall_pre_impl___sigaltstack14((long long)(nss), \
+      (long long)(oss))
+#define __sanitizer_syscall_post___sigaltstack14(res, nss, oss) \
+  __sanitizer_syscall_post_impl___sigaltstack14(res, (long long)(nss), \
+      (long long)(oss))
+#define __sanitizer_syscall_pre___vfork14() \
+  __sanitizer_syscall_pre_impl___vfork14()
+#define __sanitizer_syscall_post___vfork14(res) \
+  __sanitizer_syscall_post_impl___vfork14(res)
+#define __sanitizer_syscall_pre___posix_chown(path, uid, gid) \
+  __sanitizer_syscall_pre_impl___posix_chown( \
+      (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_post___posix_chown(res, path, uid, gid) \
+  __sanitizer_syscall_post_impl___posix_chown( \
+      res, (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre___posix_fchown(fd, uid, gid) \
+  __sanitizer_syscall_pre_impl___posix_fchown( \
+      (long long)(fd), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_post___posix_fchown(res, fd, uid, gid) \
+  __sanitizer_syscall_post_impl___posix_fchown( \
+      res, (long long)(fd), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre___posix_lchown(path, uid, gid) \
+  __sanitizer_syscall_pre_impl___posix_lchown( \
+      (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_post___posix_lchown(res, path, uid, gid) \
+  __sanitizer_syscall_post_impl___posix_lchown( \
+      res, (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre_getsid(pid) \
+  __sanitizer_syscall_pre_impl_getsid((long long)(pid))
+#define __sanitizer_syscall_post_getsid(res, pid) \
+  __sanitizer_syscall_post_impl_getsid(res, (long long)(pid))
+#define __sanitizer_syscall_pre___clone(flags, stack) \
+  __sanitizer_syscall_pre_impl___clone((long long)(flags), (long long)(stack))
+#define __sanitizer_syscall_post___clone(res, flags, stack) \
+  __sanitizer_syscall_post_impl___clone(res, (long long)(flags), \
+      (long long)(stack))
+#define __sanitizer_syscall_pre_fktrace(fd, ops, facs, pid) \
+  __sanitizer_syscall_pre_impl_fktrace((long long)(fd), (long long)(ops), \
+      (long long)(facs), (long long)(pid))
+#define __sanitizer_syscall_post_fktrace(res, fd, ops, facs, pid) \
+  __sanitizer_syscall_post_impl_fktrace(res, (long long)(fd), \
+      (long long)(ops), (long long)(facs), \
+      (long long)(pid))
+#define __sanitizer_syscall_pre_preadv(fd, iovp, iovcnt, PAD, offset) \
+  __sanitizer_syscall_pre_impl_preadv((long long)(fd), (long long)(iovp), \
+      (long long)(iovcnt), (long long)(PAD), \
+      (long long)(offset))
+#define __sanitizer_syscall_post_preadv(res, fd, iovp, iovcnt, PAD, offset) \
+  __sanitizer_syscall_post_impl_preadv(res, (long long)(fd), \
+      (long long)(iovp), (long long)(iovcnt), \
+      (long long)(PAD), (long long)(offset))
+#define __sanitizer_syscall_pre_pwritev(fd, iovp, iovcnt, PAD, offset) \
+  __sanitizer_syscall_pre_impl_pwritev((long long)(fd), (long long)(iovp), \
+      (long long)(iovcnt), (long long)(PAD), \
+      (long long)(offset))
+#define __sanitizer_syscall_post_pwritev(res, fd, iovp, iovcnt, PAD, offset) \
+  __sanitizer_syscall_post_impl_pwritev( \
+      res, (long long)(fd), (long long)(iovp), (long long)(iovcnt), \
+      (long long)(PAD), (long long)(offset))
+#define __sanitizer_syscall_pre_compat_16___sigaction14(signum, nsa, osa) \
+  __sanitizer_syscall_pre_impl_compat_16___sigaction14( \
+      (long long)(signum), (long long)(nsa), (long long)(osa))
+#define __sanitizer_syscall_post_compat_16___sigaction14(res, signum, nsa, \
+      osa) \
+  __sanitizer_syscall_post_impl_compat_16___sigaction14( \
+      res, (long long)(signum), (long long)(nsa), (long long)(osa))
+#define __sanitizer_syscall_pre___sigpending14(set) \
+  __sanitizer_syscall_pre_impl___sigpending14((long long)(set))
+#define __sanitizer_syscall_post___sigpending14(res, set) \
+  __sanitizer_syscall_post_impl___sigpending14(res, (long long)(set))
+#define __sanitizer_syscall_pre___sigprocmask14(how, set, oset) \
+  __sanitizer_syscall_pre_impl___sigprocmask14( \
+      (long long)(how), (long long)(set), (long long)(oset))
+#define __sanitizer_syscall_post___sigprocmask14(res, how, set, oset) \
+  __sanitizer_syscall_post_impl___sigprocmask14( \
+      res, (long long)(how), (long long)(set), (long long)(oset))
+#define __sanitizer_syscall_pre___sigsuspend14(set) \
+  __sanitizer_syscall_pre_impl___sigsuspend14((long long)(set))
+#define __sanitizer_syscall_post___sigsuspend14(res, set) \
+  __sanitizer_syscall_post_impl___sigsuspend14(res, (long long)(set))
+#define __sanitizer_syscall_pre_compat_16___sigreturn14(sigcntxp) \
+  __sanitizer_syscall_pre_impl_compat_16___sigreturn14((long long)(sigcntxp))
+#define __sanitizer_syscall_post_compat_16___sigreturn14(res, sigcntxp) \
+  __sanitizer_syscall_post_impl_compat_16___sigreturn14(res, \
+      (long long)(sigcntxp))
+#define __sanitizer_syscall_pre___getcwd(bufp, length) \
+  __sanitizer_syscall_pre_impl___getcwd((long long)(bufp), (long long)(length))
+#define __sanitizer_syscall_post___getcwd(res, bufp, length) \
+  __sanitizer_syscall_post_impl___getcwd(res, (long long)(bufp), \
+      (long long)(length))
+#define __sanitizer_syscall_pre_fchroot(fd) \
+  __sanitizer_syscall_pre_impl_fchroot((long long)(fd))
+#define __sanitizer_syscall_post_fchroot(res, fd) \
+  __sanitizer_syscall_post_impl_fchroot(res, (long long)(fd))
+#define __sanitizer_syscall_pre_compat_30_fhopen(fhp, flags) \
+  __sanitizer_syscall_pre_impl_compat_30_fhopen((long long)(fhp), \
+      (long long)(flags))
+#define __sanitizer_syscall_post_compat_30_fhopen(res, fhp, flags) \
+  __sanitizer_syscall_post_impl_compat_30_fhopen(res, (long long)(fhp), \
+      (long long)(flags))
+#define __sanitizer_syscall_pre_compat_30_fhstat(fhp, sb) \
+  __sanitizer_syscall_pre_impl_compat_30_fhstat((long long)(fhp), \
+      (long long)(sb))
+#define __sanitizer_syscall_post_compat_30_fhstat(res, fhp, sb) \
+  __sanitizer_syscall_post_impl_compat_30_fhstat(res, (long long)(fhp), \
+      (long long)(sb))
+#define __sanitizer_syscall_pre_compat_20_fhstatfs(fhp, buf) \
+  __sanitizer_syscall_pre_impl_compat_20_fhstatfs((long long)(fhp), \
+      (long long)(buf))
+#define __sanitizer_syscall_post_compat_20_fhstatfs(res, fhp, buf) \
+  __sanitizer_syscall_post_impl_compat_20_fhstatfs(res, (long long)(fhp), \
+      (long long)(buf))
+#define __sanitizer_syscall_pre_compat_50_____semctl13(semid, semnum, cmd, \
+      arg) \
+  __sanitizer_syscall_pre_impl_compat_50_____semctl13( \
+      (long long)(semid), (long long)(semnum), (long long)(cmd), \
+      (long long)(arg))
+#define __sanitizer_syscall_post_compat_50_____semctl13(res, semid, semnum, \
+      cmd, arg) \
+  __sanitizer_syscall_post_impl_compat_50_____semctl13( \
+      res, (long long)(semid), (long long)(semnum), (long long)(cmd), \
+      (long long)(arg))
+#define __sanitizer_syscall_pre_compat_50___msgctl13(msqid, cmd, buf) \
+  __sanitizer_syscall_pre_impl_compat_50___msgctl13( \
+      (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_50___msgctl13(res, msqid, cmd, buf) \
+  __sanitizer_syscall_post_impl_compat_50___msgctl13( \
+      res, (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_compat_50___shmctl13(shmid, cmd, buf) \
+  __sanitizer_syscall_pre_impl_compat_50___shmctl13( \
+      (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_50___shmctl13(res, shmid, cmd, buf) \
+  __sanitizer_syscall_post_impl_compat_50___shmctl13( \
+      res, (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_lchflags(path, flags) \
+  __sanitizer_syscall_pre_impl_lchflags((long long)(path), (long long)(flags))
+#define __sanitizer_syscall_post_lchflags(res, path, flags) \
+  __sanitizer_syscall_post_impl_lchflags(res, (long long)(path), \
+      (long long)(flags))
+#define __sanitizer_syscall_pre_issetugid() \
+  __sanitizer_syscall_pre_impl_issetugid()
+#define __sanitizer_syscall_post_issetugid(res) \
+  __sanitizer_syscall_post_impl_issetugid(res)
+#define __sanitizer_syscall_pre_utrace(label, addr, len) \
+  __sanitizer_syscall_pre_impl_utrace((long long)(label), (long long)(addr), \
+      (long long)(len))
+#define __sanitizer_syscall_post_utrace(res, label, addr, len) \
+  __sanitizer_syscall_post_impl_utrace(res, (long long)(label), \
+      (long long)(addr), (long long)(len))
+#define __sanitizer_syscall_pre_getcontext(ucp) \
+  __sanitizer_syscall_pre_impl_getcontext((long long)(ucp))
+#define __sanitizer_syscall_post_getcontext(res, ucp) \
+  __sanitizer_syscall_post_impl_getcontext(res, (long long)(ucp))
+#define __sanitizer_syscall_pre_setcontext(ucp) \
+  __sanitizer_syscall_pre_impl_setcontext((long long)(ucp))
+#define __sanitizer_syscall_post_setcontext(res, ucp) \
+  __sanitizer_syscall_post_impl_setcontext(res, (long long)(ucp))
+#define __sanitizer_syscall_pre__lwp_create(ucp, flags, new_lwp) \
+  __sanitizer_syscall_pre_impl__lwp_create( \
+      (long long)(ucp), (long long)(flags), (long long)(new_lwp))
+#define __sanitizer_syscall_post__lwp_create(res, ucp, flags, new_lwp) \
+  __sanitizer_syscall_post_impl__lwp_create( \
+      res, (long long)(ucp), (long long)(flags), (long long)(new_lwp))
+#define __sanitizer_syscall_pre__lwp_exit() \
+  __sanitizer_syscall_pre_impl__lwp_exit()
+#define __sanitizer_syscall_post__lwp_exit(res) \
+  __sanitizer_syscall_post_impl__lwp_exit(res)
+#define __sanitizer_syscall_pre__lwp_self() \
+  __sanitizer_syscall_pre_impl__lwp_self()
+#define __sanitizer_syscall_post__lwp_self(res) \
+  __sanitizer_syscall_post_impl__lwp_self(res)
+#define __sanitizer_syscall_pre__lwp_wait(wait_for, departed) \
+  __sanitizer_syscall_pre_impl__lwp_wait((long long)(wait_for), \
+      (long long)(departed))
+#define __sanitizer_syscall_post__lwp_wait(res, wait_for, departed) \
+  __sanitizer_syscall_post_impl__lwp_wait(res, (long long)(wait_for), \
+      (long long)(departed))
+#define __sanitizer_syscall_pre__lwp_suspend(target) \
+  __sanitizer_syscall_pre_impl__lwp_suspend((long long)(target))
+#define __sanitizer_syscall_post__lwp_suspend(res, target) \
+  __sanitizer_syscall_post_impl__lwp_suspend(res, (long long)(target))
+#define __sanitizer_syscall_pre__lwp_continue(target) \
+  __sanitizer_syscall_pre_impl__lwp_continue((long long)(target))
+#define __sanitizer_syscall_post__lwp_continue(res, target) \
+  __sanitizer_syscall_post_impl__lwp_continue(res, (long long)(target))
+#define __sanitizer_syscall_pre__lwp_wakeup(target) \
+  __sanitizer_syscall_pre_impl__lwp_wakeup((long long)(target))
+#define __sanitizer_syscall_post__lwp_wakeup(res, target) \
+  __sanitizer_syscall_post_impl__lwp_wakeup(res, (long long)(target))
+#define __sanitizer_syscall_pre__lwp_getprivate() \
+  __sanitizer_syscall_pre_impl__lwp_getprivate()
+#define __sanitizer_syscall_post__lwp_getprivate(res) \
+  __sanitizer_syscall_post_impl__lwp_getprivate(res)
+#define __sanitizer_syscall_pre__lwp_setprivate(ptr) \
+  __sanitizer_syscall_pre_impl__lwp_setprivate((long long)(ptr))
+#define __sanitizer_syscall_post__lwp_setprivate(res, ptr) \
+  __sanitizer_syscall_post_impl__lwp_setprivate(res, (long long)(ptr))
+#define __sanitizer_syscall_pre__lwp_kill(target, signo) \
+  __sanitizer_syscall_pre_impl__lwp_kill((long long)(target), \
+      (long long)(signo))
+#define __sanitizer_syscall_post__lwp_kill(res, target, signo) \
+  __sanitizer_syscall_post_impl__lwp_kill(res, (long long)(target), \
+      (long long)(signo))
+#define __sanitizer_syscall_pre__lwp_detach(target) \
+  __sanitizer_syscall_pre_impl__lwp_detach((long long)(target))
+#define __sanitizer_syscall_post__lwp_detach(res, target) \
+  __sanitizer_syscall_post_impl__lwp_detach(res, (long long)(target))
+#define __sanitizer_syscall_pre_compat_50__lwp_park(ts, unpark, hint, \
+      unparkhint) \
+  __sanitizer_syscall_pre_impl_compat_50__lwp_park( \
+      (long long)(ts), (long long)(unpark), (long long)(hint), \
+      (long long)(unparkhint))
+#define __sanitizer_syscall_post_compat_50__lwp_park(res, ts, unpark, hint, \
+      unparkhint) \
+  __sanitizer_syscall_post_impl_compat_50__lwp_park( \
+      res, (long long)(ts), (long long)(unpark), (long long)(hint), \
+      (long long)(unparkhint))
+#define __sanitizer_syscall_pre__lwp_unpark(target, hint) \
+  __sanitizer_syscall_pre_impl__lwp_unpark((long long)(target), \
+      (long long)(hint))
+#define __sanitizer_syscall_post__lwp_unpark(res, target, hint) \
+  __sanitizer_syscall_post_impl__lwp_unpark(res, (long long)(target), \
+      (long long)(hint))
+#define __sanitizer_syscall_pre__lwp_unpark_all(targets, ntargets, hint) \
+  __sanitizer_syscall_pre_impl__lwp_unpark_all( \
+      (long long)(targets), (long long)(ntargets), (long long)(hint))
+#define __sanitizer_syscall_post__lwp_unpark_all(res, targets, ntargets, hint) \
+  __sanitizer_syscall_post_impl__lwp_unpark_all( \
+      res, (long long)(targets), (long long)(ntargets), (long long)(hint))
+#define __sanitizer_syscall_pre__lwp_setname(target, name) \
+  __sanitizer_syscall_pre_impl__lwp_setname((long long)(target), \
+      (long long)(name))
+#define __sanitizer_syscall_post__lwp_setname(res, target, name) \
+  __sanitizer_syscall_post_impl__lwp_setname(res, (long long)(target), \
+      (long long)(name))
+#define __sanitizer_syscall_pre__lwp_getname(target, name, len) \
+  __sanitizer_syscall_pre_impl__lwp_getname( \
+      (long long)(target), (long long)(name), (long long)(len))
+#define __sanitizer_syscall_post__lwp_getname(res, target, name, len) \
+  __sanitizer_syscall_post_impl__lwp_getname( \
+      res, (long long)(target), (long long)(name), (long long)(len))
+#define __sanitizer_syscall_pre__lwp_ctl(features, address) \
+  __sanitizer_syscall_pre_impl__lwp_ctl((long long)(features), \
+      (long long)(address))
+#define __sanitizer_syscall_post__lwp_ctl(res, features, address) \
+  __sanitizer_syscall_post_impl__lwp_ctl(res, (long long)(features), \
+      (long long)(address))
+/* syscall 326 has been skipped */
+/* syscall 327 has been skipped */
+/* syscall 328 has been skipped */
+/* syscall 329 has been skipped */
+#define __sanitizer_syscall_pre_compat_60_sa_register(newv, oldv, flags, \
+      stackinfo_offset) \
+  __sanitizer_syscall_pre_impl_compat_60_sa_register( \
+      (long long)(newv), (long long)(oldv), (long long)(flags), \
+      (long long)(stackinfo_offset))
+#define __sanitizer_syscall_post_compat_60_sa_register(res, newv, oldv, flags, \
+      stackinfo_offset) \
+  __sanitizer_syscall_post_impl_compat_60_sa_register( \
+      res, (long long)(newv), (long long)(oldv), (long long)(flags), \
+      (long long)(stackinfo_offset))
+#define __sanitizer_syscall_pre_compat_60_sa_stacks(num, stacks) \
+  __sanitizer_syscall_pre_impl_compat_60_sa_stacks((long long)(num), \
+      (long long)(stacks))
+#define __sanitizer_syscall_post_compat_60_sa_stacks(res, num, stacks) \
+  __sanitizer_syscall_post_impl_compat_60_sa_stacks(res, (long long)(num), \
+      (long long)(stacks))
+#define __sanitizer_syscall_pre_compat_60_sa_enable() \
+  __sanitizer_syscall_pre_impl_compat_60_sa_enable()
+#define __sanitizer_syscall_post_compat_60_sa_enable(res) \
+  __sanitizer_syscall_post_impl_compat_60_sa_enable(res)
+#define __sanitizer_syscall_pre_compat_60_sa_setconcurrency(concurrency) \
+  __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency( \
+      (long long)(concurrency))
+#define __sanitizer_syscall_post_compat_60_sa_setconcurrency(res, concurrency) \
+  __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency( \
+      res, (long long)(concurrency))
+#define __sanitizer_syscall_pre_compat_60_sa_yield() \
+  __sanitizer_syscall_pre_impl_compat_60_sa_yield()
+#define __sanitizer_syscall_post_compat_60_sa_yield(res) \
+  __sanitizer_syscall_post_impl_compat_60_sa_yield(res)
+#define __sanitizer_syscall_pre_compat_60_sa_preempt(sa_id) \
+  __sanitizer_syscall_pre_impl_compat_60_sa_preempt((long long)(sa_id))
+#define __sanitizer_syscall_post_compat_60_sa_preempt(res, sa_id) \
+  __sanitizer_syscall_post_impl_compat_60_sa_preempt(res, (long long)(sa_id))
+/* syscall 336 has been skipped */
+/* syscall 337 has been skipped */
+/* syscall 338 has been skipped */
+/* syscall 339 has been skipped */
+#define __sanitizer_syscall_pre___sigaction_sigtramp(signum, nsa, osa, tramp, \
+      vers) \
+  __sanitizer_syscall_pre_impl___sigaction_sigtramp( \
+      (long long)(signum), (long long)(nsa), (long long)(osa), \
+      (long long)(tramp), (long long)(vers))
+#define __sanitizer_syscall_post___sigaction_sigtramp(res, signum, nsa, osa, \
+      tramp, vers) \
+  __sanitizer_syscall_post_impl___sigaction_sigtramp( \
+      res, (long long)(signum), (long long)(nsa), (long long)(osa), \
+      (long long)(tramp), (long long)(vers))
+/* syscall 341 has been skipped */
+/* syscall 342 has been skipped */
+#define __sanitizer_syscall_pre_rasctl(addr, len, op) \
+  __sanitizer_syscall_pre_impl_rasctl((long long)(addr), (long long)(len), \
+      (long long)(op))
+#define __sanitizer_syscall_post_rasctl(res, addr, len, op) \
+  __sanitizer_syscall_post_impl_rasctl(res, (long long)(addr), \
+      (long long)(len), (long long)(op))
+#define __sanitizer_syscall_pre_kqueue() __sanitizer_syscall_pre_impl_kqueue()
+#define __sanitizer_syscall_post_kqueue(res) \
+  __sanitizer_syscall_post_impl_kqueue(res)
+#define __sanitizer_syscall_pre_compat_50_kevent(fd, changelist, nchanges, \
+      eventlist, nevents, timeout) \
+  __sanitizer_syscall_pre_impl_compat_50_kevent( \
+      (long long)(fd), (long long)(changelist), (long long)(nchanges), \
+      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
+#define __sanitizer_syscall_post_compat_50_kevent( \
+      res, fd, changelist, nchanges, eventlist, nevents, timeout) \
+  __sanitizer_syscall_post_impl_compat_50_kevent( \
+      res, (long long)(fd), (long long)(changelist), (long long)(nchanges), \
+      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
+#define __sanitizer_syscall_pre__sched_setparam(pid, lid, policy, params) \
+  __sanitizer_syscall_pre_impl__sched_setparam( \
+      (long long)(pid), (long long)(lid), (long long)(policy), \
+      (long long)(params))
+#define __sanitizer_syscall_post__sched_setparam(res, pid, lid, policy, \
+      params) \
+  __sanitizer_syscall_post_impl__sched_setparam( \
+      res, (long long)(pid), (long long)(lid), (long long)(policy), \
+      (long long)(params))
+#define __sanitizer_syscall_pre__sched_getparam(pid, lid, policy, params) \
+  __sanitizer_syscall_pre_impl__sched_getparam( \
+      (long long)(pid), (long long)(lid), (long long)(policy), \
+      (long long)(params))
+#define __sanitizer_syscall_post__sched_getparam(res, pid, lid, policy, \
+      params) \
+  __sanitizer_syscall_post_impl__sched_getparam( \
+      res, (long long)(pid), (long long)(lid), (long long)(policy), \
+      (long long)(params))
+#define __sanitizer_syscall_pre__sched_setaffinity(pid, lid, size, cpuset) \
+  __sanitizer_syscall_pre_impl__sched_setaffinity( \
+      (long long)(pid), (long long)(lid), (long long)(size), \
+      (long long)(cpuset))
+#define __sanitizer_syscall_post__sched_setaffinity(res, pid, lid, size, \
+      cpuset) \
+  __sanitizer_syscall_post_impl__sched_setaffinity( \
+      res, (long long)(pid), (long long)(lid), (long long)(size), \
+      (long long)(cpuset))
+#define __sanitizer_syscall_pre__sched_getaffinity(pid, lid, size, cpuset) \
+  __sanitizer_syscall_pre_impl__sched_getaffinity( \
+      (long long)(pid), (long long)(lid), (long long)(size), \
+      (long long)(cpuset))
+#define __sanitizer_syscall_post__sched_getaffinity(res, pid, lid, size, \
+      cpuset) \
+  __sanitizer_syscall_post_impl__sched_getaffinity( \
+      res, (long long)(pid), (long long)(lid), (long long)(size), \
+      (long long)(cpuset))
+#define __sanitizer_syscall_pre_sched_yield() \
+  __sanitizer_syscall_pre_impl_sched_yield()
+#define __sanitizer_syscall_post_sched_yield(res) \
+  __sanitizer_syscall_post_impl_sched_yield(res)
+#define __sanitizer_syscall_pre__sched_protect(priority) \
+  __sanitizer_syscall_pre_impl__sched_protect((long long)(priority))
+#define __sanitizer_syscall_post__sched_protect(res, priority) \
+  __sanitizer_syscall_post_impl__sched_protect(res, (long long)(priority))
+/* syscall 352 has been skipped */
+/* syscall 353 has been skipped */
+#define __sanitizer_syscall_pre_fsync_range(fd, flags, start, length) \
+  __sanitizer_syscall_pre_impl_fsync_range( \
+      (long long)(fd), (long long)(flags), (long long)(start), \
+      (long long)(length))
+#define __sanitizer_syscall_post_fsync_range(res, fd, flags, start, length) \
+  __sanitizer_syscall_post_impl_fsync_range( \
+      res, (long long)(fd), (long long)(flags), (long long)(start), \
+      (long long)(length))
+#define __sanitizer_syscall_pre_uuidgen(store, count) \
+  __sanitizer_syscall_pre_impl_uuidgen((long long)(store), (long long)(count))
+#define __sanitizer_syscall_post_uuidgen(res, store, count) \
+  __sanitizer_syscall_post_impl_uuidgen(res, (long long)(store), \
+      (long long)(count))
+#define __sanitizer_syscall_pre_compat_90_getvfsstat(buf, bufsize, flags) \
+  __sanitizer_syscall_pre_impl_compat_90_getvfsstat( \
+      (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_getvfsstat(res, buf, bufsize, \
+      flags) \
+  __sanitizer_syscall_post_impl_compat_90_getvfsstat( \
+      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_90_statvfs1(path, buf, flags) \
+  __sanitizer_syscall_pre_impl_compat_90_statvfs1( \
+      (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_statvfs1(res, path, buf, flags) \
+  __sanitizer_syscall_post_impl_compat_90_statvfs1( \
+      res, (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_90_fstatvfs1(fd, buf, flags) \
+  __sanitizer_syscall_pre_impl_compat_90_fstatvfs1( \
+      (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_fstatvfs1(res, fd, buf, flags) \
+  __sanitizer_syscall_post_impl_compat_90_fstatvfs1( \
+      res, (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_30_fhstatvfs1(fhp, buf, flags) \
+  __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1( \
+      (long long)(fhp), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_30_fhstatvfs1(res, fhp, buf, flags) \
+  __sanitizer_syscall_post_impl_compat_30_fhstatvfs1( \
+      res, (long long)(fhp), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_extattrctl(path, cmd, filename, attrnamespace, \
+      attrname) \
+  __sanitizer_syscall_pre_impl_extattrctl( \
+      (long long)(path), (long long)(cmd), (long long)(filename), \
+      (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattrctl(res, path, cmd, filename, \
+      attrnamespace, attrname) \
+  __sanitizer_syscall_post_impl_extattrctl( \
+      res, (long long)(path), (long long)(cmd), (long long)(filename), \
+      (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_set_file(path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_pre_impl_extattr_set_file( \
+      (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_set_file(res, path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_post_impl_extattr_set_file( \
+      res, (long long)(path), (long long)(attrnamespace), \
+      (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_get_file(path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_pre_impl_extattr_get_file( \
+      (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_get_file(res, path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_post_impl_extattr_get_file( \
+      res, (long long)(path), (long long)(attrnamespace), \
+      (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_delete_file(path, attrnamespace, \
+      attrname) \
+  __sanitizer_syscall_pre_impl_extattr_delete_file( \
+      (long long)(path), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattr_delete_file(res, path, attrnamespace, \
+      attrname) \
+  __sanitizer_syscall_post_impl_extattr_delete_file( \
+      res, (long long)(path), (long long)(attrnamespace), \
+      (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_set_fd(fd, attrnamespace, attrname, \
+      data, nbytes) \
+  __sanitizer_syscall_pre_impl_extattr_set_fd( \
+      (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_set_fd(res, fd, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_post_impl_extattr_set_fd( \
+      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_get_fd(fd, attrnamespace, attrname, \
+      data, nbytes) \
+  __sanitizer_syscall_pre_impl_extattr_get_fd( \
+      (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_get_fd(res, fd, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_post_impl_extattr_get_fd( \
+      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_delete_fd(fd, attrnamespace, attrname) \
+  __sanitizer_syscall_pre_impl_extattr_delete_fd( \
+      (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattr_delete_fd(res, fd, attrnamespace, \
+      attrname) \
+  __sanitizer_syscall_post_impl_extattr_delete_fd( \
+      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_set_link(path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_pre_impl_extattr_set_link( \
+      (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_set_link(res, path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_post_impl_extattr_set_link( \
+      res, (long long)(path), (long long)(attrnamespace), \
+      (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_get_link(path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_pre_impl_extattr_get_link( \
+      (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+      (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_get_link(res, path, attrnamespace, \
+      attrname, data, nbytes) \
+  __sanitizer_syscall_post_impl_extattr_get_link( \
+      res, (long long)(path), (long
long)(attrnamespace), \ + (long long)(attrname), (long long)(data), (long long)(nbytes)) +#define __sanitizer_syscall_pre_extattr_delete_link(path, attrnamespace, \ + attrname) \ + __sanitizer_syscall_pre_impl_extattr_delete_link( \ + (long long)(path), (long long)(attrnamespace), (long long)(attrname)) +#define __sanitizer_syscall_post_extattr_delete_link(res, path, attrnamespace, \ + attrname) \ + __sanitizer_syscall_post_impl_extattr_delete_link( \ + res, (long long)(path), (long long)(attrnamespace), \ + (long long)(attrname)) +#define __sanitizer_syscall_pre_extattr_list_fd(fd, attrnamespace, data, \ + nbytes) \ + __sanitizer_syscall_pre_impl_extattr_list_fd( \ + (long long)(fd), (long long)(attrnamespace), (long long)(data), \ + (long long)(nbytes)) +#define __sanitizer_syscall_post_extattr_list_fd(res, fd, attrnamespace, data, \ + nbytes) \ + __sanitizer_syscall_post_impl_extattr_list_fd( \ + res, (long long)(fd), (long long)(attrnamespace), (long long)(data), \ + (long long)(nbytes)) +#define __sanitizer_syscall_pre_extattr_list_file(path, attrnamespace, data, \ + nbytes) \ + __sanitizer_syscall_pre_impl_extattr_list_file( \ + (long long)(path), (long long)(attrnamespace), (long long)(data), \ + (long long)(nbytes)) +#define __sanitizer_syscall_post_extattr_list_file(res, path, attrnamespace, \ + data, nbytes) \ + __sanitizer_syscall_post_impl_extattr_list_file( \ + res, (long long)(path), (long long)(attrnamespace), (long long)(data), \ + (long long)(nbytes)) +#define __sanitizer_syscall_pre_extattr_list_link(path, attrnamespace, data, \ + nbytes) \ + __sanitizer_syscall_pre_impl_extattr_list_link( \ + (long long)(path), (long long)(attrnamespace), (long long)(data), \ + (long long)(nbytes)) +#define __sanitizer_syscall_post_extattr_list_link(res, path, attrnamespace, \ + data, nbytes) \ + __sanitizer_syscall_post_impl_extattr_list_link( \ + res, (long long)(path), (long long)(attrnamespace), (long long)(data), \ + (long long)(nbytes)) +#define __sanitizer_syscall_pre_compat_50_pselect(nd, in, ou, ex, ts, mask) \ + __sanitizer_syscall_pre_impl_compat_50_pselect( \ + (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \ + (long long)(ts), (long long)(mask)) +#define __sanitizer_syscall_post_compat_50_pselect(res, nd, in, ou, ex, ts, \ + mask) \ + __sanitizer_syscall_post_impl_compat_50_pselect( \ + res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \ + (long long)(ts), (long long)(mask)) +#define __sanitizer_syscall_pre_compat_50_pollts(fds, nfds, ts, mask) \ + __sanitizer_syscall_pre_impl_compat_50_pollts( \ + (long long)(fds), (long long)(nfds), (long long)(ts), (long long)(mask)) +#define __sanitizer_syscall_post_compat_50_pollts(res, fds, nfds, ts, mask) \ + __sanitizer_syscall_post_impl_compat_50_pollts( \ + res, (long long)(fds), (long long)(nfds), (long long)(ts), \ + (long long)(mask)) +#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags) \ + __sanitizer_syscall_pre_impl_setxattr((long long)(path), (long long)(name), \ + (long long)(value), (long long)(size), \ + (long long)(flags)) +#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \ + __sanitizer_syscall_post_impl_setxattr( \ + res, (long long)(path), (long long)(name), (long long)(value), \ + (long long)(size), (long long)(flags)) +#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags) \ + __sanitizer_syscall_pre_impl_lsetxattr( \ + (long long)(path), (long long)(name), (long long)(value), \ + (long long)(size), 
(long long)(flags)) +#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size, \ + flags) \ + __sanitizer_syscall_post_impl_lsetxattr( \ + res, (long long)(path), (long long)(name), (long long)(value), \ + (long long)(size), (long long)(flags)) +#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags) \ + __sanitizer_syscall_pre_impl_fsetxattr( \ + (long long)(fd), (long long)(name), (long long)(value), \ + (long long)(size), (long long)(flags)) +#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags) \ + __sanitizer_syscall_post_impl_fsetxattr( \ + res, (long long)(fd), (long long)(name), (long long)(value), \ + (long long)(size), (long long)(flags)) +#define __sanitizer_syscall_pre_getxattr(path, name, value, size) \ + __sanitizer_syscall_pre_impl_getxattr((long long)(path), (long long)(name), \ + (long long)(value), (long long)(size)) +#define __sanitizer_syscall_post_getxattr(res, path, name, value, size) \ + __sanitizer_syscall_post_impl_getxattr( \ + res, (long long)(path), (long long)(name), (long long)(value), \ + (long long)(size)) +#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size) \ + __sanitizer_syscall_pre_impl_lgetxattr((long long)(path), (long long)(name), \ + (long long)(value), \ + (long long)(size)) +#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size) \ + __sanitizer_syscall_post_impl_lgetxattr( \ + res, (long long)(path), (long long)(name), (long long)(value), \ + (long long)(size)) +#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size) \ + __sanitizer_syscall_pre_impl_fgetxattr((long long)(fd), (long long)(name), \ + (long long)(value), \ + (long long)(size)) +#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size) \ + __sanitizer_syscall_post_impl_fgetxattr( \ + res, (long long)(fd), (long long)(name), (long long)(value), \ + (long long)(size)) +#define __sanitizer_syscall_pre_listxattr(path, list, size) \ + __sanitizer_syscall_pre_impl_listxattr((long long)(path), (long long)(list), \ + (long long)(size)) +#define __sanitizer_syscall_post_listxattr(res, path, list, size) \ + __sanitizer_syscall_post_impl_listxattr( \ + res, (long long)(path), (long long)(list), (long long)(size)) +#define __sanitizer_syscall_pre_llistxattr(path, list, size) \ + __sanitizer_syscall_pre_impl_llistxattr( \ + (long long)(path), (long long)(list), (long long)(size)) +#define __sanitizer_syscall_post_llistxattr(res, path, list, size) \ + __sanitizer_syscall_post_impl_llistxattr( \ + res, (long long)(path), (long long)(list), (long long)(size)) +#define __sanitizer_syscall_pre_flistxattr(fd, list, size) \ + __sanitizer_syscall_pre_impl_flistxattr((long long)(fd), (long long)(list), \ + (long long)(size)) +#define __sanitizer_syscall_post_flistxattr(res, fd, list, size) \ + __sanitizer_syscall_post_impl_flistxattr( \ + res, (long long)(fd), (long long)(list), (long long)(size)) +#define __sanitizer_syscall_pre_removexattr(path, name) \ + __sanitizer_syscall_pre_impl_removexattr((long long)(path), (long long)(name)) +#define __sanitizer_syscall_post_removexattr(res, path, name) \ + __sanitizer_syscall_post_impl_removexattr(res, (long long)(path), \ + (long long)(name)) +#define __sanitizer_syscall_pre_lremovexattr(path, name) \ + __sanitizer_syscall_pre_impl_lremovexattr((long long)(path), \ + (long long)(name)) +#define __sanitizer_syscall_post_lremovexattr(res, path, name) \ + __sanitizer_syscall_post_impl_lremovexattr(res, (long long)(path), \ + (long long)(name)) +#define 
__sanitizer_syscall_pre_fremovexattr(fd, name) \ + __sanitizer_syscall_pre_impl_fremovexattr((long long)(fd), (long long)(name)) +#define __sanitizer_syscall_post_fremovexattr(res, fd, name) \ + __sanitizer_syscall_post_impl_fremovexattr(res, (long long)(fd), \ + (long long)(name)) +#define __sanitizer_syscall_pre_compat_50___stat30(path, ub) \ + __sanitizer_syscall_pre_impl_compat_50___stat30((long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_post_compat_50___stat30(res, path, ub) \ + __sanitizer_syscall_post_impl_compat_50___stat30(res, (long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_pre_compat_50___fstat30(fd, sb) \ + __sanitizer_syscall_pre_impl_compat_50___fstat30((long long)(fd), \ + (long long)(sb)) +#define __sanitizer_syscall_post_compat_50___fstat30(res, fd, sb) \ + __sanitizer_syscall_post_impl_compat_50___fstat30(res, (long long)(fd), \ + (long long)(sb)) +#define __sanitizer_syscall_pre_compat_50___lstat30(path, ub) \ + __sanitizer_syscall_pre_impl_compat_50___lstat30((long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_post_compat_50___lstat30(res, path, ub) \ + __sanitizer_syscall_post_impl_compat_50___lstat30(res, (long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_pre___getdents30(fd, buf, count) \ + __sanitizer_syscall_pre_impl___getdents30((long long)(fd), (long long)(buf), \ + (long long)(count)) +#define __sanitizer_syscall_post___getdents30(res, fd, buf, count) \ + __sanitizer_syscall_post_impl___getdents30( \ + res, (long long)(fd), (long long)(buf), (long long)(count)) +#define __sanitizer_syscall_pre_posix_fadvise() \ + __sanitizer_syscall_pre_impl_posix_fadvise((long long)()) +#define __sanitizer_syscall_post_posix_fadvise(res) \ + __sanitizer_syscall_post_impl_posix_fadvise(res, (long long)()) +#define __sanitizer_syscall_pre_compat_30___fhstat30(fhp, sb) \ + __sanitizer_syscall_pre_impl_compat_30___fhstat30((long long)(fhp), \ + (long long)(sb)) +#define __sanitizer_syscall_post_compat_30___fhstat30(res, fhp, sb) \ + __sanitizer_syscall_post_impl_compat_30___fhstat30(res, (long long)(fhp), \ + (long long)(sb)) +#define __sanitizer_syscall_pre_compat_50___ntp_gettime30(ntvp) \ + __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30((long long)(ntvp)) +#define __sanitizer_syscall_post_compat_50___ntp_gettime30(res, ntvp) \ + __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(res, \ + (long long)(ntvp)) +#define __sanitizer_syscall_pre___socket30(domain, type, protocol) \ + __sanitizer_syscall_pre_impl___socket30( \ + (long long)(domain), (long long)(type), (long long)(protocol)) +#define __sanitizer_syscall_post___socket30(res, domain, type, protocol) \ + __sanitizer_syscall_post_impl___socket30( \ + res, (long long)(domain), (long long)(type), (long long)(protocol)) +#define __sanitizer_syscall_pre___getfh30(fname, fhp, fh_size) \ + __sanitizer_syscall_pre_impl___getfh30((long long)(fname), (long long)(fhp), \ + (long long)(fh_size)) +#define __sanitizer_syscall_post___getfh30(res, fname, fhp, fh_size) \ + __sanitizer_syscall_post_impl___getfh30( \ + res, (long long)(fname), (long long)(fhp), (long long)(fh_size)) +#define __sanitizer_syscall_pre___fhopen40(fhp, fh_size, flags) \ + __sanitizer_syscall_pre_impl___fhopen40( \ + (long long)(fhp), (long long)(fh_size), (long long)(flags)) +#define __sanitizer_syscall_post___fhopen40(res, fhp, fh_size, flags) \ + __sanitizer_syscall_post_impl___fhopen40( \ + res, (long long)(fhp), (long long)(fh_size), (long long)(flags)) +#define 
__sanitizer_syscall_pre_compat_90_fhstatvfs1(fhp, fh_size, buf, flags) \ + __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1( \ + (long long)(fhp), (long long)(fh_size), (long long)(buf), \ + (long long)(flags)) +#define __sanitizer_syscall_post_compat_90_fhstatvfs1(res, fhp, fh_size, buf, \ + flags) \ + __sanitizer_syscall_post_impl_compat_90_fhstatvfs1( \ + res, (long long)(fhp), (long long)(fh_size), (long long)(buf), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_compat_50___fhstat40(fhp, fh_size, sb) \ + __sanitizer_syscall_pre_impl_compat_50___fhstat40( \ + (long long)(fhp), (long long)(fh_size), (long long)(sb)) +#define __sanitizer_syscall_post_compat_50___fhstat40(res, fhp, fh_size, sb) \ + __sanitizer_syscall_post_impl_compat_50___fhstat40( \ + res, (long long)(fhp), (long long)(fh_size), (long long)(sb)) +#define __sanitizer_syscall_pre_aio_cancel(fildes, aiocbp) \ + __sanitizer_syscall_pre_impl_aio_cancel((long long)(fildes), \ + (long long)(aiocbp)) +#define __sanitizer_syscall_post_aio_cancel(res, fildes, aiocbp) \ + __sanitizer_syscall_post_impl_aio_cancel(res, (long long)(fildes), \ + (long long)(aiocbp)) +#define __sanitizer_syscall_pre_aio_error(aiocbp) \ + __sanitizer_syscall_pre_impl_aio_error((long long)(aiocbp)) +#define __sanitizer_syscall_post_aio_error(res, aiocbp) \ + __sanitizer_syscall_post_impl_aio_error(res, (long long)(aiocbp)) +#define __sanitizer_syscall_pre_aio_fsync(op, aiocbp) \ + __sanitizer_syscall_pre_impl_aio_fsync((long long)(op), (long long)(aiocbp)) +#define __sanitizer_syscall_post_aio_fsync(res, op, aiocbp) \ + __sanitizer_syscall_post_impl_aio_fsync(res, (long long)(op), \ + (long long)(aiocbp)) +#define __sanitizer_syscall_pre_aio_read(aiocbp) \ + __sanitizer_syscall_pre_impl_aio_read((long long)(aiocbp)) +#define __sanitizer_syscall_post_aio_read(res, aiocbp) \ + __sanitizer_syscall_post_impl_aio_read(res, (long long)(aiocbp)) +#define __sanitizer_syscall_pre_aio_return(aiocbp) \ + __sanitizer_syscall_pre_impl_aio_return((long long)(aiocbp)) +#define __sanitizer_syscall_post_aio_return(res, aiocbp) \ + __sanitizer_syscall_post_impl_aio_return(res, (long long)(aiocbp)) +#define __sanitizer_syscall_pre_compat_50_aio_suspend(list, nent, timeout) \ + __sanitizer_syscall_pre_impl_compat_50_aio_suspend( \ + (long long)(list), (long long)(nent), (long long)(timeout)) +#define __sanitizer_syscall_post_compat_50_aio_suspend(res, list, nent, \ + timeout) \ + __sanitizer_syscall_post_impl_compat_50_aio_suspend( \ + res, (long long)(list), (long long)(nent), (long long)(timeout)) +#define __sanitizer_syscall_pre_aio_write(aiocbp) \ + __sanitizer_syscall_pre_impl_aio_write((long long)(aiocbp)) +#define __sanitizer_syscall_post_aio_write(res, aiocbp) \ + __sanitizer_syscall_post_impl_aio_write(res, (long long)(aiocbp)) +#define __sanitizer_syscall_pre_lio_listio(mode, list, nent, sig) \ + __sanitizer_syscall_pre_impl_lio_listio((long long)(mode), \ + (long long)(list), \ + (long long)(nent), (long long)(sig)) +#define __sanitizer_syscall_post_lio_listio(res, mode, list, nent, sig) \ + __sanitizer_syscall_post_impl_lio_listio( \ + res, (long long)(mode), (long long)(list), (long long)(nent), \ + (long long)(sig)) +/* syscall 407 has been skipped */ +/* syscall 408 has been skipped */ +/* syscall 409 has been skipped */ +#define __sanitizer_syscall_pre___mount50(type, path, flags, data, data_len) \ + __sanitizer_syscall_pre_impl___mount50( \ + (long long)(type), (long long)(path), (long long)(flags), \ + (long long)(data), (long long)(data_len)) 
+#define __sanitizer_syscall_post___mount50(res, type, path, flags, data, \ + data_len) \ + __sanitizer_syscall_post_impl___mount50( \ + res, (long long)(type), (long long)(path), (long long)(flags), \ + (long long)(data), (long long)(data_len)) +#define __sanitizer_syscall_pre_mremap(old_address, old_size, new_address, \ + new_size, flags) \ + __sanitizer_syscall_pre_impl_mremap( \ + (long long)(old_address), (long long)(old_size), \ + (long long)(new_address), (long long)(new_size), (long long)(flags)) +#define __sanitizer_syscall_post_mremap(res, old_address, old_size, \ + new_address, new_size, flags) \ + __sanitizer_syscall_post_impl_mremap( \ + res, (long long)(old_address), (long long)(old_size), \ + (long long)(new_address), (long long)(new_size), (long long)(flags)) +#define __sanitizer_syscall_pre_pset_create(psid) \ + __sanitizer_syscall_pre_impl_pset_create((long long)(psid)) +#define __sanitizer_syscall_post_pset_create(res, psid) \ + __sanitizer_syscall_post_impl_pset_create(res, (long long)(psid)) +#define __sanitizer_syscall_pre_pset_destroy(psid) \ + __sanitizer_syscall_pre_impl_pset_destroy((long long)(psid)) +#define __sanitizer_syscall_post_pset_destroy(res, psid) \ + __sanitizer_syscall_post_impl_pset_destroy(res, (long long)(psid)) +#define __sanitizer_syscall_pre_pset_assign(psid, cpuid, opsid) \ + __sanitizer_syscall_pre_impl_pset_assign( \ + (long long)(psid), (long long)(cpuid), (long long)(opsid)) +#define __sanitizer_syscall_post_pset_assign(res, psid, cpuid, opsid) \ + __sanitizer_syscall_post_impl_pset_assign( \ + res, (long long)(psid), (long long)(cpuid), (long long)(opsid)) +#define __sanitizer_syscall_pre__pset_bind(idtype, first_id, second_id, psid, \ + opsid) \ + __sanitizer_syscall_pre_impl__pset_bind( \ + (long long)(idtype), (long long)(first_id), (long long)(second_id), \ + (long long)(psid), (long long)(opsid)) +#define __sanitizer_syscall_post__pset_bind(res, idtype, first_id, second_id, \ + psid, opsid) \ + __sanitizer_syscall_post_impl__pset_bind( \ + res, (long long)(idtype), (long long)(first_id), (long long)(second_id), \ + (long long)(psid), (long long)(opsid)) +#define __sanitizer_syscall_pre___posix_fadvise50(fd, PAD, offset, len, \ + advice) \ + __sanitizer_syscall_pre_impl___posix_fadvise50( \ + (long long)(fd), (long long)(PAD), (long long)(offset), \ + (long long)(len), (long long)(advice)) +#define __sanitizer_syscall_post___posix_fadvise50(res, fd, PAD, offset, len, \ + advice) \ + __sanitizer_syscall_post_impl___posix_fadvise50( \ + res, (long long)(fd), (long long)(PAD), (long long)(offset), \ + (long long)(len), (long long)(advice)) +#define __sanitizer_syscall_pre___select50(nd, in, ou, ex, tv) \ + __sanitizer_syscall_pre_impl___select50((long long)(nd), (long long)(in), \ + (long long)(ou), (long long)(ex), \ + (long long)(tv)) +#define __sanitizer_syscall_post___select50(res, nd, in, ou, ex, tv) \ + __sanitizer_syscall_post_impl___select50(res, (long long)(nd), \ + (long long)(in), (long long)(ou), \ + (long long)(ex), (long long)(tv)) +#define __sanitizer_syscall_pre___gettimeofday50(tp, tzp) \ + __sanitizer_syscall_pre_impl___gettimeofday50((long long)(tp), \ + (long long)(tzp)) +#define __sanitizer_syscall_post___gettimeofday50(res, tp, tzp) \ + __sanitizer_syscall_post_impl___gettimeofday50(res, (long long)(tp), \ + (long long)(tzp)) +#define __sanitizer_syscall_pre___settimeofday50(tv, tzp) \ + __sanitizer_syscall_pre_impl___settimeofday50((long long)(tv), \ + (long long)(tzp)) +#define 
__sanitizer_syscall_post___settimeofday50(res, tv, tzp) \ + __sanitizer_syscall_post_impl___settimeofday50(res, (long long)(tv), \ + (long long)(tzp)) +#define __sanitizer_syscall_pre___utimes50(path, tptr) \ + __sanitizer_syscall_pre_impl___utimes50((long long)(path), (long long)(tptr)) +#define __sanitizer_syscall_post___utimes50(res, path, tptr) \ + __sanitizer_syscall_post_impl___utimes50(res, (long long)(path), \ + (long long)(tptr)) +#define __sanitizer_syscall_pre___adjtime50(delta, olddelta) \ + __sanitizer_syscall_pre_impl___adjtime50((long long)(delta), \ + (long long)(olddelta)) +#define __sanitizer_syscall_post___adjtime50(res, delta, olddelta) \ + __sanitizer_syscall_post_impl___adjtime50(res, (long long)(delta), \ + (long long)(olddelta)) +#define __sanitizer_syscall_pre___lfs_segwait50(fsidp, tv) \ + __sanitizer_syscall_pre_impl___lfs_segwait50((long long)(fsidp), \ + (long long)(tv)) +#define __sanitizer_syscall_post___lfs_segwait50(res, fsidp, tv) \ + __sanitizer_syscall_post_impl___lfs_segwait50(res, (long long)(fsidp), \ + (long long)(tv)) +#define __sanitizer_syscall_pre___futimes50(fd, tptr) \ + __sanitizer_syscall_pre_impl___futimes50((long long)(fd), (long long)(tptr)) +#define __sanitizer_syscall_post___futimes50(res, fd, tptr) \ + __sanitizer_syscall_post_impl___futimes50(res, (long long)(fd), \ + (long long)(tptr)) +#define __sanitizer_syscall_pre___lutimes50(path, tptr) \ + __sanitizer_syscall_pre_impl___lutimes50((long long)(path), (long long)(tptr)) +#define __sanitizer_syscall_post___lutimes50(res, path, tptr) \ + __sanitizer_syscall_post_impl___lutimes50(res, (long long)(path), \ + (long long)(tptr)) +#define __sanitizer_syscall_pre___setitimer50(which, itv, oitv) \ + __sanitizer_syscall_pre_impl___setitimer50( \ + (long long)(which), (long long)(itv), (long long)(oitv)) +#define __sanitizer_syscall_post___setitimer50(res, which, itv, oitv) \ + __sanitizer_syscall_post_impl___setitimer50( \ + res, (long long)(which), (long long)(itv), (long long)(oitv)) +#define __sanitizer_syscall_pre___getitimer50(which, itv) \ + __sanitizer_syscall_pre_impl___getitimer50((long long)(which), \ + (long long)(itv)) +#define __sanitizer_syscall_post___getitimer50(res, which, itv) \ + __sanitizer_syscall_post_impl___getitimer50(res, (long long)(which), \ + (long long)(itv)) +#define __sanitizer_syscall_pre___clock_gettime50(clock_id, tp) \ + __sanitizer_syscall_pre_impl___clock_gettime50((long long)(clock_id), \ + (long long)(tp)) +#define __sanitizer_syscall_post___clock_gettime50(res, clock_id, tp) \ + __sanitizer_syscall_post_impl___clock_gettime50(res, (long long)(clock_id), \ + (long long)(tp)) +#define __sanitizer_syscall_pre___clock_settime50(clock_id, tp) \ + __sanitizer_syscall_pre_impl___clock_settime50((long long)(clock_id), \ + (long long)(tp)) +#define __sanitizer_syscall_post___clock_settime50(res, clock_id, tp) \ + __sanitizer_syscall_post_impl___clock_settime50(res, (long long)(clock_id), \ + (long long)(tp)) +#define __sanitizer_syscall_pre___clock_getres50(clock_id, tp) \ + __sanitizer_syscall_pre_impl___clock_getres50((long long)(clock_id), \ + (long long)(tp)) +#define __sanitizer_syscall_post___clock_getres50(res, clock_id, tp) \ + __sanitizer_syscall_post_impl___clock_getres50(res, (long long)(clock_id), \ + (long long)(tp)) +#define __sanitizer_syscall_pre___nanosleep50(rqtp, rmtp) \ + __sanitizer_syscall_pre_impl___nanosleep50((long long)(rqtp), \ + (long long)(rmtp)) +#define __sanitizer_syscall_post___nanosleep50(res, rqtp, rmtp) \ + 
__sanitizer_syscall_post_impl___nanosleep50(res, (long long)(rqtp), \ + (long long)(rmtp)) +#define __sanitizer_syscall_pre_____sigtimedwait50(set, info, timeout) \ + __sanitizer_syscall_pre_impl_____sigtimedwait50( \ + (long long)(set), (long long)(info), (long long)(timeout)) +#define __sanitizer_syscall_post_____sigtimedwait50(res, set, info, timeout) \ + __sanitizer_syscall_post_impl_____sigtimedwait50( \ + res, (long long)(set), (long long)(info), (long long)(timeout)) +#define __sanitizer_syscall_pre___mq_timedsend50(mqdes, msg_ptr, msg_len, \ + msg_prio, abs_timeout) \ + __sanitizer_syscall_pre_impl___mq_timedsend50( \ + (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \ + (long long)(msg_prio), (long long)(abs_timeout)) +#define __sanitizer_syscall_post___mq_timedsend50( \ + res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \ + __sanitizer_syscall_post_impl___mq_timedsend50( \ + res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \ + (long long)(msg_prio), (long long)(abs_timeout)) +#define __sanitizer_syscall_pre___mq_timedreceive50(mqdes, msg_ptr, msg_len, \ + msg_prio, abs_timeout) \ + __sanitizer_syscall_pre_impl___mq_timedreceive50( \ + (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \ + (long long)(msg_prio), (long long)(abs_timeout)) +#define __sanitizer_syscall_post___mq_timedreceive50( \ + res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \ + __sanitizer_syscall_post_impl___mq_timedreceive50( \ + res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \ + (long long)(msg_prio), (long long)(abs_timeout)) +#define __sanitizer_syscall_pre_compat_60__lwp_park(ts, unpark, hint, \ + unparkhint) \ + __sanitizer_syscall_pre_impl_compat_60__lwp_park( \ + (long long)(ts), (long long)(unpark), (long long)(hint), \ + (long long)(unparkhint)) +#define __sanitizer_syscall_post_compat_60__lwp_park(res, ts, unpark, hint, \ + unparkhint) \ + __sanitizer_syscall_post_impl_compat_60__lwp_park( \ + res, (long long)(ts), (long long)(unpark), (long long)(hint), \ + (long long)(unparkhint)) +#define __sanitizer_syscall_pre___kevent50(fd, changelist, nchanges, \ + eventlist, nevents, timeout) \ + __sanitizer_syscall_pre_impl___kevent50( \ + (long long)(fd), (long long)(changelist), (long long)(nchanges), \ + (long long)(eventlist), (long long)(nevents), (long long)(timeout)) +#define __sanitizer_syscall_post___kevent50(res, fd, changelist, nchanges, \ + eventlist, nevents, timeout) \ + __sanitizer_syscall_post_impl___kevent50( \ + res, (long long)(fd), (long long)(changelist), (long long)(nchanges), \ + (long long)(eventlist), (long long)(nevents), (long long)(timeout)) +#define __sanitizer_syscall_pre___pselect50(nd, in, ou, ex, ts, mask) \ + __sanitizer_syscall_pre_impl___pselect50((long long)(nd), (long long)(in), \ + (long long)(ou), (long long)(ex), \ + (long long)(ts), (long long)(mask)) +#define __sanitizer_syscall_post___pselect50(res, nd, in, ou, ex, ts, mask) \ + __sanitizer_syscall_post_impl___pselect50( \ + res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \ + (long long)(ts), (long long)(mask)) +#define __sanitizer_syscall_pre___pollts50(fds, nfds, ts, mask) \ + __sanitizer_syscall_pre_impl___pollts50((long long)(fds), (long long)(nfds), \ + (long long)(ts), (long long)(mask)) +#define __sanitizer_syscall_post___pollts50(res, fds, nfds, ts, mask) \ + __sanitizer_syscall_post_impl___pollts50(res, (long long)(fds), \ + (long long)(nfds), (long long)(ts), \ + (long long)(mask)) +#define 
__sanitizer_syscall_pre___aio_suspend50(list, nent, timeout) \ + __sanitizer_syscall_pre_impl___aio_suspend50( \ + (long long)(list), (long long)(nent), (long long)(timeout)) +#define __sanitizer_syscall_post___aio_suspend50(res, list, nent, timeout) \ + __sanitizer_syscall_post_impl___aio_suspend50( \ + res, (long long)(list), (long long)(nent), (long long)(timeout)) +#define __sanitizer_syscall_pre___stat50(path, ub) \ + __sanitizer_syscall_pre_impl___stat50((long long)(path), (long long)(ub)) +#define __sanitizer_syscall_post___stat50(res, path, ub) \ + __sanitizer_syscall_post_impl___stat50(res, (long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_pre___fstat50(fd, sb) \ + __sanitizer_syscall_pre_impl___fstat50((long long)(fd), (long long)(sb)) +#define __sanitizer_syscall_post___fstat50(res, fd, sb) \ + __sanitizer_syscall_post_impl___fstat50(res, (long long)(fd), (long long)(sb)) +#define __sanitizer_syscall_pre___lstat50(path, ub) \ + __sanitizer_syscall_pre_impl___lstat50((long long)(path), (long long)(ub)) +#define __sanitizer_syscall_post___lstat50(res, path, ub) \ + __sanitizer_syscall_post_impl___lstat50(res, (long long)(path), \ + (long long)(ub)) +#define __sanitizer_syscall_pre_____semctl50(semid, semnum, cmd, arg) \ + __sanitizer_syscall_pre_impl_____semctl50( \ + (long long)(semid), (long long)(semnum), (long long)(cmd), \ + (long long)(arg)) +#define __sanitizer_syscall_post_____semctl50(res, semid, semnum, cmd, arg) \ + __sanitizer_syscall_post_impl_____semctl50( \ + res, (long long)(semid), (long long)(semnum), (long long)(cmd), \ + (long long)(arg)) +#define __sanitizer_syscall_pre___shmctl50(shmid, cmd, buf) \ + __sanitizer_syscall_pre_impl___shmctl50((long long)(shmid), \ + (long long)(cmd), (long long)(buf)) +#define __sanitizer_syscall_post___shmctl50(res, shmid, cmd, buf) \ + __sanitizer_syscall_post_impl___shmctl50(res, (long long)(shmid), \ + (long long)(cmd), (long long)(buf)) +#define __sanitizer_syscall_pre___msgctl50(msqid, cmd, buf) \ + __sanitizer_syscall_pre_impl___msgctl50((long long)(msqid), \ + (long long)(cmd), (long long)(buf)) +#define __sanitizer_syscall_post___msgctl50(res, msqid, cmd, buf) \ + __sanitizer_syscall_post_impl___msgctl50(res, (long long)(msqid), \ + (long long)(cmd), (long long)(buf)) +#define __sanitizer_syscall_pre___getrusage50(who, rusage) \ + __sanitizer_syscall_pre_impl___getrusage50((long long)(who), \ + (long long)(rusage)) +#define __sanitizer_syscall_post___getrusage50(res, who, rusage) \ + __sanitizer_syscall_post_impl___getrusage50(res, (long long)(who), \ + (long long)(rusage)) +#define __sanitizer_syscall_pre___timer_settime50(timerid, flags, value, \ + ovalue) \ + __sanitizer_syscall_pre_impl___timer_settime50( \ + (long long)(timerid), (long long)(flags), (long long)(value), \ + (long long)(ovalue)) +#define __sanitizer_syscall_post___timer_settime50(res, timerid, flags, value, \ + ovalue) \ + __sanitizer_syscall_post_impl___timer_settime50( \ + res, (long long)(timerid), (long long)(flags), (long long)(value), \ + (long long)(ovalue)) +#define __sanitizer_syscall_pre___timer_gettime50(timerid, value) \ + __sanitizer_syscall_pre_impl___timer_gettime50((long long)(timerid), \ + (long long)(value)) +#define __sanitizer_syscall_post___timer_gettime50(res, timerid, value) \ + __sanitizer_syscall_post_impl___timer_gettime50(res, (long long)(timerid), \ + (long long)(value)) +#if defined(NTP) || !defined(_KERNEL_OPT) +#define __sanitizer_syscall_pre___ntp_gettime50(ntvp) \ + 
__sanitizer_syscall_pre_impl___ntp_gettime50((long long)(ntvp)) +#define __sanitizer_syscall_post___ntp_gettime50(res, ntvp) \ + __sanitizer_syscall_post_impl___ntp_gettime50(res, (long long)(ntvp)) +#else +/* syscall 448 has been skipped */ +#endif +#define __sanitizer_syscall_pre___wait450(pid, status, options, rusage) \ + __sanitizer_syscall_pre_impl___wait450( \ + (long long)(pid), (long long)(status), (long long)(options), \ + (long long)(rusage)) +#define __sanitizer_syscall_post___wait450(res, pid, status, options, rusage) \ + __sanitizer_syscall_post_impl___wait450( \ + res, (long long)(pid), (long long)(status), (long long)(options), \ + (long long)(rusage)) +#define __sanitizer_syscall_pre___mknod50(path, mode, dev) \ + __sanitizer_syscall_pre_impl___mknod50((long long)(path), (long long)(mode), \ + (long long)(dev)) +#define __sanitizer_syscall_post___mknod50(res, path, mode, dev) \ + __sanitizer_syscall_post_impl___mknod50(res, (long long)(path), \ + (long long)(mode), (long long)(dev)) +#define __sanitizer_syscall_pre___fhstat50(fhp, fh_size, sb) \ + __sanitizer_syscall_pre_impl___fhstat50( \ + (long long)(fhp), (long long)(fh_size), (long long)(sb)) +#define __sanitizer_syscall_post___fhstat50(res, fhp, fh_size, sb) \ + __sanitizer_syscall_post_impl___fhstat50( \ + res, (long long)(fhp), (long long)(fh_size), (long long)(sb)) +/* syscall 452 has been skipped */ +#define __sanitizer_syscall_pre_pipe2(fildes, flags) \ + __sanitizer_syscall_pre_impl_pipe2((long long)(fildes), (long long)(flags)) +#define __sanitizer_syscall_post_pipe2(res, fildes, flags) \ + __sanitizer_syscall_post_impl_pipe2(res, (long long)(fildes), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_dup3(from, to, flags) \ + __sanitizer_syscall_pre_impl_dup3((long long)(from), (long long)(to), \ + (long long)(flags)) +#define __sanitizer_syscall_post_dup3(res, from, to, flags) \ + __sanitizer_syscall_post_impl_dup3(res, (long long)(from), (long long)(to), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_kqueue1(flags) \ + __sanitizer_syscall_pre_impl_kqueue1((long long)(flags)) +#define __sanitizer_syscall_post_kqueue1(res, flags) \ + __sanitizer_syscall_post_impl_kqueue1(res, (long long)(flags)) +#define __sanitizer_syscall_pre_paccept(s, name, anamelen, mask, flags) \ + __sanitizer_syscall_pre_impl_paccept((long long)(s), (long long)(name), \ + (long long)(anamelen), \ + (long long)(mask), (long long)(flags)) +#define __sanitizer_syscall_post_paccept(res, s, name, anamelen, mask, flags) \ + __sanitizer_syscall_post_impl_paccept( \ + res, (long long)(s), (long long)(name), (long long)(anamelen), \ + (long long)(mask), (long long)(flags)) +#define __sanitizer_syscall_pre_linkat(fd1, name1, fd2, name2, flags) \ + __sanitizer_syscall_pre_impl_linkat((long long)(fd1), (long long)(name1), \ + (long long)(fd2), (long long)(name2), \ + (long long)(flags)) +#define __sanitizer_syscall_post_linkat(res, fd1, name1, fd2, name2, flags) \ + __sanitizer_syscall_post_impl_linkat(res, (long long)(fd1), \ + (long long)(name1), (long long)(fd2), \ + (long long)(name2), (long long)(flags)) +#define __sanitizer_syscall_pre_renameat(fromfd, from, tofd, to) \ + __sanitizer_syscall_pre_impl_renameat((long long)(fromfd), \ + (long long)(from), (long long)(tofd), \ + (long long)(to)) +#define __sanitizer_syscall_post_renameat(res, fromfd, from, tofd, to) \ + __sanitizer_syscall_post_impl_renameat(res, (long long)(fromfd), \ + (long long)(from), (long long)(tofd), \ + (long long)(to)) +#define 
__sanitizer_syscall_pre_mkfifoat(fd, path, mode) \ + __sanitizer_syscall_pre_impl_mkfifoat((long long)(fd), (long long)(path), \ + (long long)(mode)) +#define __sanitizer_syscall_post_mkfifoat(res, fd, path, mode) \ + __sanitizer_syscall_post_impl_mkfifoat(res, (long long)(fd), \ + (long long)(path), (long long)(mode)) +#define __sanitizer_syscall_pre_mknodat(fd, path, mode, PAD, dev) \ + __sanitizer_syscall_pre_impl_mknodat((long long)(fd), (long long)(path), \ + (long long)(mode), (long long)(PAD), \ + (long long)(dev)) +#define __sanitizer_syscall_post_mknodat(res, fd, path, mode, PAD, dev) \ + __sanitizer_syscall_post_impl_mknodat(res, (long long)(fd), \ + (long long)(path), (long long)(mode), \ + (long long)(PAD), (long long)(dev)) +#define __sanitizer_syscall_pre_mkdirat(fd, path, mode) \ + __sanitizer_syscall_pre_impl_mkdirat((long long)(fd), (long long)(path), \ + (long long)(mode)) +#define __sanitizer_syscall_post_mkdirat(res, fd, path, mode) \ + __sanitizer_syscall_post_impl_mkdirat(res, (long long)(fd), \ + (long long)(path), (long long)(mode)) +#define __sanitizer_syscall_pre_faccessat(fd, path, amode, flag) \ + __sanitizer_syscall_pre_impl_faccessat((long long)(fd), (long long)(path), \ + (long long)(amode), \ + (long long)(flag)) +#define __sanitizer_syscall_post_faccessat(res, fd, path, amode, flag) \ + __sanitizer_syscall_post_impl_faccessat( \ + res, (long long)(fd), (long long)(path), (long long)(amode), \ + (long long)(flag)) +#define __sanitizer_syscall_pre_fchmodat(fd, path, mode, flag) \ + __sanitizer_syscall_pre_impl_fchmodat((long long)(fd), (long long)(path), \ + (long long)(mode), (long long)(flag)) +#define __sanitizer_syscall_post_fchmodat(res, fd, path, mode, flag) \ + __sanitizer_syscall_post_impl_fchmodat(res, (long long)(fd), \ + (long long)(path), (long long)(mode), \ + (long long)(flag)) +#define __sanitizer_syscall_pre_fchownat(fd, path, owner, group, flag) \ + __sanitizer_syscall_pre_impl_fchownat((long long)(fd), (long long)(path), \ + (long long)(owner), \ + (long long)(group), (long long)(flag)) +#define __sanitizer_syscall_post_fchownat(res, fd, path, owner, group, flag) \ + __sanitizer_syscall_post_impl_fchownat( \ + res, (long long)(fd), (long long)(path), (long long)(owner), \ + (long long)(group), (long long)(flag)) +#define __sanitizer_syscall_pre_fexecve(fd, argp, envp) \ + __sanitizer_syscall_pre_impl_fexecve((long long)(fd), (long long)(argp), \ + (long long)(envp)) +#define __sanitizer_syscall_post_fexecve(res, fd, argp, envp) \ + __sanitizer_syscall_post_impl_fexecve(res, (long long)(fd), \ + (long long)(argp), (long long)(envp)) +#define __sanitizer_syscall_pre_fstatat(fd, path, buf, flag) \ + __sanitizer_syscall_pre_impl_fstatat((long long)(fd), (long long)(path), \ + (long long)(buf), (long long)(flag)) +#define __sanitizer_syscall_post_fstatat(res, fd, path, buf, flag) \ + __sanitizer_syscall_post_impl_fstatat(res, (long long)(fd), \ + (long long)(path), (long long)(buf), \ + (long long)(flag)) +#define __sanitizer_syscall_pre_utimensat(fd, path, tptr, flag) \ + __sanitizer_syscall_pre_impl_utimensat((long long)(fd), (long long)(path), \ + (long long)(tptr), (long long)(flag)) +#define __sanitizer_syscall_post_utimensat(res, fd, path, tptr, flag) \ + __sanitizer_syscall_post_impl_utimensat( \ + res, (long long)(fd), (long long)(path), (long long)(tptr), \ + (long long)(flag)) +#define __sanitizer_syscall_pre_openat(fd, path, oflags, mode) \ + __sanitizer_syscall_pre_impl_openat((long long)(fd), (long long)(path), \ + (long 
long)(oflags), (long long)(mode)) +#define __sanitizer_syscall_post_openat(res, fd, path, oflags, mode) \ + __sanitizer_syscall_post_impl_openat(res, (long long)(fd), \ + (long long)(path), (long long)(oflags), \ + (long long)(mode)) +#define __sanitizer_syscall_pre_readlinkat(fd, path, buf, bufsize) \ + __sanitizer_syscall_pre_impl_readlinkat((long long)(fd), (long long)(path), \ + (long long)(buf), \ + (long long)(bufsize)) +#define __sanitizer_syscall_post_readlinkat(res, fd, path, buf, bufsize) \ + __sanitizer_syscall_post_impl_readlinkat( \ + res, (long long)(fd), (long long)(path), (long long)(buf), \ + (long long)(bufsize)) +#define __sanitizer_syscall_pre_symlinkat(path1, fd, path2) \ + __sanitizer_syscall_pre_impl_symlinkat((long long)(path1), (long long)(fd), \ + (long long)(path2)) +#define __sanitizer_syscall_post_symlinkat(res, path1, fd, path2) \ + __sanitizer_syscall_post_impl_symlinkat(res, (long long)(path1), \ + (long long)(fd), (long long)(path2)) +#define __sanitizer_syscall_pre_unlinkat(fd, path, flag) \ + __sanitizer_syscall_pre_impl_unlinkat((long long)(fd), (long long)(path), \ + (long long)(flag)) +#define __sanitizer_syscall_post_unlinkat(res, fd, path, flag) \ + __sanitizer_syscall_post_impl_unlinkat(res, (long long)(fd), \ + (long long)(path), (long long)(flag)) +#define __sanitizer_syscall_pre_futimens(fd, tptr) \ + __sanitizer_syscall_pre_impl_futimens((long long)(fd), (long long)(tptr)) +#define __sanitizer_syscall_post_futimens(res, fd, tptr) \ + __sanitizer_syscall_post_impl_futimens(res, (long long)(fd), \ + (long long)(tptr)) +#define __sanitizer_syscall_pre___quotactl(path, args) \ + __sanitizer_syscall_pre_impl___quotactl((long long)(path), (long long)(args)) +#define __sanitizer_syscall_post___quotactl(res, path, args) \ + __sanitizer_syscall_post_impl___quotactl(res, (long long)(path), \ + (long long)(args)) +#define __sanitizer_syscall_pre_posix_spawn(pid, path, file_actions, attrp, \ + argv, envp) \ + __sanitizer_syscall_pre_impl_posix_spawn( \ + (long long)(pid), (long long)(path), (long long)(file_actions), \ + (long long)(attrp), (long long)(argv), (long long)(envp)) +#define __sanitizer_syscall_post_posix_spawn(res, pid, path, file_actions, \ + attrp, argv, envp) \ + __sanitizer_syscall_post_impl_posix_spawn( \ + res, (long long)(pid), (long long)(path), (long long)(file_actions), \ + (long long)(attrp), (long long)(argv), (long long)(envp)) +#define __sanitizer_syscall_pre_recvmmsg(s, mmsg, vlen, flags, timeout) \ + __sanitizer_syscall_pre_impl_recvmmsg((long long)(s), (long long)(mmsg), \ + (long long)(vlen), (long long)(flags), \ + (long long)(timeout)) +#define __sanitizer_syscall_post_recvmmsg(res, s, mmsg, vlen, flags, timeout) \ + __sanitizer_syscall_post_impl_recvmmsg( \ + res, (long long)(s), (long long)(mmsg), (long long)(vlen), \ + (long long)(flags), (long long)(timeout)) +#define __sanitizer_syscall_pre_sendmmsg(s, mmsg, vlen, flags) \ + __sanitizer_syscall_pre_impl_sendmmsg((long long)(s), (long long)(mmsg), \ + (long long)(vlen), (long long)(flags)) +#define __sanitizer_syscall_post_sendmmsg(res, s, mmsg, vlen, flags) \ + __sanitizer_syscall_post_impl_sendmmsg(res, (long long)(s), \ + (long long)(mmsg), (long long)(vlen), \ + (long long)(flags)) +#define __sanitizer_syscall_pre_clock_nanosleep(clock_id, flags, rqtp, rmtp) \ + __sanitizer_syscall_pre_impl_clock_nanosleep( \ + (long long)(clock_id), (long long)(flags), (long long)(rqtp), \ + (long long)(rmtp)) +#define __sanitizer_syscall_post_clock_nanosleep(res, clock_id, flags, 
rqtp, \ + rmtp) \ + __sanitizer_syscall_post_impl_clock_nanosleep( \ + res, (long long)(clock_id), (long long)(flags), (long long)(rqtp), \ + (long long)(rmtp)) +#define __sanitizer_syscall_pre____lwp_park60(clock_id, flags, ts, unpark, \ + hint, unparkhint) \ + __sanitizer_syscall_pre_impl____lwp_park60( \ + (long long)(clock_id), (long long)(flags), (long long)(ts), \ + (long long)(unpark), (long long)(hint), (long long)(unparkhint)) +#define __sanitizer_syscall_post____lwp_park60(res, clock_id, flags, ts, \ + unpark, hint, unparkhint) \ + __sanitizer_syscall_post_impl____lwp_park60( \ + res, (long long)(clock_id), (long long)(flags), (long long)(ts), \ + (long long)(unpark), (long long)(hint), (long long)(unparkhint)) +#define __sanitizer_syscall_pre_posix_fallocate(fd, PAD, pos, len) \ + __sanitizer_syscall_pre_impl_posix_fallocate( \ + (long long)(fd), (long long)(PAD), (long long)(pos), (long long)(len)) +#define __sanitizer_syscall_post_posix_fallocate(res, fd, PAD, pos, len) \ + __sanitizer_syscall_post_impl_posix_fallocate( \ + res, (long long)(fd), (long long)(PAD), (long long)(pos), \ + (long long)(len)) +#define __sanitizer_syscall_pre_fdiscard(fd, PAD, pos, len) \ + __sanitizer_syscall_pre_impl_fdiscard((long long)(fd), (long long)(PAD), \ + (long long)(pos), (long long)(len)) +#define __sanitizer_syscall_post_fdiscard(res, fd, PAD, pos, len) \ + __sanitizer_syscall_post_impl_fdiscard(res, (long long)(fd), \ + (long long)(PAD), (long long)(pos), \ + (long long)(len)) +#define __sanitizer_syscall_pre_wait6(idtype, id, status, options, wru, info) \ + __sanitizer_syscall_pre_impl_wait6( \ + (long long)(idtype), (long long)(id), (long long)(status), \ + (long long)(options), (long long)(wru), (long long)(info)) +#define __sanitizer_syscall_post_wait6(res, idtype, id, status, options, wru, \ + info) \ + __sanitizer_syscall_post_impl_wait6( \ + res, (long long)(idtype), (long long)(id), (long long)(status), \ + (long long)(options), (long long)(wru), (long long)(info)) +#define __sanitizer_syscall_pre_clock_getcpuclockid2(idtype, id, clock_id) \ + __sanitizer_syscall_pre_impl_clock_getcpuclockid2( \ + (long long)(idtype), (long long)(id), (long long)(clock_id)) +#define __sanitizer_syscall_post_clock_getcpuclockid2(res, idtype, id, \ + clock_id) \ + __sanitizer_syscall_post_impl_clock_getcpuclockid2( \ + res, (long long)(idtype), (long long)(id), (long long)(clock_id)) +#define __sanitizer_syscall_pre___getvfsstat90(buf, bufsize, flags) \ + __sanitizer_syscall_pre_impl___getvfsstat90( \ + (long long)(buf), (long long)(bufsize), (long long)(flags)) +#define __sanitizer_syscall_post___getvfsstat90(res, buf, bufsize, flags) \ + __sanitizer_syscall_post_impl___getvfsstat90( \ + res, (long long)(buf), (long long)(bufsize), (long long)(flags)) +#define __sanitizer_syscall_pre___statvfs190(path, buf, flags) \ + __sanitizer_syscall_pre_impl___statvfs190( \ + (long long)(path), (long long)(buf), (long long)(flags)) +#define __sanitizer_syscall_post___statvfs190(res, path, buf, flags) \ + __sanitizer_syscall_post_impl___statvfs190( \ + res, (long long)(path), (long long)(buf), (long long)(flags)) +#define __sanitizer_syscall_pre___fstatvfs190(fd, buf, flags) \ + __sanitizer_syscall_pre_impl___fstatvfs190( \ + (long long)(fd), (long long)(buf), (long long)(flags)) +#define __sanitizer_syscall_post___fstatvfs190(res, fd, buf, flags) \ + __sanitizer_syscall_post_impl___fstatvfs190( \ + res, (long long)(fd), (long long)(buf), (long long)(flags)) +#define 
__sanitizer_syscall_pre___fhstatvfs190(fhp, fh_size, buf, flags) \ + __sanitizer_syscall_pre_impl___fhstatvfs190( \ + (long long)(fhp), (long long)(fh_size), (long long)(buf), \ + (long long)(flags)) +#define __sanitizer_syscall_post___fhstatvfs190(res, fhp, fh_size, buf, flags) \ + __sanitizer_syscall_post_impl___fhstatvfs190( \ + res, (long long)(fhp), (long long)(fh_size), (long long)(buf), \ + (long long)(flags)) +#define __sanitizer_syscall_pre___acl_get_link(path, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_get_link( \ + (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_get_link(res, path, type, aclp) \ + __sanitizer_syscall_post_impl___acl_get_link( \ + res, (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_set_link(path, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_set_link( \ + (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_set_link(res, path, type, aclp) \ + __sanitizer_syscall_post_impl___acl_set_link( \ + res, (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_delete_link(path, type) \ + __sanitizer_syscall_pre_impl___acl_delete_link((long long)(path), \ + (long long)(type)) +#define __sanitizer_syscall_post___acl_delete_link(res, path, type) \ + __sanitizer_syscall_post_impl___acl_delete_link(res, (long long)(path), \ + (long long)(type)) +#define __sanitizer_syscall_pre___acl_aclcheck_link(path, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_aclcheck_link( \ + (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_aclcheck_link(res, path, type, aclp) \ + __sanitizer_syscall_post_impl___acl_aclcheck_link( \ + res, (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_get_file(path, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_get_file( \ + (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_get_file(res, path, type, aclp) \ + __sanitizer_syscall_post_impl___acl_get_file( \ + res, (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_set_file(path, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_set_file( \ + (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_set_file(res, path, type, aclp) \ + __sanitizer_syscall_post_impl___acl_set_file( \ + res, (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_get_fd(filedes, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_get_fd( \ + (long long)(filedes), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_get_fd(res, filedes, type, aclp) \ + __sanitizer_syscall_post_impl___acl_get_fd( \ + res, (long long)(filedes), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_set_fd(filedes, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_set_fd( \ + (long long)(filedes), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_set_fd(res, filedes, type, aclp) \ + __sanitizer_syscall_post_impl___acl_set_fd( \ + res, (long long)(filedes), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_delete_file(path, type) \ + __sanitizer_syscall_pre_impl___acl_delete_file((long long)(path), \ + (long long)(type)) +#define __sanitizer_syscall_post___acl_delete_file(res, path, type) \ + 
__sanitizer_syscall_post_impl___acl_delete_file(res, (long long)(path), \ + (long long)(type)) +#define __sanitizer_syscall_pre___acl_delete_fd(filedes, type) \ + __sanitizer_syscall_pre_impl___acl_delete_fd((long long)(filedes), \ + (long long)(type)) +#define __sanitizer_syscall_post___acl_delete_fd(res, filedes, type) \ + __sanitizer_syscall_post_impl___acl_delete_fd(res, (long long)(filedes), \ + (long long)(type)) +#define __sanitizer_syscall_pre___acl_aclcheck_file(path, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_aclcheck_file( \ + (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_aclcheck_file(res, path, type, aclp) \ + __sanitizer_syscall_post_impl___acl_aclcheck_file( \ + res, (long long)(path), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre___acl_aclcheck_fd(filedes, type, aclp) \ + __sanitizer_syscall_pre_impl___acl_aclcheck_fd( \ + (long long)(filedes), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_post___acl_aclcheck_fd(res, filedes, type, aclp) \ + __sanitizer_syscall_post_impl___acl_aclcheck_fd( \ + res, (long long)(filedes), (long long)(type), (long long)(aclp)) +#define __sanitizer_syscall_pre_lpathconf(path, name) \ + __sanitizer_syscall_pre_impl_lpathconf((long long)(path), (long long)(name)) +#define __sanitizer_syscall_post_lpathconf(res, path, name) \ + __sanitizer_syscall_post_impl_lpathconf(res, (long long)(path), \ + (long long)(name)) + +/* Compat with older releases */ +#define __sanitizer_syscall_pre_getvfsstat \ + __sanitizer_syscall_pre_compat_90_getvfsstat +#define __sanitizer_syscall_post_getvfsstat \ + __sanitizer_syscall_post_compat_90_getvfsstat + +#define __sanitizer_syscall_pre_statvfs1 \ + __sanitizer_syscall_pre_compat_90_statvfs1 +#define __sanitizer_syscall_post_statvfs1 \ + __sanitizer_syscall_post_compat_90_statvfs1 + +#define __sanitizer_syscall_pre_fstatvfs1 \ + __sanitizer_syscall_pre_compat_90_fstatvfs1 +#define __sanitizer_syscall_post_fstatvfs1 \ + __sanitizer_syscall_post_compat_90_fstatvfs1 + +#define __sanitizer_syscall_pre___fhstatvfs140 \ + __sanitizer_syscall_pre_compat_90_fhstatvfs1 +#define __sanitizer_syscall_post___fhstatvfs140 \ + __sanitizer_syscall_post_compat_90_fhstatvfs1 + +#ifdef __cplusplus +extern "C" { +#endif + +// Private declarations. Do not call directly from user code. Use macros above. + +// DO NOT EDIT! THIS FILE HAS BEEN GENERATED! 
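
As the comment above says, user code is meant to go through the pre/post macros; the impl functions are provided by the sanitizer runtime. A minimal sketch of the intended pattern (not part of this patch), assuming the header is installed as <sanitizer/netbsd_syscall_hooks.h> in the clang resource directory and using the hypothetical wrapper name traced_pipe2:

    /* Sketch: wrapping the NetBSD pipe2(2) syscall with the hook macros
     * defined above. The pre-hook receives only the arguments; the
     * post-hook receives the result first, then the same arguments. */
    #include <sanitizer/netbsd_syscall_hooks.h>
    #include <unistd.h>

    int traced_pipe2(int fildes[2], int flags) {
      __sanitizer_syscall_pre_pipe2(fildes, flags);       /* before the call */
      int res = pipe2(fildes, flags);                     /* the real syscall */
      __sanitizer_syscall_post_pipe2(res, fildes, flags); /* after the call */
      return res;
    }

The macros cast every argument to long long before forwarding to the impl functions, which is why the declarations below take only long long parameters; the compat aliases above (e.g. __sanitizer_syscall_pre_getvfsstat) let code written against older NetBSD releases keep compiling against the renamed compat_90 hooks.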
+ +void __sanitizer_syscall_pre_impl_syscall(long long code, long long arg0, + long long arg1, long long arg2, + long long arg3, long long arg4, + long long arg5, long long arg6, + long long arg7); +void __sanitizer_syscall_post_impl_syscall(long long res, long long code, + long long arg0, long long arg1, + long long arg2, long long arg3, + long long arg4, long long arg5, + long long arg6, long long arg7); +void __sanitizer_syscall_pre_impl_exit(long long rval); +void __sanitizer_syscall_post_impl_exit(long long res, long long rval); +void __sanitizer_syscall_pre_impl_fork(void); +void __sanitizer_syscall_post_impl_fork(long long res); +void __sanitizer_syscall_pre_impl_read(long long fd, long long buf, + long long nbyte); +void __sanitizer_syscall_post_impl_read(long long res, long long fd, + long long buf, long long nbyte); +void __sanitizer_syscall_pre_impl_write(long long fd, long long buf, + long long nbyte); +void __sanitizer_syscall_post_impl_write(long long res, long long fd, + long long buf, long long nbyte); +void __sanitizer_syscall_pre_impl_open(long long path, long long flags, + long long mode); +void __sanitizer_syscall_post_impl_open(long long res, long long path, + long long flags, long long mode); +void __sanitizer_syscall_pre_impl_close(long long fd); +void __sanitizer_syscall_post_impl_close(long long res, long long fd); +void __sanitizer_syscall_pre_impl_compat_50_wait4(long long pid, + long long status, + long long options, + long long rusage); +void __sanitizer_syscall_post_impl_compat_50_wait4(long long res, long long pid, + long long status, + long long options, + long long rusage); +void __sanitizer_syscall_pre_impl_compat_43_ocreat(long long path, + long long mode); +void __sanitizer_syscall_post_impl_compat_43_ocreat(long long res, + long long path, + long long mode); +void __sanitizer_syscall_pre_impl_link(long long path, long long link); +void __sanitizer_syscall_post_impl_link(long long res, long long path, + long long link); +void __sanitizer_syscall_pre_impl_unlink(long long path); +void __sanitizer_syscall_post_impl_unlink(long long res, long long path); +/* syscall 11 has been skipped */ +void __sanitizer_syscall_pre_impl_chdir(long long path); +void __sanitizer_syscall_post_impl_chdir(long long res, long long path); +void __sanitizer_syscall_pre_impl_fchdir(long long fd); +void __sanitizer_syscall_post_impl_fchdir(long long res, long long fd); +void __sanitizer_syscall_pre_impl_compat_50_mknod(long long path, + long long mode, + long long dev); +void __sanitizer_syscall_post_impl_compat_50_mknod(long long res, + long long path, + long long mode, + long long dev); +void __sanitizer_syscall_pre_impl_chmod(long long path, long long mode); +void __sanitizer_syscall_post_impl_chmod(long long res, long long path, + long long mode); +void __sanitizer_syscall_pre_impl_chown(long long path, long long uid, + long long gid); +void __sanitizer_syscall_post_impl_chown(long long res, long long path, + long long uid, long long gid); +void __sanitizer_syscall_pre_impl_break(long long nsize); +void __sanitizer_syscall_post_impl_break(long long res, long long nsize); +void __sanitizer_syscall_pre_impl_compat_20_getfsstat(long long buf, + long long bufsize, + long long flags); +void __sanitizer_syscall_post_impl_compat_20_getfsstat(long long res, + long long buf, + long long bufsize, + long long flags); +void __sanitizer_syscall_pre_impl_compat_43_olseek(long long fd, + long long offset, + long long whence); +void __sanitizer_syscall_post_impl_compat_43_olseek(long long 
res, long long fd, + long long offset, + long long whence); +void __sanitizer_syscall_pre_impl_getpid(void); +void __sanitizer_syscall_post_impl_getpid(long long res); +void __sanitizer_syscall_pre_impl_compat_40_mount(long long type, + long long path, + long long flags, + long long data); +void __sanitizer_syscall_post_impl_compat_40_mount(long long res, + long long type, + long long path, + long long flags, + long long data); +void __sanitizer_syscall_pre_impl_unmount(long long path, long long flags); +void __sanitizer_syscall_post_impl_unmount(long long res, long long path, + long long flags); +void __sanitizer_syscall_pre_impl_setuid(long long uid); +void __sanitizer_syscall_post_impl_setuid(long long res, long long uid); +void __sanitizer_syscall_pre_impl_getuid(void); +void __sanitizer_syscall_post_impl_getuid(long long res); +void __sanitizer_syscall_pre_impl_geteuid(void); +void __sanitizer_syscall_post_impl_geteuid(long long res); +void __sanitizer_syscall_pre_impl_ptrace(long long req, long long pid, + long long addr, long long data); +void __sanitizer_syscall_post_impl_ptrace(long long res, long long req, + long long pid, long long addr, + long long data); +void __sanitizer_syscall_pre_impl_recvmsg(long long s, long long msg, + long long flags); +void __sanitizer_syscall_post_impl_recvmsg(long long res, long long s, + long long msg, long long flags); +void __sanitizer_syscall_pre_impl_sendmsg(long long s, long long msg, + long long flags); +void __sanitizer_syscall_post_impl_sendmsg(long long res, long long s, + long long msg, long long flags); +void __sanitizer_syscall_pre_impl_recvfrom(long long s, long long buf, + long long len, long long flags, + long long from, + long long fromlenaddr); +void __sanitizer_syscall_post_impl_recvfrom(long long res, long long s, + long long buf, long long len, + long long flags, long long from, + long long fromlenaddr); +void __sanitizer_syscall_pre_impl_accept(long long s, long long name, + long long anamelen); +void __sanitizer_syscall_post_impl_accept(long long res, long long s, + long long name, long long anamelen); +void __sanitizer_syscall_pre_impl_getpeername(long long fdes, long long asa, + long long alen); +void __sanitizer_syscall_post_impl_getpeername(long long res, long long fdes, + long long asa, long long alen); +void __sanitizer_syscall_pre_impl_getsockname(long long fdes, long long asa, + long long alen); +void __sanitizer_syscall_post_impl_getsockname(long long res, long long fdes, + long long asa, long long alen); +void __sanitizer_syscall_pre_impl_access(long long path, long long flags); +void __sanitizer_syscall_post_impl_access(long long res, long long path, + long long flags); +void __sanitizer_syscall_pre_impl_chflags(long long path, long long flags); +void __sanitizer_syscall_post_impl_chflags(long long res, long long path, + long long flags); +void __sanitizer_syscall_pre_impl_fchflags(long long fd, long long flags); +void __sanitizer_syscall_post_impl_fchflags(long long res, long long fd, + long long flags); +void __sanitizer_syscall_pre_impl_sync(void); +void __sanitizer_syscall_post_impl_sync(long long res); +void __sanitizer_syscall_pre_impl_kill(long long pid, long long signum); +void __sanitizer_syscall_post_impl_kill(long long res, long long pid, + long long signum); +void __sanitizer_syscall_pre_impl_compat_43_stat43(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_43_stat43(long long res, + long long path, + long long ub); +void __sanitizer_syscall_pre_impl_getppid(void); +void 
__sanitizer_syscall_post_impl_getppid(long long res); +void __sanitizer_syscall_pre_impl_compat_43_lstat43(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_43_lstat43(long long res, + long long path, + long long ub); +void __sanitizer_syscall_pre_impl_dup(long long fd); +void __sanitizer_syscall_post_impl_dup(long long res, long long fd); +void __sanitizer_syscall_pre_impl_pipe(void); +void __sanitizer_syscall_post_impl_pipe(long long res); +void __sanitizer_syscall_pre_impl_getegid(void); +void __sanitizer_syscall_post_impl_getegid(long long res); +void __sanitizer_syscall_pre_impl_profil(long long samples, long long size, + long long offset, long long scale); +void __sanitizer_syscall_post_impl_profil(long long res, long long samples, + long long size, long long offset, + long long scale); +void __sanitizer_syscall_pre_impl_ktrace(long long fname, long long ops, + long long facs, long long pid); +void __sanitizer_syscall_post_impl_ktrace(long long res, long long fname, + long long ops, long long facs, + long long pid); +void __sanitizer_syscall_pre_impl_compat_13_sigaction13(long long signum, + long long nsa, + long long osa); +void __sanitizer_syscall_post_impl_compat_13_sigaction13(long long res, + long long signum, + long long nsa, + long long osa); +void __sanitizer_syscall_pre_impl_getgid(void); +void __sanitizer_syscall_post_impl_getgid(long long res); +void __sanitizer_syscall_pre_impl_compat_13_sigprocmask13(long long how, + long long mask); +void __sanitizer_syscall_post_impl_compat_13_sigprocmask13(long long res, + long long how, + long long mask); +void __sanitizer_syscall_pre_impl___getlogin(long long namebuf, + long long namelen); +void __sanitizer_syscall_post_impl___getlogin(long long res, long long namebuf, + long long namelen); +void __sanitizer_syscall_pre_impl___setlogin(long long namebuf); +void __sanitizer_syscall_post_impl___setlogin(long long res, long long namebuf); +void __sanitizer_syscall_pre_impl_acct(long long path); +void __sanitizer_syscall_post_impl_acct(long long res, long long path); +void __sanitizer_syscall_pre_impl_compat_13_sigpending13(void); +void __sanitizer_syscall_post_impl_compat_13_sigpending13(long long res); +void __sanitizer_syscall_pre_impl_compat_13_sigaltstack13(long long nss, + long long oss); +void __sanitizer_syscall_post_impl_compat_13_sigaltstack13(long long res, + long long nss, + long long oss); +void __sanitizer_syscall_pre_impl_ioctl(long long fd, long long com, + long long data); +void __sanitizer_syscall_post_impl_ioctl(long long res, long long fd, + long long com, long long data); +void __sanitizer_syscall_pre_impl_compat_12_oreboot(long long opt); +void __sanitizer_syscall_post_impl_compat_12_oreboot(long long res, + long long opt); +void __sanitizer_syscall_pre_impl_revoke(long long path); +void __sanitizer_syscall_post_impl_revoke(long long res, long long path); +void __sanitizer_syscall_pre_impl_symlink(long long path, long long link); +void __sanitizer_syscall_post_impl_symlink(long long res, long long path, + long long link); +void __sanitizer_syscall_pre_impl_readlink(long long path, long long buf, + long long count); +void __sanitizer_syscall_post_impl_readlink(long long res, long long path, + long long buf, long long count); +void __sanitizer_syscall_pre_impl_execve(long long path, long long argp, + long long envp); +void __sanitizer_syscall_post_impl_execve(long long res, long long path, + long long argp, long long envp); +void __sanitizer_syscall_pre_impl_umask(long long newmask); +void 
__sanitizer_syscall_post_impl_umask(long long res, long long newmask); +void __sanitizer_syscall_pre_impl_chroot(long long path); +void __sanitizer_syscall_post_impl_chroot(long long res, long long path); +void __sanitizer_syscall_pre_impl_compat_43_fstat43(long long fd, long long sb); +void __sanitizer_syscall_post_impl_compat_43_fstat43(long long res, + long long fd, + long long sb); +void __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo(long long op, + long long where, + long long size, + long long arg); +void __sanitizer_syscall_post_impl_compat_43_ogetkerninfo(long long res, + long long op, + long long where, + long long size, + long long arg); +void __sanitizer_syscall_pre_impl_compat_43_ogetpagesize(void); +void __sanitizer_syscall_post_impl_compat_43_ogetpagesize(long long res); +void __sanitizer_syscall_pre_impl_compat_12_msync(long long addr, + long long len); +void __sanitizer_syscall_post_impl_compat_12_msync(long long res, + long long addr, + long long len); +void __sanitizer_syscall_pre_impl_vfork(void); +void __sanitizer_syscall_post_impl_vfork(long long res); +/* syscall 67 has been skipped */ +/* syscall 68 has been skipped */ +/* syscall 69 has been skipped */ +/* syscall 70 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_43_ommap(long long addr, long long len, + long long prot, + long long flags, long long fd, + long long pos); +void __sanitizer_syscall_post_impl_compat_43_ommap( + long long res, long long addr, long long len, long long prot, + long long flags, long long fd, long long pos); +void __sanitizer_syscall_pre_impl_vadvise(long long anom); +void __sanitizer_syscall_post_impl_vadvise(long long res, long long anom); +void __sanitizer_syscall_pre_impl_munmap(long long addr, long long len); +void __sanitizer_syscall_post_impl_munmap(long long res, long long addr, + long long len); +void __sanitizer_syscall_pre_impl_mprotect(long long addr, long long len, + long long prot); +void __sanitizer_syscall_post_impl_mprotect(long long res, long long addr, + long long len, long long prot); +void __sanitizer_syscall_pre_impl_madvise(long long addr, long long len, + long long behav); +void __sanitizer_syscall_post_impl_madvise(long long res, long long addr, + long long len, long long behav); +/* syscall 76 has been skipped */ +/* syscall 77 has been skipped */ +void __sanitizer_syscall_pre_impl_mincore(long long addr, long long len, + long long vec); +void __sanitizer_syscall_post_impl_mincore(long long res, long long addr, + long long len, long long vec); +void __sanitizer_syscall_pre_impl_getgroups(long long gidsetsize, + long long gidset); +void __sanitizer_syscall_post_impl_getgroups(long long res, + long long gidsetsize, + long long gidset); +void __sanitizer_syscall_pre_impl_setgroups(long long gidsetsize, + long long gidset); +void __sanitizer_syscall_post_impl_setgroups(long long res, + long long gidsetsize, + long long gidset); +void __sanitizer_syscall_pre_impl_getpgrp(void); +void __sanitizer_syscall_post_impl_getpgrp(long long res); +void __sanitizer_syscall_pre_impl_setpgid(long long pid, long long pgid); +void __sanitizer_syscall_post_impl_setpgid(long long res, long long pid, + long long pgid); +void __sanitizer_syscall_pre_impl_compat_50_setitimer(long long which, + long long itv, + long long oitv); +void __sanitizer_syscall_post_impl_compat_50_setitimer(long long res, + long long which, + long long itv, + long long oitv); +void __sanitizer_syscall_pre_impl_compat_43_owait(void); +void __sanitizer_syscall_post_impl_compat_43_owait(long long 
res); +void __sanitizer_syscall_pre_impl_compat_12_oswapon(long long name); +void __sanitizer_syscall_post_impl_compat_12_oswapon(long long res, + long long name); +void __sanitizer_syscall_pre_impl_compat_50_getitimer(long long which, + long long itv); +void __sanitizer_syscall_post_impl_compat_50_getitimer(long long res, + long long which, + long long itv); +void __sanitizer_syscall_pre_impl_compat_43_ogethostname(long long hostname, + long long len); +void __sanitizer_syscall_post_impl_compat_43_ogethostname(long long res, + long long hostname, + long long len); +void __sanitizer_syscall_pre_impl_compat_43_osethostname(long long hostname, + long long len); +void __sanitizer_syscall_post_impl_compat_43_osethostname(long long res, + long long hostname, + long long len); +void __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize(void); +void __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(long long res); +void __sanitizer_syscall_pre_impl_dup2(long long from, long long to); +void __sanitizer_syscall_post_impl_dup2(long long res, long long from, + long long to); +void __sanitizer_syscall_pre_impl_getrandom(long long buf, long long buflen, + long long flags); +void __sanitizer_syscall_post_impl_getrandom(long long res, long long buf, + long long buflen, long long flags); +void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd, + long long arg); +void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd, + long long cmd, long long arg); +void __sanitizer_syscall_pre_impl_compat_50_select(long long nd, long long in, + long long ou, long long ex, + long long tv); +void __sanitizer_syscall_post_impl_compat_50_select(long long res, long long nd, + long long in, long long ou, + long long ex, long long tv); +/* syscall 94 has been skipped */ +void __sanitizer_syscall_pre_impl_fsync(long long fd); +void __sanitizer_syscall_post_impl_fsync(long long res, long long fd); +void __sanitizer_syscall_pre_impl_setpriority(long long which, long long who, + long long prio); +void __sanitizer_syscall_post_impl_setpriority(long long res, long long which, + long long who, long long prio); +void __sanitizer_syscall_pre_impl_compat_30_socket(long long domain, + long long type, + long long protocol); +void __sanitizer_syscall_post_impl_compat_30_socket(long long res, + long long domain, + long long type, + long long protocol); +void __sanitizer_syscall_pre_impl_connect(long long s, long long name, + long long namelen); +void __sanitizer_syscall_post_impl_connect(long long res, long long s, + long long name, long long namelen); +void __sanitizer_syscall_pre_impl_compat_43_oaccept(long long s, long long name, + long long anamelen); +void __sanitizer_syscall_post_impl_compat_43_oaccept(long long res, long long s, + long long name, + long long anamelen); +void __sanitizer_syscall_pre_impl_getpriority(long long which, long long who); +void __sanitizer_syscall_post_impl_getpriority(long long res, long long which, + long long who); +void __sanitizer_syscall_pre_impl_compat_43_osend(long long s, long long buf, + long long len, + long long flags); +void __sanitizer_syscall_post_impl_compat_43_osend(long long res, long long s, + long long buf, long long len, + long long flags); +void __sanitizer_syscall_pre_impl_compat_43_orecv(long long s, long long buf, + long long len, + long long flags); +void __sanitizer_syscall_post_impl_compat_43_orecv(long long res, long long s, + long long buf, long long len, + long long flags); +void __sanitizer_syscall_pre_impl_compat_13_sigreturn13(long long 
sigcntxp); +void __sanitizer_syscall_post_impl_compat_13_sigreturn13(long long res, + long long sigcntxp); +void __sanitizer_syscall_pre_impl_bind(long long s, long long name, + long long namelen); +void __sanitizer_syscall_post_impl_bind(long long res, long long s, + long long name, long long namelen); +void __sanitizer_syscall_pre_impl_setsockopt(long long s, long long level, + long long name, long long val, + long long valsize); +void __sanitizer_syscall_post_impl_setsockopt(long long res, long long s, + long long level, long long name, + long long val, long long valsize); +void __sanitizer_syscall_pre_impl_listen(long long s, long long backlog); +void __sanitizer_syscall_post_impl_listen(long long res, long long s, + long long backlog); +/* syscall 107 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_43_osigvec(long long signum, + long long nsv, + long long osv); +void __sanitizer_syscall_post_impl_compat_43_osigvec(long long res, + long long signum, + long long nsv, + long long osv); +void __sanitizer_syscall_pre_impl_compat_43_osigblock(long long mask); +void __sanitizer_syscall_post_impl_compat_43_osigblock(long long res, + long long mask); +void __sanitizer_syscall_pre_impl_compat_43_osigsetmask(long long mask); +void __sanitizer_syscall_post_impl_compat_43_osigsetmask(long long res, + long long mask); +void __sanitizer_syscall_pre_impl_compat_13_sigsuspend13(long long mask); +void __sanitizer_syscall_post_impl_compat_13_sigsuspend13(long long res, + long long mask); +void __sanitizer_syscall_pre_impl_compat_43_osigstack(long long nss, + long long oss); +void __sanitizer_syscall_post_impl_compat_43_osigstack(long long res, + long long nss, + long long oss); +void __sanitizer_syscall_pre_impl_compat_43_orecvmsg(long long s, long long msg, + long long flags); +void __sanitizer_syscall_post_impl_compat_43_orecvmsg(long long res, + long long s, + long long msg, + long long flags); +void __sanitizer_syscall_pre_impl_compat_43_osendmsg(long long s, long long msg, + long long flags); +void __sanitizer_syscall_post_impl_compat_43_osendmsg(long long res, + long long s, + long long msg, + long long flags); +/* syscall 115 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_50_gettimeofday(long long tp, + long long tzp); +void __sanitizer_syscall_post_impl_compat_50_gettimeofday(long long res, + long long tp, + long long tzp); +void __sanitizer_syscall_pre_impl_compat_50_getrusage(long long who, + long long rusage); +void __sanitizer_syscall_post_impl_compat_50_getrusage(long long res, + long long who, + long long rusage); +void __sanitizer_syscall_pre_impl_getsockopt(long long s, long long level, + long long name, long long val, + long long avalsize); +void __sanitizer_syscall_post_impl_getsockopt(long long res, long long s, + long long level, long long name, + long long val, + long long avalsize); +/* syscall 119 has been skipped */ +void __sanitizer_syscall_pre_impl_readv(long long fd, long long iovp, + long long iovcnt); +void __sanitizer_syscall_post_impl_readv(long long res, long long fd, + long long iovp, long long iovcnt); +void __sanitizer_syscall_pre_impl_writev(long long fd, long long iovp, + long long iovcnt); +void __sanitizer_syscall_post_impl_writev(long long res, long long fd, + long long iovp, long long iovcnt); +void __sanitizer_syscall_pre_impl_compat_50_settimeofday(long long tv, + long long tzp); +void __sanitizer_syscall_post_impl_compat_50_settimeofday(long long res, + long long tv, + long long tzp); +void __sanitizer_syscall_pre_impl_fchown(long 
long fd, long long uid, + long long gid); +void __sanitizer_syscall_post_impl_fchown(long long res, long long fd, + long long uid, long long gid); +void __sanitizer_syscall_pre_impl_fchmod(long long fd, long long mode); +void __sanitizer_syscall_post_impl_fchmod(long long res, long long fd, + long long mode); +void __sanitizer_syscall_pre_impl_compat_43_orecvfrom( + long long s, long long buf, long long len, long long flags, long long from, + long long fromlenaddr); +void __sanitizer_syscall_post_impl_compat_43_orecvfrom( + long long res, long long s, long long buf, long long len, long long flags, + long long from, long long fromlenaddr); +void __sanitizer_syscall_pre_impl_setreuid(long long ruid, long long euid); +void __sanitizer_syscall_post_impl_setreuid(long long res, long long ruid, + long long euid); +void __sanitizer_syscall_pre_impl_setregid(long long rgid, long long egid); +void __sanitizer_syscall_post_impl_setregid(long long res, long long rgid, + long long egid); +void __sanitizer_syscall_pre_impl_rename(long long from, long long to); +void __sanitizer_syscall_post_impl_rename(long long res, long long from, + long long to); +void __sanitizer_syscall_pre_impl_compat_43_otruncate(long long path, + long long length); +void __sanitizer_syscall_post_impl_compat_43_otruncate(long long res, + long long path, + long long length); +void __sanitizer_syscall_pre_impl_compat_43_oftruncate(long long fd, + long long length); +void __sanitizer_syscall_post_impl_compat_43_oftruncate(long long res, + long long fd, + long long length); +void __sanitizer_syscall_pre_impl_flock(long long fd, long long how); +void __sanitizer_syscall_post_impl_flock(long long res, long long fd, + long long how); +void __sanitizer_syscall_pre_impl_mkfifo(long long path, long long mode); +void __sanitizer_syscall_post_impl_mkfifo(long long res, long long path, + long long mode); +void __sanitizer_syscall_pre_impl_sendto(long long s, long long buf, + long long len, long long flags, + long long to, long long tolen); +void __sanitizer_syscall_post_impl_sendto(long long res, long long s, + long long buf, long long len, + long long flags, long long to, + long long tolen); +void __sanitizer_syscall_pre_impl_shutdown(long long s, long long how); +void __sanitizer_syscall_post_impl_shutdown(long long res, long long s, + long long how); +void __sanitizer_syscall_pre_impl_socketpair(long long domain, long long type, + long long protocol, long long rsv); +void __sanitizer_syscall_post_impl_socketpair(long long res, long long domain, + long long type, + long long protocol, + long long rsv); +void __sanitizer_syscall_pre_impl_mkdir(long long path, long long mode); +void __sanitizer_syscall_post_impl_mkdir(long long res, long long path, + long long mode); +void __sanitizer_syscall_pre_impl_rmdir(long long path); +void __sanitizer_syscall_post_impl_rmdir(long long res, long long path); +void __sanitizer_syscall_pre_impl_compat_50_utimes(long long path, + long long tptr); +void __sanitizer_syscall_post_impl_compat_50_utimes(long long res, + long long path, + long long tptr); +/* syscall 139 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_50_adjtime(long long delta, + long long olddelta); +void __sanitizer_syscall_post_impl_compat_50_adjtime(long long res, + long long delta, + long long olddelta); +void __sanitizer_syscall_pre_impl_compat_43_ogetpeername(long long fdes, + long long asa, + long long alen); +void __sanitizer_syscall_post_impl_compat_43_ogetpeername(long long res, + long long fdes, + long long asa, + long 
long alen); +void __sanitizer_syscall_pre_impl_compat_43_ogethostid(void); +void __sanitizer_syscall_post_impl_compat_43_ogethostid(long long res); +void __sanitizer_syscall_pre_impl_compat_43_osethostid(long long hostid); +void __sanitizer_syscall_post_impl_compat_43_osethostid(long long res, + long long hostid); +void __sanitizer_syscall_pre_impl_compat_43_ogetrlimit(long long which, + long long rlp); +void __sanitizer_syscall_post_impl_compat_43_ogetrlimit(long long res, + long long which, + long long rlp); +void __sanitizer_syscall_pre_impl_compat_43_osetrlimit(long long which, + long long rlp); +void __sanitizer_syscall_post_impl_compat_43_osetrlimit(long long res, + long long which, + long long rlp); +void __sanitizer_syscall_pre_impl_compat_43_okillpg(long long pgid, + long long signum); +void __sanitizer_syscall_post_impl_compat_43_okillpg(long long res, + long long pgid, + long long signum); +void __sanitizer_syscall_pre_impl_setsid(void); +void __sanitizer_syscall_post_impl_setsid(long long res); +void __sanitizer_syscall_pre_impl_compat_50_quotactl(long long path, + long long cmd, + long long uid, + long long arg); +void __sanitizer_syscall_post_impl_compat_50_quotactl( + long long res, long long path, long long cmd, long long uid, long long arg); +void __sanitizer_syscall_pre_impl_compat_43_oquota(void); +void __sanitizer_syscall_post_impl_compat_43_oquota(long long res); +void __sanitizer_syscall_pre_impl_compat_43_ogetsockname(long long fdec, + long long asa, + long long alen); +void __sanitizer_syscall_post_impl_compat_43_ogetsockname(long long res, + long long fdec, + long long asa, + long long alen); +/* syscall 151 has been skipped */ +/* syscall 152 has been skipped */ +/* syscall 153 has been skipped */ +/* syscall 154 has been skipped */ +void __sanitizer_syscall_pre_impl_nfssvc(long long flag, long long argp); +void __sanitizer_syscall_post_impl_nfssvc(long long res, long long flag, + long long argp); +void __sanitizer_syscall_pre_impl_compat_43_ogetdirentries(long long fd, + long long buf, + long long count, + long long basep); +void __sanitizer_syscall_post_impl_compat_43_ogetdirentries(long long res, + long long fd, + long long buf, + long long count, + long long basep); +void __sanitizer_syscall_pre_impl_compat_20_statfs(long long path, + long long buf); +void __sanitizer_syscall_post_impl_compat_20_statfs(long long res, + long long path, + long long buf); +void __sanitizer_syscall_pre_impl_compat_20_fstatfs(long long fd, + long long buf); +void __sanitizer_syscall_post_impl_compat_20_fstatfs(long long res, + long long fd, + long long buf); +/* syscall 159 has been skipped */ +/* syscall 160 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_30_getfh(long long fname, + long long fhp); +void __sanitizer_syscall_post_impl_compat_30_getfh(long long res, + long long fname, + long long fhp); +void __sanitizer_syscall_pre_impl_compat_09_ogetdomainname(long long domainname, + long long len); +void __sanitizer_syscall_post_impl_compat_09_ogetdomainname( + long long res, long long domainname, long long len); +void __sanitizer_syscall_pre_impl_compat_09_osetdomainname(long long domainname, + long long len); +void __sanitizer_syscall_post_impl_compat_09_osetdomainname( + long long res, long long domainname, long long len); +void __sanitizer_syscall_pre_impl_compat_09_ouname(long long name); +void __sanitizer_syscall_post_impl_compat_09_ouname(long long res, + long long name); +void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms); +void 
__sanitizer_syscall_post_impl_sysarch(long long res, long long op, + long long parms); +void __sanitizer_syscall_pre_impl___futex(long long uaddr, long long op, + long long val, long long timeout, + long long uaddr2, long long val2, + long long val3); +void __sanitizer_syscall_post_impl___futex(long long res, long long uaddr, + long long op, long long val, + long long timeout, long long uaddr2, + long long val2, long long val3); +void __sanitizer_syscall_pre_impl___futex_set_robust_list(long long head, + long long len); +void __sanitizer_syscall_post_impl___futex_set_robust_list(long long res, + long long head, + long long len); +void __sanitizer_syscall_pre_impl___futex_get_robust_list(long long lwpid, + long long headp, + long long lenp); +void __sanitizer_syscall_post_impl___futex_get_robust_list(long long res, + long long lwpid, + long long headp, + long long lenp); +#if !defined(_LP64) +void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which, + long long a2, long long a3, + long long a4, long long a5); +void __sanitizer_syscall_post_impl_compat_10_osemsys(long long res, + long long which, + long long a2, long long a3, + long long a4, + long long a5); +#else +/* syscall 169 has been skipped */ +#endif +#if !defined(_LP64) +void __sanitizer_syscall_pre_impl_compat_10_omsgsys(long long which, + long long a2, long long a3, + long long a4, long long a5, + long long a6); +void __sanitizer_syscall_post_impl_compat_10_omsgsys(long long res, + long long which, + long long a2, long long a3, + long long a4, long long a5, + long long a6); +#else +/* syscall 170 has been skipped */ +#endif +#if !defined(_LP64) +void __sanitizer_syscall_pre_impl_compat_10_oshmsys(long long which, + long long a2, long long a3, + long long a4); +void __sanitizer_syscall_post_impl_compat_10_oshmsys(long long res, + long long which, + long long a2, long long a3, + long long a4); +#else +/* syscall 171 has been skipped */ +#endif +/* syscall 172 has been skipped */ +void __sanitizer_syscall_pre_impl_pread(long long fd, long long buf, + long long nbyte, long long PAD, + long long offset); +void __sanitizer_syscall_post_impl_pread(long long res, long long fd, + long long buf, long long nbyte, + long long PAD, long long offset); +void __sanitizer_syscall_pre_impl_pwrite(long long fd, long long buf, + long long nbyte, long long PAD, + long long offset); +void __sanitizer_syscall_post_impl_pwrite(long long res, long long fd, + long long buf, long long nbyte, + long long PAD, long long offset); +void __sanitizer_syscall_pre_impl_compat_30_ntp_gettime(long long ntvp); +void __sanitizer_syscall_post_impl_compat_30_ntp_gettime(long long res, + long long ntvp); +#if defined(NTP) || !defined(_KERNEL_OPT) +void __sanitizer_syscall_pre_impl_ntp_adjtime(long long tp); +void __sanitizer_syscall_post_impl_ntp_adjtime(long long res, long long tp); +#else +/* syscall 176 has been skipped */ +#endif +/* syscall 177 has been skipped */ +/* syscall 178 has been skipped */ +/* syscall 179 has been skipped */ +/* syscall 180 has been skipped */ +void __sanitizer_syscall_pre_impl_setgid(long long gid); +void __sanitizer_syscall_post_impl_setgid(long long res, long long gid); +void __sanitizer_syscall_pre_impl_setegid(long long egid); +void __sanitizer_syscall_post_impl_setegid(long long res, long long egid); +void __sanitizer_syscall_pre_impl_seteuid(long long euid); +void __sanitizer_syscall_post_impl_seteuid(long long res, long long euid); +void __sanitizer_syscall_pre_impl_lfs_bmapv(long long fsidp, long long blkiov, + long long 
blkcnt); +void __sanitizer_syscall_post_impl_lfs_bmapv(long long res, long long fsidp, + long long blkiov, + long long blkcnt); +void __sanitizer_syscall_pre_impl_lfs_markv(long long fsidp, long long blkiov, + long long blkcnt); +void __sanitizer_syscall_post_impl_lfs_markv(long long res, long long fsidp, + long long blkiov, + long long blkcnt); +void __sanitizer_syscall_pre_impl_lfs_segclean(long long fsidp, + long long segment); +void __sanitizer_syscall_post_impl_lfs_segclean(long long res, long long fsidp, + long long segment); +void __sanitizer_syscall_pre_impl_compat_50_lfs_segwait(long long fsidp, + long long tv); +void __sanitizer_syscall_post_impl_compat_50_lfs_segwait(long long res, + long long fsidp, + long long tv); +void __sanitizer_syscall_pre_impl_compat_12_stat12(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_12_stat12(long long res, + long long path, + long long ub); +void __sanitizer_syscall_pre_impl_compat_12_fstat12(long long fd, long long sb); +void __sanitizer_syscall_post_impl_compat_12_fstat12(long long res, + long long fd, + long long sb); +void __sanitizer_syscall_pre_impl_compat_12_lstat12(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_12_lstat12(long long res, + long long path, + long long ub); +void __sanitizer_syscall_pre_impl_pathconf(long long path, long long name); +void __sanitizer_syscall_post_impl_pathconf(long long res, long long path, + long long name); +void __sanitizer_syscall_pre_impl_fpathconf(long long fd, long long name); +void __sanitizer_syscall_post_impl_fpathconf(long long res, long long fd, + long long name); +void __sanitizer_syscall_pre_impl_getsockopt2(long long s, long long level, + long long name, long long val, + long long avalsize); +void __sanitizer_syscall_post_impl_getsockopt2(long long res, long long s, + long long level, long long name, + long long val, + long long avalsize); +void __sanitizer_syscall_pre_impl_getrlimit(long long which, long long rlp); +void __sanitizer_syscall_post_impl_getrlimit(long long res, long long which, + long long rlp); +void __sanitizer_syscall_pre_impl_setrlimit(long long which, long long rlp); +void __sanitizer_syscall_post_impl_setrlimit(long long res, long long which, + long long rlp); +void __sanitizer_syscall_pre_impl_compat_12_getdirentries(long long fd, + long long buf, + long long count, + long long basep); +void __sanitizer_syscall_post_impl_compat_12_getdirentries(long long res, + long long fd, + long long buf, + long long count, + long long basep); +void __sanitizer_syscall_pre_impl_mmap(long long addr, long long len, + long long prot, long long flags, + long long fd, long long PAD, + long long pos); +void __sanitizer_syscall_post_impl_mmap(long long res, long long addr, + long long len, long long prot, + long long flags, long long fd, + long long PAD, long long pos); +void __sanitizer_syscall_pre_impl___syscall(long long code, long long arg0, + long long arg1, long long arg2, + long long arg3, long long arg4, + long long arg5, long long arg6, + long long arg7); +void __sanitizer_syscall_post_impl___syscall(long long res, long long code, + long long arg0, long long arg1, + long long arg2, long long arg3, + long long arg4, long long arg5, + long long arg6, long long arg7); +void __sanitizer_syscall_pre_impl_lseek(long long fd, long long PAD, + long long offset, long long whence); +void __sanitizer_syscall_post_impl_lseek(long long res, long long fd, + long long PAD, long long offset, + long long whence); +void 
__sanitizer_syscall_pre_impl_truncate(long long path, long long PAD, + long long length); +void __sanitizer_syscall_post_impl_truncate(long long res, long long path, + long long PAD, long long length); +void __sanitizer_syscall_pre_impl_ftruncate(long long fd, long long PAD, + long long length); +void __sanitizer_syscall_post_impl_ftruncate(long long res, long long fd, + long long PAD, long long length); +void __sanitizer_syscall_pre_impl___sysctl(long long name, long long namelen, + long long oldv, long long oldlenp, + long long newv, long long newlen); +void __sanitizer_syscall_post_impl___sysctl(long long res, long long name, + long long namelen, long long oldv, + long long oldlenp, long long newv, + long long newlen); +void __sanitizer_syscall_pre_impl_mlock(long long addr, long long len); +void __sanitizer_syscall_post_impl_mlock(long long res, long long addr, + long long len); +void __sanitizer_syscall_pre_impl_munlock(long long addr, long long len); +void __sanitizer_syscall_post_impl_munlock(long long res, long long addr, + long long len); +void __sanitizer_syscall_pre_impl_undelete(long long path); +void __sanitizer_syscall_post_impl_undelete(long long res, long long path); +void __sanitizer_syscall_pre_impl_compat_50_futimes(long long fd, + long long tptr); +void __sanitizer_syscall_post_impl_compat_50_futimes(long long res, + long long fd, + long long tptr); +void __sanitizer_syscall_pre_impl_getpgid(long long pid); +void __sanitizer_syscall_post_impl_getpgid(long long res, long long pid); +void __sanitizer_syscall_pre_impl_reboot(long long opt, long long bootstr); +void __sanitizer_syscall_post_impl_reboot(long long res, long long opt, + long long bootstr); +void __sanitizer_syscall_pre_impl_poll(long long fds, long long nfds, + long long timeout); +void __sanitizer_syscall_post_impl_poll(long long res, long long fds, + long long nfds, long long timeout); +void __sanitizer_syscall_pre_impl_afssys(long long id, long long a1, + long long a2, long long a3, + long long a4, long long a5, + long long a6); +void __sanitizer_syscall_post_impl_afssys(long long res, long long id, + long long a1, long long a2, + long long a3, long long a4, + long long a5, long long a6); +/* syscall 211 has been skipped */ +/* syscall 212 has been skipped */ +/* syscall 213 has been skipped */ +/* syscall 214 has been skipped */ +/* syscall 215 has been skipped */ +/* syscall 216 has been skipped */ +/* syscall 217 has been skipped */ +/* syscall 218 has been skipped */ +/* syscall 219 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_14___semctl(long long semid, + long long semnum, + long long cmd, + long long arg); +void __sanitizer_syscall_post_impl_compat_14___semctl(long long res, + long long semid, + long long semnum, + long long cmd, + long long arg); +void __sanitizer_syscall_pre_impl_semget(long long key, long long nsems, + long long semflg); +void __sanitizer_syscall_post_impl_semget(long long res, long long key, + long long nsems, long long semflg); +void __sanitizer_syscall_pre_impl_semop(long long semid, long long sops, + long long nsops); +void __sanitizer_syscall_post_impl_semop(long long res, long long semid, + long long sops, long long nsops); +void __sanitizer_syscall_pre_impl_semconfig(long long flag); +void __sanitizer_syscall_post_impl_semconfig(long long res, long long flag); +void __sanitizer_syscall_pre_impl_compat_14_msgctl(long long msqid, + long long cmd, + long long buf); +void __sanitizer_syscall_post_impl_compat_14_msgctl(long long res, + long long msqid, + long 
long cmd, + long long buf); +void __sanitizer_syscall_pre_impl_msgget(long long key, long long msgflg); +void __sanitizer_syscall_post_impl_msgget(long long res, long long key, + long long msgflg); +void __sanitizer_syscall_pre_impl_msgsnd(long long msqid, long long msgp, + long long msgsz, long long msgflg); +void __sanitizer_syscall_post_impl_msgsnd(long long res, long long msqid, + long long msgp, long long msgsz, + long long msgflg); +void __sanitizer_syscall_pre_impl_msgrcv(long long msqid, long long msgp, + long long msgsz, long long msgtyp, + long long msgflg); +void __sanitizer_syscall_post_impl_msgrcv(long long res, long long msqid, + long long msgp, long long msgsz, + long long msgtyp, long long msgflg); +void __sanitizer_syscall_pre_impl_shmat(long long shmid, long long shmaddr, + long long shmflg); +void __sanitizer_syscall_post_impl_shmat(long long res, long long shmid, + long long shmaddr, long long shmflg); +void __sanitizer_syscall_pre_impl_compat_14_shmctl(long long shmid, + long long cmd, + long long buf); +void __sanitizer_syscall_post_impl_compat_14_shmctl(long long res, + long long shmid, + long long cmd, + long long buf); +void __sanitizer_syscall_pre_impl_shmdt(long long shmaddr); +void __sanitizer_syscall_post_impl_shmdt(long long res, long long shmaddr); +void __sanitizer_syscall_pre_impl_shmget(long long key, long long size, + long long shmflg); +void __sanitizer_syscall_post_impl_shmget(long long res, long long key, + long long size, long long shmflg); +void __sanitizer_syscall_pre_impl_compat_50_clock_gettime(long long clock_id, + long long tp); +void __sanitizer_syscall_post_impl_compat_50_clock_gettime(long long res, + long long clock_id, + long long tp); +void __sanitizer_syscall_pre_impl_compat_50_clock_settime(long long clock_id, + long long tp); +void __sanitizer_syscall_post_impl_compat_50_clock_settime(long long res, + long long clock_id, + long long tp); +void __sanitizer_syscall_pre_impl_compat_50_clock_getres(long long clock_id, + long long tp); +void __sanitizer_syscall_post_impl_compat_50_clock_getres(long long res, + long long clock_id, + long long tp); +void __sanitizer_syscall_pre_impl_timer_create(long long clock_id, + long long evp, + long long timerid); +void __sanitizer_syscall_post_impl_timer_create(long long res, + long long clock_id, + long long evp, + long long timerid); +void __sanitizer_syscall_pre_impl_timer_delete(long long timerid); +void __sanitizer_syscall_post_impl_timer_delete(long long res, + long long timerid); +void __sanitizer_syscall_pre_impl_compat_50_timer_settime(long long timerid, + long long flags, + long long value, + long long ovalue); +void __sanitizer_syscall_post_impl_compat_50_timer_settime(long long res, + long long timerid, + long long flags, + long long value, + long long ovalue); +void __sanitizer_syscall_pre_impl_compat_50_timer_gettime(long long timerid, + long long value); +void __sanitizer_syscall_post_impl_compat_50_timer_gettime(long long res, + long long timerid, + long long value); +void __sanitizer_syscall_pre_impl_timer_getoverrun(long long timerid); +void __sanitizer_syscall_post_impl_timer_getoverrun(long long res, + long long timerid); +void __sanitizer_syscall_pre_impl_compat_50_nanosleep(long long rqtp, + long long rmtp); +void __sanitizer_syscall_post_impl_compat_50_nanosleep(long long res, + long long rqtp, + long long rmtp); +void __sanitizer_syscall_pre_impl_fdatasync(long long fd); +void __sanitizer_syscall_post_impl_fdatasync(long long res, long long fd); +void 
__sanitizer_syscall_pre_impl_mlockall(long long flags); +void __sanitizer_syscall_post_impl_mlockall(long long res, long long flags); +void __sanitizer_syscall_pre_impl_munlockall(void); +void __sanitizer_syscall_post_impl_munlockall(long long res); +void __sanitizer_syscall_pre_impl_compat_50___sigtimedwait(long long set, + long long info, + long long timeout); +void __sanitizer_syscall_post_impl_compat_50___sigtimedwait(long long res, + long long set, + long long info, + long long timeout); +void __sanitizer_syscall_pre_impl_sigqueueinfo(long long pid, long long info); +void __sanitizer_syscall_post_impl_sigqueueinfo(long long res, long long pid, + long long info); +void __sanitizer_syscall_pre_impl_modctl(long long cmd, long long arg); +void __sanitizer_syscall_post_impl_modctl(long long res, long long cmd, + long long arg); +void __sanitizer_syscall_pre_impl__ksem_init(long long value, long long idp); +void __sanitizer_syscall_post_impl__ksem_init(long long res, long long value, + long long idp); +void __sanitizer_syscall_pre_impl__ksem_open(long long name, long long oflag, + long long mode, long long value, + long long idp); +void __sanitizer_syscall_post_impl__ksem_open(long long res, long long name, + long long oflag, long long mode, + long long value, long long idp); +void __sanitizer_syscall_pre_impl__ksem_unlink(long long name); +void __sanitizer_syscall_post_impl__ksem_unlink(long long res, long long name); +void __sanitizer_syscall_pre_impl__ksem_close(long long id); +void __sanitizer_syscall_post_impl__ksem_close(long long res, long long id); +void __sanitizer_syscall_pre_impl__ksem_post(long long id); +void __sanitizer_syscall_post_impl__ksem_post(long long res, long long id); +void __sanitizer_syscall_pre_impl__ksem_wait(long long id); +void __sanitizer_syscall_post_impl__ksem_wait(long long res, long long id); +void __sanitizer_syscall_pre_impl__ksem_trywait(long long id); +void __sanitizer_syscall_post_impl__ksem_trywait(long long res, long long id); +void __sanitizer_syscall_pre_impl__ksem_getvalue(long long id, long long value); +void __sanitizer_syscall_post_impl__ksem_getvalue(long long res, long long id, + long long value); +void __sanitizer_syscall_pre_impl__ksem_destroy(long long id); +void __sanitizer_syscall_post_impl__ksem_destroy(long long res, long long id); +void __sanitizer_syscall_pre_impl__ksem_timedwait(long long id, + long long abstime); +void __sanitizer_syscall_post_impl__ksem_timedwait(long long res, long long id, + long long abstime); +void __sanitizer_syscall_pre_impl_mq_open(long long name, long long oflag, + long long mode, long long attr); +void __sanitizer_syscall_post_impl_mq_open(long long res, long long name, + long long oflag, long long mode, + long long attr); +void __sanitizer_syscall_pre_impl_mq_close(long long mqdes); +void __sanitizer_syscall_post_impl_mq_close(long long res, long long mqdes); +void __sanitizer_syscall_pre_impl_mq_unlink(long long name); +void __sanitizer_syscall_post_impl_mq_unlink(long long res, long long name); +void __sanitizer_syscall_pre_impl_mq_getattr(long long mqdes, long long mqstat); +void __sanitizer_syscall_post_impl_mq_getattr(long long res, long long mqdes, + long long mqstat); +void __sanitizer_syscall_pre_impl_mq_setattr(long long mqdes, long long mqstat, + long long omqstat); +void __sanitizer_syscall_post_impl_mq_setattr(long long res, long long mqdes, + long long mqstat, + long long omqstat); +void __sanitizer_syscall_pre_impl_mq_notify(long long mqdes, + long long notification); +void 
__sanitizer_syscall_post_impl_mq_notify(long long res, long long mqdes, + long long notification); +void __sanitizer_syscall_pre_impl_mq_send(long long mqdes, long long msg_ptr, + long long msg_len, + long long msg_prio); +void __sanitizer_syscall_post_impl_mq_send(long long res, long long mqdes, + long long msg_ptr, long long msg_len, + long long msg_prio); +void __sanitizer_syscall_pre_impl_mq_receive(long long mqdes, long long msg_ptr, + long long msg_len, + long long msg_prio); +void __sanitizer_syscall_post_impl_mq_receive(long long res, long long mqdes, + long long msg_ptr, + long long msg_len, + long long msg_prio); +void __sanitizer_syscall_pre_impl_compat_50_mq_timedsend(long long mqdes, + long long msg_ptr, + long long msg_len, + long long msg_prio, + long long abs_timeout); +void __sanitizer_syscall_post_impl_compat_50_mq_timedsend( + long long res, long long mqdes, long long msg_ptr, long long msg_len, + long long msg_prio, long long abs_timeout); +void __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive( + long long mqdes, long long msg_ptr, long long msg_len, long long msg_prio, + long long abs_timeout); +void __sanitizer_syscall_post_impl_compat_50_mq_timedreceive( + long long res, long long mqdes, long long msg_ptr, long long msg_len, + long long msg_prio, long long abs_timeout); +/* syscall 267 has been skipped */ +/* syscall 268 has been skipped */ +/* syscall 269 has been skipped */ +void __sanitizer_syscall_pre_impl___posix_rename(long long from, long long to); +void __sanitizer_syscall_post_impl___posix_rename(long long res, long long from, + long long to); +void __sanitizer_syscall_pre_impl_swapctl(long long cmd, long long arg, + long long misc); +void __sanitizer_syscall_post_impl_swapctl(long long res, long long cmd, + long long arg, long long misc); +void __sanitizer_syscall_pre_impl_compat_30_getdents(long long fd, + long long buf, + long long count); +void __sanitizer_syscall_post_impl_compat_30_getdents(long long res, + long long fd, + long long buf, + long long count); +void __sanitizer_syscall_pre_impl_minherit(long long addr, long long len, + long long inherit); +void __sanitizer_syscall_post_impl_minherit(long long res, long long addr, + long long len, long long inherit); +void __sanitizer_syscall_pre_impl_lchmod(long long path, long long mode); +void __sanitizer_syscall_post_impl_lchmod(long long res, long long path, + long long mode); +void __sanitizer_syscall_pre_impl_lchown(long long path, long long uid, + long long gid); +void __sanitizer_syscall_post_impl_lchown(long long res, long long path, + long long uid, long long gid); +void __sanitizer_syscall_pre_impl_compat_50_lutimes(long long path, + long long tptr); +void __sanitizer_syscall_post_impl_compat_50_lutimes(long long res, + long long path, + long long tptr); +void __sanitizer_syscall_pre_impl___msync13(long long addr, long long len, + long long flags); +void __sanitizer_syscall_post_impl___msync13(long long res, long long addr, + long long len, long long flags); +void __sanitizer_syscall_pre_impl_compat_30___stat13(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_30___stat13(long long res, + long long path, + long long ub); +void __sanitizer_syscall_pre_impl_compat_30___fstat13(long long fd, + long long sb); +void __sanitizer_syscall_post_impl_compat_30___fstat13(long long res, + long long fd, + long long sb); +void __sanitizer_syscall_pre_impl_compat_30___lstat13(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_30___lstat13(long long res, 
+ long long path, + long long ub); +void __sanitizer_syscall_pre_impl___sigaltstack14(long long nss, long long oss); +void __sanitizer_syscall_post_impl___sigaltstack14(long long res, long long nss, + long long oss); +void __sanitizer_syscall_pre_impl___vfork14(void); +void __sanitizer_syscall_post_impl___vfork14(long long res); +void __sanitizer_syscall_pre_impl___posix_chown(long long path, long long uid, + long long gid); +void __sanitizer_syscall_post_impl___posix_chown(long long res, long long path, + long long uid, long long gid); +void __sanitizer_syscall_pre_impl___posix_fchown(long long fd, long long uid, + long long gid); +void __sanitizer_syscall_post_impl___posix_fchown(long long res, long long fd, + long long uid, long long gid); +void __sanitizer_syscall_pre_impl___posix_lchown(long long path, long long uid, + long long gid); +void __sanitizer_syscall_post_impl___posix_lchown(long long res, long long path, + long long uid, long long gid); +void __sanitizer_syscall_pre_impl_getsid(long long pid); +void __sanitizer_syscall_post_impl_getsid(long long res, long long pid); +void __sanitizer_syscall_pre_impl___clone(long long flags, long long stack); +void __sanitizer_syscall_post_impl___clone(long long res, long long flags, + long long stack); +void __sanitizer_syscall_pre_impl_fktrace(long long fd, long long ops, + long long facs, long long pid); +void __sanitizer_syscall_post_impl_fktrace(long long res, long long fd, + long long ops, long long facs, + long long pid); +void __sanitizer_syscall_pre_impl_preadv(long long fd, long long iovp, + long long iovcnt, long long PAD, + long long offset); +void __sanitizer_syscall_post_impl_preadv(long long res, long long fd, + long long iovp, long long iovcnt, + long long PAD, long long offset); +void __sanitizer_syscall_pre_impl_pwritev(long long fd, long long iovp, + long long iovcnt, long long PAD, + long long offset); +void __sanitizer_syscall_post_impl_pwritev(long long res, long long fd, + long long iovp, long long iovcnt, + long long PAD, long long offset); +void __sanitizer_syscall_pre_impl_compat_16___sigaction14(long long signum, + long long nsa, + long long osa); +void __sanitizer_syscall_post_impl_compat_16___sigaction14(long long res, + long long signum, + long long nsa, + long long osa); +void __sanitizer_syscall_pre_impl___sigpending14(long long set); +void __sanitizer_syscall_post_impl___sigpending14(long long res, long long set); +void __sanitizer_syscall_pre_impl___sigprocmask14(long long how, long long set, + long long oset); +void __sanitizer_syscall_post_impl___sigprocmask14(long long res, long long how, + long long set, + long long oset); +void __sanitizer_syscall_pre_impl___sigsuspend14(long long set); +void __sanitizer_syscall_post_impl___sigsuspend14(long long res, long long set); +void __sanitizer_syscall_pre_impl_compat_16___sigreturn14(long long sigcntxp); +void __sanitizer_syscall_post_impl_compat_16___sigreturn14(long long res, + long long sigcntxp); +void __sanitizer_syscall_pre_impl___getcwd(long long bufp, long long length); +void __sanitizer_syscall_post_impl___getcwd(long long res, long long bufp, + long long length); +void __sanitizer_syscall_pre_impl_fchroot(long long fd); +void __sanitizer_syscall_post_impl_fchroot(long long res, long long fd); +void __sanitizer_syscall_pre_impl_compat_30_fhopen(long long fhp, + long long flags); +void __sanitizer_syscall_post_impl_compat_30_fhopen(long long res, + long long fhp, + long long flags); +void __sanitizer_syscall_pre_impl_compat_30_fhstat(long long fhp, long 
long sb); +void __sanitizer_syscall_post_impl_compat_30_fhstat(long long res, + long long fhp, + long long sb); +void __sanitizer_syscall_pre_impl_compat_20_fhstatfs(long long fhp, + long long buf); +void __sanitizer_syscall_post_impl_compat_20_fhstatfs(long long res, + long long fhp, + long long buf); +void __sanitizer_syscall_pre_impl_compat_50_____semctl13(long long semid, + long long semnum, + long long cmd, + long long arg); +void __sanitizer_syscall_post_impl_compat_50_____semctl13(long long res, + long long semid, + long long semnum, + long long cmd, + long long arg); +void __sanitizer_syscall_pre_impl_compat_50___msgctl13(long long msqid, + long long cmd, + long long buf); +void __sanitizer_syscall_post_impl_compat_50___msgctl13(long long res, + long long msqid, + long long cmd, + long long buf); +void __sanitizer_syscall_pre_impl_compat_50___shmctl13(long long shmid, + long long cmd, + long long buf); +void __sanitizer_syscall_post_impl_compat_50___shmctl13(long long res, + long long shmid, + long long cmd, + long long buf); +void __sanitizer_syscall_pre_impl_lchflags(long long path, long long flags); +void __sanitizer_syscall_post_impl_lchflags(long long res, long long path, + long long flags); +void __sanitizer_syscall_pre_impl_issetugid(void); +void __sanitizer_syscall_post_impl_issetugid(long long res); +void __sanitizer_syscall_pre_impl_utrace(long long label, long long addr, + long long len); +void __sanitizer_syscall_post_impl_utrace(long long res, long long label, + long long addr, long long len); +void __sanitizer_syscall_pre_impl_getcontext(long long ucp); +void __sanitizer_syscall_post_impl_getcontext(long long res, long long ucp); +void __sanitizer_syscall_pre_impl_setcontext(long long ucp); +void __sanitizer_syscall_post_impl_setcontext(long long res, long long ucp); +void __sanitizer_syscall_pre_impl__lwp_create(long long ucp, long long flags, + long long new_lwp); +void __sanitizer_syscall_post_impl__lwp_create(long long res, long long ucp, + long long flags, + long long new_lwp); +void __sanitizer_syscall_pre_impl__lwp_exit(void); +void __sanitizer_syscall_post_impl__lwp_exit(long long res); +void __sanitizer_syscall_pre_impl__lwp_self(void); +void __sanitizer_syscall_post_impl__lwp_self(long long res); +void __sanitizer_syscall_pre_impl__lwp_wait(long long wait_for, + long long departed); +void __sanitizer_syscall_post_impl__lwp_wait(long long res, long long wait_for, + long long departed); +void __sanitizer_syscall_pre_impl__lwp_suspend(long long target); +void __sanitizer_syscall_post_impl__lwp_suspend(long long res, + long long target); +void __sanitizer_syscall_pre_impl__lwp_continue(long long target); +void __sanitizer_syscall_post_impl__lwp_continue(long long res, + long long target); +void __sanitizer_syscall_pre_impl__lwp_wakeup(long long target); +void __sanitizer_syscall_post_impl__lwp_wakeup(long long res, long long target); +void __sanitizer_syscall_pre_impl__lwp_getprivate(void); +void __sanitizer_syscall_post_impl__lwp_getprivate(long long res); +void __sanitizer_syscall_pre_impl__lwp_setprivate(long long ptr); +void __sanitizer_syscall_post_impl__lwp_setprivate(long long res, + long long ptr); +void __sanitizer_syscall_pre_impl__lwp_kill(long long target, long long signo); +void __sanitizer_syscall_post_impl__lwp_kill(long long res, long long target, + long long signo); +void __sanitizer_syscall_pre_impl__lwp_detach(long long target); +void __sanitizer_syscall_post_impl__lwp_detach(long long res, long long target); +void 
__sanitizer_syscall_pre_impl_compat_50__lwp_park(long long ts, + long long unpark, + long long hint, + long long unparkhint); +void __sanitizer_syscall_post_impl_compat_50__lwp_park(long long res, + long long ts, + long long unpark, + long long hint, + long long unparkhint); +void __sanitizer_syscall_pre_impl__lwp_unpark(long long target, long long hint); +void __sanitizer_syscall_post_impl__lwp_unpark(long long res, long long target, + long long hint); +void __sanitizer_syscall_pre_impl__lwp_unpark_all(long long targets, + long long ntargets, + long long hint); +void __sanitizer_syscall_post_impl__lwp_unpark_all(long long res, + long long targets, + long long ntargets, + long long hint); +void __sanitizer_syscall_pre_impl__lwp_setname(long long target, + long long name); +void __sanitizer_syscall_post_impl__lwp_setname(long long res, long long target, + long long name); +void __sanitizer_syscall_pre_impl__lwp_getname(long long target, long long name, + long long len); +void __sanitizer_syscall_post_impl__lwp_getname(long long res, long long target, + long long name, long long len); +void __sanitizer_syscall_pre_impl__lwp_ctl(long long features, + long long address); +void __sanitizer_syscall_post_impl__lwp_ctl(long long res, long long features, + long long address); +/* syscall 326 has been skipped */ +/* syscall 327 has been skipped */ +/* syscall 328 has been skipped */ +/* syscall 329 has been skipped */ +void __sanitizer_syscall_pre_impl_compat_60_sa_register( + long long newv, long long oldv, long long flags, + long long stackinfo_offset); +void __sanitizer_syscall_post_impl_compat_60_sa_register( + long long res, long long newv, long long oldv, long long flags, + long long stackinfo_offset); +void __sanitizer_syscall_pre_impl_compat_60_sa_stacks(long long num, + long long stacks); +void __sanitizer_syscall_post_impl_compat_60_sa_stacks(long long res, + long long num, + long long stacks); +void __sanitizer_syscall_pre_impl_compat_60_sa_enable(void); +void __sanitizer_syscall_post_impl_compat_60_sa_enable(long long res); +void __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency( + long long concurrency); +void __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency( + long long res, long long concurrency); +void __sanitizer_syscall_pre_impl_compat_60_sa_yield(void); +void __sanitizer_syscall_post_impl_compat_60_sa_yield(long long res); +void __sanitizer_syscall_pre_impl_compat_60_sa_preempt(long long sa_id); +void __sanitizer_syscall_post_impl_compat_60_sa_preempt(long long res, + long long sa_id); +/* syscall 336 has been skipped */ +/* syscall 337 has been skipped */ +/* syscall 338 has been skipped */ +/* syscall 339 has been skipped */ +void __sanitizer_syscall_pre_impl___sigaction_sigtramp(long long signum, + long long nsa, + long long osa, + long long tramp, + long long vers); +void __sanitizer_syscall_post_impl___sigaction_sigtramp( + long long res, long long signum, long long nsa, long long osa, + long long tramp, long long vers); +/* syscall 341 has been skipped */ +/* syscall 342 has been skipped */ +void __sanitizer_syscall_pre_impl_rasctl(long long addr, long long len, + long long op); +void __sanitizer_syscall_post_impl_rasctl(long long res, long long addr, + long long len, long long op); +void __sanitizer_syscall_pre_impl_kqueue(void); +void __sanitizer_syscall_post_impl_kqueue(long long res); +void __sanitizer_syscall_pre_impl_compat_50_kevent( + long long fd, long long changelist, long long nchanges, long long eventlist, + long long nevents, long long timeout); 
+void __sanitizer_syscall_post_impl_compat_50_kevent( + long long res, long long fd, long long changelist, long long nchanges, + long long eventlist, long long nevents, long long timeout); +void __sanitizer_syscall_pre_impl__sched_setparam(long long pid, long long lid, + long long policy, + long long params); +void __sanitizer_syscall_post_impl__sched_setparam(long long res, long long pid, + long long lid, + long long policy, + long long params); +void __sanitizer_syscall_pre_impl__sched_getparam(long long pid, long long lid, + long long policy, + long long params); +void __sanitizer_syscall_post_impl__sched_getparam(long long res, long long pid, + long long lid, + long long policy, + long long params); +void __sanitizer_syscall_pre_impl__sched_setaffinity(long long pid, + long long lid, + long long size, + long long cpuset); +void __sanitizer_syscall_post_impl__sched_setaffinity(long long res, + long long pid, + long long lid, + long long size, + long long cpuset); +void __sanitizer_syscall_pre_impl__sched_getaffinity(long long pid, + long long lid, + long long size, + long long cpuset); +void __sanitizer_syscall_post_impl__sched_getaffinity(long long res, + long long pid, + long long lid, + long long size, + long long cpuset); +void __sanitizer_syscall_pre_impl_sched_yield(void); +void __sanitizer_syscall_post_impl_sched_yield(long long res); +void __sanitizer_syscall_pre_impl__sched_protect(long long priority); +void __sanitizer_syscall_post_impl__sched_protect(long long res, + long long priority); +/* syscall 352 has been skipped */ +/* syscall 353 has been skipped */ +void __sanitizer_syscall_pre_impl_fsync_range(long long fd, long long flags, + long long start, + long long length); +void __sanitizer_syscall_post_impl_fsync_range(long long res, long long fd, + long long flags, long long start, + long long length); +void __sanitizer_syscall_pre_impl_uuidgen(long long store, long long count); +void __sanitizer_syscall_post_impl_uuidgen(long long res, long long store, + long long count); +void __sanitizer_syscall_pre_impl_compat_90_getvfsstat(long long buf, + long long bufsize, + long long flags); +void __sanitizer_syscall_post_impl_compat_90_getvfsstat(long long res, + long long buf, + long long bufsize, + long long flags); +void __sanitizer_syscall_pre_impl_compat_90_statvfs1(long long path, + long long buf, + long long flags); +void __sanitizer_syscall_post_impl_compat_90_statvfs1(long long res, + long long path, + long long buf, + long long flags); +void __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(long long fd, + long long buf, + long long flags); +void __sanitizer_syscall_post_impl_compat_90_fstatvfs1(long long res, + long long fd, + long long buf, + long long flags); +void __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(long long fhp, + long long buf, + long long flags); +void __sanitizer_syscall_post_impl_compat_30_fhstatvfs1(long long res, + long long fhp, + long long buf, + long long flags); +void __sanitizer_syscall_pre_impl_extattrctl(long long path, long long cmd, + long long filename, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_post_impl_extattrctl(long long res, long long path, + long long cmd, long long filename, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_pre_impl_extattr_set_file(long long path, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_set_file( + long long res, long long path, long long attrnamespace, long long 
attrname, + long long data, long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_get_file(long long path, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_get_file( + long long res, long long path, long long attrnamespace, long long attrname, + long long data, long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_delete_file(long long path, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_post_impl_extattr_delete_file(long long res, + long long path, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_pre_impl_extattr_set_fd(long long fd, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_set_fd(long long res, long long fd, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_get_fd(long long fd, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_get_fd(long long res, long long fd, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_delete_fd(long long fd, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_post_impl_extattr_delete_fd(long long res, + long long fd, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_pre_impl_extattr_set_link(long long path, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_set_link( + long long res, long long path, long long attrnamespace, long long attrname, + long long data, long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_get_link(long long path, + long long attrnamespace, + long long attrname, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_get_link( + long long res, long long path, long long attrnamespace, long long attrname, + long long data, long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_delete_link(long long path, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_post_impl_extattr_delete_link(long long res, + long long path, + long long attrnamespace, + long long attrname); +void __sanitizer_syscall_pre_impl_extattr_list_fd(long long fd, + long long attrnamespace, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_list_fd(long long res, long long fd, + long long attrnamespace, + long long data, + long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_list_file(long long path, + long long attrnamespace, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_list_file(long long res, + long long path, + long long attrnamespace, + long long data, + long long nbytes); +void __sanitizer_syscall_pre_impl_extattr_list_link(long long path, + long long attrnamespace, + long long data, + long long nbytes); +void __sanitizer_syscall_post_impl_extattr_list_link(long long res, + long long path, + long long attrnamespace, + long long data, + long long nbytes); +void __sanitizer_syscall_pre_impl_compat_50_pselect(long long nd, long long in, + long long ou, long long ex, + long long ts, + long long mask); +void __sanitizer_syscall_post_impl_compat_50_pselect(long long res, + long long nd, long long in, + long long ou, long 
long ex, + long long ts, + long long mask); +void __sanitizer_syscall_pre_impl_compat_50_pollts(long long fds, + long long nfds, long long ts, + long long mask); +void __sanitizer_syscall_post_impl_compat_50_pollts( + long long res, long long fds, long long nfds, long long ts, long long mask); +void __sanitizer_syscall_pre_impl_setxattr(long long path, long long name, + long long value, long long size, + long long flags); +void __sanitizer_syscall_post_impl_setxattr(long long res, long long path, + long long name, long long value, + long long size, long long flags); +void __sanitizer_syscall_pre_impl_lsetxattr(long long path, long long name, + long long value, long long size, + long long flags); +void __sanitizer_syscall_post_impl_lsetxattr(long long res, long long path, + long long name, long long value, + long long size, long long flags); +void __sanitizer_syscall_pre_impl_fsetxattr(long long fd, long long name, + long long value, long long size, + long long flags); +void __sanitizer_syscall_post_impl_fsetxattr(long long res, long long fd, + long long name, long long value, + long long size, long long flags); +void __sanitizer_syscall_pre_impl_getxattr(long long path, long long name, + long long value, long long size); +void __sanitizer_syscall_post_impl_getxattr(long long res, long long path, + long long name, long long value, + long long size); +void __sanitizer_syscall_pre_impl_lgetxattr(long long path, long long name, + long long value, long long size); +void __sanitizer_syscall_post_impl_lgetxattr(long long res, long long path, + long long name, long long value, + long long size); +void __sanitizer_syscall_pre_impl_fgetxattr(long long fd, long long name, + long long value, long long size); +void __sanitizer_syscall_post_impl_fgetxattr(long long res, long long fd, + long long name, long long value, + long long size); +void __sanitizer_syscall_pre_impl_listxattr(long long path, long long list, + long long size); +void __sanitizer_syscall_post_impl_listxattr(long long res, long long path, + long long list, long long size); +void __sanitizer_syscall_pre_impl_llistxattr(long long path, long long list, + long long size); +void __sanitizer_syscall_post_impl_llistxattr(long long res, long long path, + long long list, long long size); +void __sanitizer_syscall_pre_impl_flistxattr(long long fd, long long list, + long long size); +void __sanitizer_syscall_post_impl_flistxattr(long long res, long long fd, + long long list, long long size); +void __sanitizer_syscall_pre_impl_removexattr(long long path, long long name); +void __sanitizer_syscall_post_impl_removexattr(long long res, long long path, + long long name); +void __sanitizer_syscall_pre_impl_lremovexattr(long long path, long long name); +void __sanitizer_syscall_post_impl_lremovexattr(long long res, long long path, + long long name); +void __sanitizer_syscall_pre_impl_fremovexattr(long long fd, long long name); +void __sanitizer_syscall_post_impl_fremovexattr(long long res, long long fd, + long long name); +void __sanitizer_syscall_pre_impl_compat_50___stat30(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_50___stat30(long long res, + long long path, + long long ub); +void __sanitizer_syscall_pre_impl_compat_50___fstat30(long long fd, + long long sb); +void __sanitizer_syscall_post_impl_compat_50___fstat30(long long res, + long long fd, + long long sb); +void __sanitizer_syscall_pre_impl_compat_50___lstat30(long long path, + long long ub); +void __sanitizer_syscall_post_impl_compat_50___lstat30(long long res, 
+ long long path, + long long ub); +void __sanitizer_syscall_pre_impl___getdents30(long long fd, long long buf, + long long count); +void __sanitizer_syscall_post_impl___getdents30(long long res, long long fd, + long long buf, long long count); +void __sanitizer_syscall_pre_impl_posix_fadvise(long long); +void __sanitizer_syscall_post_impl_posix_fadvise(long long res, long long); +void __sanitizer_syscall_pre_impl_compat_30___fhstat30(long long fhp, + long long sb); +void __sanitizer_syscall_post_impl_compat_30___fhstat30(long long res, + long long fhp, + long long sb); +void __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30(long long ntvp); +void __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(long long res, + long long ntvp); +void __sanitizer_syscall_pre_impl___socket30(long long domain, long long type, + long long protocol); +void __sanitizer_syscall_post_impl___socket30(long long res, long long domain, + long long type, + long long protocol); +void __sanitizer_syscall_pre_impl___getfh30(long long fname, long long fhp, + long long fh_size); +void __sanitizer_syscall_post_impl___getfh30(long long res, long long fname, + long long fhp, long long fh_size); +void __sanitizer_syscall_pre_impl___fhopen40(long long fhp, long long fh_size, + long long flags); +void __sanitizer_syscall_post_impl___fhopen40(long long res, long long fhp, + long long fh_size, + long long flags); +void __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(long long fhp, + long long fh_size, + long long buf, + long long flags); +void __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(long long res, + long long fhp, + long long fh_size, + long long buf, + long long flags); +void __sanitizer_syscall_pre_impl_compat_50___fhstat40(long long fhp, + long long fh_size, + long long sb); +void __sanitizer_syscall_post_impl_compat_50___fhstat40(long long res, + long long fhp, + long long fh_size, + long long sb); +void __sanitizer_syscall_pre_impl_aio_cancel(long long fildes, + long long aiocbp); +void __sanitizer_syscall_post_impl_aio_cancel(long long res, long long fildes, + long long aiocbp); +void __sanitizer_syscall_pre_impl_aio_error(long long aiocbp); +void __sanitizer_syscall_post_impl_aio_error(long long res, long long aiocbp); +void __sanitizer_syscall_pre_impl_aio_fsync(long long op, long long aiocbp); +void __sanitizer_syscall_post_impl_aio_fsync(long long res, long long op, + long long aiocbp); +void __sanitizer_syscall_pre_impl_aio_read(long long aiocbp); +void __sanitizer_syscall_post_impl_aio_read(long long res, long long aiocbp); +void __sanitizer_syscall_pre_impl_aio_return(long long aiocbp); +void __sanitizer_syscall_post_impl_aio_return(long long res, long long aiocbp); +void __sanitizer_syscall_pre_impl_compat_50_aio_suspend(long long list, + long long nent, + long long timeout); +void __sanitizer_syscall_post_impl_compat_50_aio_suspend(long long res, + long long list, + long long nent, + long long timeout); +void __sanitizer_syscall_pre_impl_aio_write(long long aiocbp); +void __sanitizer_syscall_post_impl_aio_write(long long res, long long aiocbp); +void __sanitizer_syscall_pre_impl_lio_listio(long long mode, long long list, + long long nent, long long sig); +void __sanitizer_syscall_post_impl_lio_listio(long long res, long long mode, + long long list, long long nent, + long long sig); +/* syscall 407 has been skipped */ +/* syscall 408 has been skipped */ +/* syscall 409 has been skipped */ +void __sanitizer_syscall_pre_impl___mount50(long long type, long long path, + long long flags, long long 
data, + long long data_len); +void __sanitizer_syscall_post_impl___mount50(long long res, long long type, + long long path, long long flags, + long long data, + long long data_len); +void __sanitizer_syscall_pre_impl_mremap(long long old_address, + long long old_size, + long long new_address, + long long new_size, long long flags); +void __sanitizer_syscall_post_impl_mremap(long long res, long long old_address, + long long old_size, + long long new_address, + long long new_size, long long flags); +void __sanitizer_syscall_pre_impl_pset_create(long long psid); +void __sanitizer_syscall_post_impl_pset_create(long long res, long long psid); +void __sanitizer_syscall_pre_impl_pset_destroy(long long psid); +void __sanitizer_syscall_post_impl_pset_destroy(long long res, long long psid); +void __sanitizer_syscall_pre_impl_pset_assign(long long psid, long long cpuid, + long long opsid); +void __sanitizer_syscall_post_impl_pset_assign(long long res, long long psid, + long long cpuid, + long long opsid); +void __sanitizer_syscall_pre_impl__pset_bind(long long idtype, + long long first_id, + long long second_id, + long long psid, long long opsid); +void __sanitizer_syscall_post_impl__pset_bind(long long res, long long idtype, + long long first_id, + long long second_id, + long long psid, long long opsid); +void __sanitizer_syscall_pre_impl___posix_fadvise50(long long fd, long long PAD, + long long offset, + long long len, + long long advice); +void __sanitizer_syscall_post_impl___posix_fadvise50( + long long res, long long fd, long long PAD, long long offset, long long len, + long long advice); +void __sanitizer_syscall_pre_impl___select50(long long nd, long long in, + long long ou, long long ex, + long long tv); +void __sanitizer_syscall_post_impl___select50(long long res, long long nd, + long long in, long long ou, + long long ex, long long tv); +void __sanitizer_syscall_pre_impl___gettimeofday50(long long tp, long long tzp); +void __sanitizer_syscall_post_impl___gettimeofday50(long long res, long long tp, + long long tzp); +void __sanitizer_syscall_pre_impl___settimeofday50(long long tv, long long tzp); +void __sanitizer_syscall_post_impl___settimeofday50(long long res, long long tv, + long long tzp); +void __sanitizer_syscall_pre_impl___utimes50(long long path, long long tptr); +void __sanitizer_syscall_post_impl___utimes50(long long res, long long path, + long long tptr); +void __sanitizer_syscall_pre_impl___adjtime50(long long delta, + long long olddelta); +void __sanitizer_syscall_post_impl___adjtime50(long long res, long long delta, + long long olddelta); +void __sanitizer_syscall_pre_impl___lfs_segwait50(long long fsidp, + long long tv); +void __sanitizer_syscall_post_impl___lfs_segwait50(long long res, + long long fsidp, + long long tv); +void __sanitizer_syscall_pre_impl___futimes50(long long fd, long long tptr); +void __sanitizer_syscall_post_impl___futimes50(long long res, long long fd, + long long tptr); +void __sanitizer_syscall_pre_impl___lutimes50(long long path, long long tptr); +void __sanitizer_syscall_post_impl___lutimes50(long long res, long long path, + long long tptr); +void __sanitizer_syscall_pre_impl___setitimer50(long long which, long long itv, + long long oitv); +void __sanitizer_syscall_post_impl___setitimer50(long long res, long long which, + long long itv, long long oitv); +void __sanitizer_syscall_pre_impl___getitimer50(long long which, long long itv); +void __sanitizer_syscall_post_impl___getitimer50(long long res, long long which, + long long itv); +void 
__sanitizer_syscall_pre_impl___clock_gettime50(long long clock_id, + long long tp); +void __sanitizer_syscall_post_impl___clock_gettime50(long long res, + long long clock_id, + long long tp); +void __sanitizer_syscall_pre_impl___clock_settime50(long long clock_id, + long long tp); +void __sanitizer_syscall_post_impl___clock_settime50(long long res, + long long clock_id, + long long tp); +void __sanitizer_syscall_pre_impl___clock_getres50(long long clock_id, + long long tp); +void __sanitizer_syscall_post_impl___clock_getres50(long long res, + long long clock_id, + long long tp); +void __sanitizer_syscall_pre_impl___nanosleep50(long long rqtp, long long rmtp); +void __sanitizer_syscall_post_impl___nanosleep50(long long res, long long rqtp, + long long rmtp); +void __sanitizer_syscall_pre_impl_____sigtimedwait50(long long set, + long long info, + long long timeout); +void __sanitizer_syscall_post_impl_____sigtimedwait50(long long res, + long long set, + long long info, + long long timeout); +void __sanitizer_syscall_pre_impl___mq_timedsend50(long long mqdes, + long long msg_ptr, + long long msg_len, + long long msg_prio, + long long abs_timeout); +void __sanitizer_syscall_post_impl___mq_timedsend50( + long long res, long long mqdes, long long msg_ptr, long long msg_len, + long long msg_prio, long long abs_timeout); +void __sanitizer_syscall_pre_impl___mq_timedreceive50(long long mqdes, + long long msg_ptr, + long long msg_len, + long long msg_prio, + long long abs_timeout); +void __sanitizer_syscall_post_impl___mq_timedreceive50( + long long res, long long mqdes, long long msg_ptr, long long msg_len, + long long msg_prio, long long abs_timeout); +void __sanitizer_syscall_pre_impl_compat_60__lwp_park(long long ts, + long long unpark, + long long hint, + long long unparkhint); +void __sanitizer_syscall_post_impl_compat_60__lwp_park(long long res, + long long ts, + long long unpark, + long long hint, + long long unparkhint); +void __sanitizer_syscall_pre_impl___kevent50(long long fd, long long changelist, + long long nchanges, + long long eventlist, + long long nevents, + long long timeout); +void __sanitizer_syscall_post_impl___kevent50( + long long res, long long fd, long long changelist, long long nchanges, + long long eventlist, long long nevents, long long timeout); +void __sanitizer_syscall_pre_impl___pselect50(long long nd, long long in, + long long ou, long long ex, + long long ts, long long mask); +void __sanitizer_syscall_post_impl___pselect50(long long res, long long nd, + long long in, long long ou, + long long ex, long long ts, + long long mask); +void __sanitizer_syscall_pre_impl___pollts50(long long fds, long long nfds, + long long ts, long long mask); +void __sanitizer_syscall_post_impl___pollts50(long long res, long long fds, + long long nfds, long long ts, + long long mask); +void __sanitizer_syscall_pre_impl___aio_suspend50(long long list, + long long nent, + long long timeout); +void __sanitizer_syscall_post_impl___aio_suspend50(long long res, + long long list, + long long nent, + long long timeout); +void __sanitizer_syscall_pre_impl___stat50(long long path, long long ub); +void __sanitizer_syscall_post_impl___stat50(long long res, long long path, + long long ub); +void __sanitizer_syscall_pre_impl___fstat50(long long fd, long long sb); +void __sanitizer_syscall_post_impl___fstat50(long long res, long long fd, + long long sb); +void __sanitizer_syscall_pre_impl___lstat50(long long path, long long ub); +void __sanitizer_syscall_post_impl___lstat50(long long res, long long 
path, + long long ub); +void __sanitizer_syscall_pre_impl_____semctl50(long long semid, + long long semnum, long long cmd, + long long arg); +void __sanitizer_syscall_post_impl_____semctl50(long long res, long long semid, + long long semnum, long long cmd, + long long arg); +void __sanitizer_syscall_pre_impl___shmctl50(long long shmid, long long cmd, + long long buf); +void __sanitizer_syscall_post_impl___shmctl50(long long res, long long shmid, + long long cmd, long long buf); +void __sanitizer_syscall_pre_impl___msgctl50(long long msqid, long long cmd, + long long buf); +void __sanitizer_syscall_post_impl___msgctl50(long long res, long long msqid, + long long cmd, long long buf); +void __sanitizer_syscall_pre_impl___getrusage50(long long who, + long long rusage); +void __sanitizer_syscall_post_impl___getrusage50(long long res, long long who, + long long rusage); +void __sanitizer_syscall_pre_impl___timer_settime50(long long timerid, + long long flags, + long long value, + long long ovalue); +void __sanitizer_syscall_post_impl___timer_settime50(long long res, + long long timerid, + long long flags, + long long value, + long long ovalue); +void __sanitizer_syscall_pre_impl___timer_gettime50(long long timerid, + long long value); +void __sanitizer_syscall_post_impl___timer_gettime50(long long res, + long long timerid, + long long value); +#if defined(NTP) || !defined(_KERNEL_OPT) +void __sanitizer_syscall_pre_impl___ntp_gettime50(long long ntvp); +void __sanitizer_syscall_post_impl___ntp_gettime50(long long res, + long long ntvp); +#else +/* syscall 448 has been skipped */ +#endif +void __sanitizer_syscall_pre_impl___wait450(long long pid, long long status, + long long options, + long long rusage); +void __sanitizer_syscall_post_impl___wait450(long long res, long long pid, + long long status, + long long options, + long long rusage); +void __sanitizer_syscall_pre_impl___mknod50(long long path, long long mode, + long long dev); +void __sanitizer_syscall_post_impl___mknod50(long long res, long long path, + long long mode, long long dev); +void __sanitizer_syscall_pre_impl___fhstat50(long long fhp, long long fh_size, + long long sb); +void __sanitizer_syscall_post_impl___fhstat50(long long res, long long fhp, + long long fh_size, long long sb); +/* syscall 452 has been skipped */ +void __sanitizer_syscall_pre_impl_pipe2(long long fildes, long long flags); +void __sanitizer_syscall_post_impl_pipe2(long long res, long long fildes, + long long flags); +void __sanitizer_syscall_pre_impl_dup3(long long from, long long to, + long long flags); +void __sanitizer_syscall_post_impl_dup3(long long res, long long from, + long long to, long long flags); +void __sanitizer_syscall_pre_impl_kqueue1(long long flags); +void __sanitizer_syscall_post_impl_kqueue1(long long res, long long flags); +void __sanitizer_syscall_pre_impl_paccept(long long s, long long name, + long long anamelen, long long mask, + long long flags); +void __sanitizer_syscall_post_impl_paccept(long long res, long long s, + long long name, long long anamelen, + long long mask, long long flags); +void __sanitizer_syscall_pre_impl_linkat(long long fd1, long long name1, + long long fd2, long long name2, + long long flags); +void __sanitizer_syscall_post_impl_linkat(long long res, long long fd1, + long long name1, long long fd2, + long long name2, long long flags); +void __sanitizer_syscall_pre_impl_renameat(long long fromfd, long long from, + long long tofd, long long to); +void __sanitizer_syscall_post_impl_renameat(long long res, long long 
fromfd, + long long from, long long tofd, + long long to); +void __sanitizer_syscall_pre_impl_mkfifoat(long long fd, long long path, + long long mode); +void __sanitizer_syscall_post_impl_mkfifoat(long long res, long long fd, + long long path, long long mode); +void __sanitizer_syscall_pre_impl_mknodat(long long fd, long long path, + long long mode, long long PAD, + long long dev); +void __sanitizer_syscall_post_impl_mknodat(long long res, long long fd, + long long path, long long mode, + long long PAD, long long dev); +void __sanitizer_syscall_pre_impl_mkdirat(long long fd, long long path, + long long mode); +void __sanitizer_syscall_post_impl_mkdirat(long long res, long long fd, + long long path, long long mode); +void __sanitizer_syscall_pre_impl_faccessat(long long fd, long long path, + long long amode, long long flag); +void __sanitizer_syscall_post_impl_faccessat(long long res, long long fd, + long long path, long long amode, + long long flag); +void __sanitizer_syscall_pre_impl_fchmodat(long long fd, long long path, + long long mode, long long flag); +void __sanitizer_syscall_post_impl_fchmodat(long long res, long long fd, + long long path, long long mode, + long long flag); +void __sanitizer_syscall_pre_impl_fchownat(long long fd, long long path, + long long owner, long long group, + long long flag); +void __sanitizer_syscall_post_impl_fchownat(long long res, long long fd, + long long path, long long owner, + long long group, long long flag); +void __sanitizer_syscall_pre_impl_fexecve(long long fd, long long argp, + long long envp); +void __sanitizer_syscall_post_impl_fexecve(long long res, long long fd, + long long argp, long long envp); +void __sanitizer_syscall_pre_impl_fstatat(long long fd, long long path, + long long buf, long long flag); +void __sanitizer_syscall_post_impl_fstatat(long long res, long long fd, + long long path, long long buf, + long long flag); +void __sanitizer_syscall_pre_impl_utimensat(long long fd, long long path, + long long tptr, long long flag); +void __sanitizer_syscall_post_impl_utimensat(long long res, long long fd, + long long path, long long tptr, + long long flag); +void __sanitizer_syscall_pre_impl_openat(long long fd, long long path, + long long oflags, long long mode); +void __sanitizer_syscall_post_impl_openat(long long res, long long fd, + long long path, long long oflags, + long long mode); +void __sanitizer_syscall_pre_impl_readlinkat(long long fd, long long path, + long long buf, long long bufsize); +void __sanitizer_syscall_post_impl_readlinkat(long long res, long long fd, + long long path, long long buf, + long long bufsize); +void __sanitizer_syscall_pre_impl_symlinkat(long long path1, long long fd, + long long path2); +void __sanitizer_syscall_post_impl_symlinkat(long long res, long long path1, + long long fd, long long path2); +void __sanitizer_syscall_pre_impl_unlinkat(long long fd, long long path, + long long flag); +void __sanitizer_syscall_post_impl_unlinkat(long long res, long long fd, + long long path, long long flag); +void __sanitizer_syscall_pre_impl_futimens(long long fd, long long tptr); +void __sanitizer_syscall_post_impl_futimens(long long res, long long fd, + long long tptr); +void __sanitizer_syscall_pre_impl___quotactl(long long path, long long args); +void __sanitizer_syscall_post_impl___quotactl(long long res, long long path, + long long args); +void __sanitizer_syscall_pre_impl_posix_spawn(long long pid, long long path, + long long file_actions, + long long attrp, long long argv, + long long envp); +void 
__sanitizer_syscall_post_impl_posix_spawn(long long res, long long pid, + long long path, + long long file_actions, + long long attrp, long long argv, + long long envp); +void __sanitizer_syscall_pre_impl_recvmmsg(long long s, long long mmsg, + long long vlen, long long flags, + long long timeout); +void __sanitizer_syscall_post_impl_recvmmsg(long long res, long long s, + long long mmsg, long long vlen, + long long flags, long long timeout); +void __sanitizer_syscall_pre_impl_sendmmsg(long long s, long long mmsg, + long long vlen, long long flags); +void __sanitizer_syscall_post_impl_sendmmsg(long long res, long long s, + long long mmsg, long long vlen, + long long flags); +void __sanitizer_syscall_pre_impl_clock_nanosleep(long long clock_id, + long long flags, + long long rqtp, + long long rmtp); +void __sanitizer_syscall_post_impl_clock_nanosleep(long long res, + long long clock_id, + long long flags, + long long rqtp, + long long rmtp); +void __sanitizer_syscall_pre_impl____lwp_park60(long long clock_id, + long long flags, long long ts, + long long unpark, + long long hint, + long long unparkhint); +void __sanitizer_syscall_post_impl____lwp_park60( + long long res, long long clock_id, long long flags, long long ts, + long long unpark, long long hint, long long unparkhint); +void __sanitizer_syscall_pre_impl_posix_fallocate(long long fd, long long PAD, + long long pos, long long len); +void __sanitizer_syscall_post_impl_posix_fallocate(long long res, long long fd, + long long PAD, long long pos, + long long len); +void __sanitizer_syscall_pre_impl_fdiscard(long long fd, long long PAD, + long long pos, long long len); +void __sanitizer_syscall_post_impl_fdiscard(long long res, long long fd, + long long PAD, long long pos, + long long len); +void __sanitizer_syscall_pre_impl_wait6(long long idtype, long long id, + long long status, long long options, + long long wru, long long info); +void __sanitizer_syscall_post_impl_wait6(long long res, long long idtype, + long long id, long long status, + long long options, long long wru, + long long info); +void __sanitizer_syscall_pre_impl_clock_getcpuclockid2(long long idtype, + long long id, + long long clock_id); +void __sanitizer_syscall_post_impl_clock_getcpuclockid2(long long res, + long long idtype, + long long id, + long long clock_id); +void __sanitizer_syscall_pre_impl___getvfsstat90(long long buf, + long long bufsize, + long long flags); +void __sanitizer_syscall_post_impl___getvfsstat90(long long res, long long buf, + long long bufsize, + long long flags); +void __sanitizer_syscall_pre_impl___statvfs190(long long path, long long buf, + long long flags); +void __sanitizer_syscall_post_impl___statvfs190(long long res, long long path, + long long buf, long long flags); +void __sanitizer_syscall_pre_impl___fstatvfs190(long long fd, long long buf, + long long flags); +void __sanitizer_syscall_post_impl___fstatvfs190(long long res, long long fd, + long long buf, + long long flags); +void __sanitizer_syscall_pre_impl___fhstatvfs190(long long fhp, + long long fh_size, + long long buf, + long long flags); +void __sanitizer_syscall_post_impl___fhstatvfs190(long long res, long long fhp, + long long fh_size, + long long buf, + long long flags); +void __sanitizer_syscall_pre_impl___acl_get_link(long long path, long long type, + long long aclp); +void __sanitizer_syscall_post_impl___acl_get_link(long long res, long long path, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl___acl_set_link(long long path, long long type, + long 
long aclp); +void __sanitizer_syscall_post_impl___acl_set_link(long long res, long long path, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl___acl_delete_link(long long path, + long long type); +void __sanitizer_syscall_post_impl___acl_delete_link(long long res, + long long path, + long long type); +void __sanitizer_syscall_pre_impl___acl_aclcheck_link(long long path, + long long type, + long long aclp); +void __sanitizer_syscall_post_impl___acl_aclcheck_link(long long res, + long long path, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl___acl_get_file(long long path, long long type, + long long aclp); +void __sanitizer_syscall_post_impl___acl_get_file(long long res, long long path, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl___acl_set_file(long long path, long long type, + long long aclp); +void __sanitizer_syscall_post_impl___acl_set_file(long long res, long long path, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl___acl_get_fd(long long filedes, + long long type, long long aclp); +void __sanitizer_syscall_post_impl___acl_get_fd(long long res, + long long filedes, + long long type, long long aclp); +void __sanitizer_syscall_pre_impl___acl_set_fd(long long filedes, + long long type, long long aclp); +void __sanitizer_syscall_post_impl___acl_set_fd(long long res, + long long filedes, + long long type, long long aclp); +void __sanitizer_syscall_pre_impl___acl_delete_file(long long path, + long long type); +void __sanitizer_syscall_post_impl___acl_delete_file(long long res, + long long path, + long long type); +void __sanitizer_syscall_pre_impl___acl_delete_fd(long long filedes, + long long type); +void __sanitizer_syscall_post_impl___acl_delete_fd(long long res, + long long filedes, + long long type); +void __sanitizer_syscall_pre_impl___acl_aclcheck_file(long long path, + long long type, + long long aclp); +void __sanitizer_syscall_post_impl___acl_aclcheck_file(long long res, + long long path, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl___acl_aclcheck_fd(long long filedes, + long long type, + long long aclp); +void __sanitizer_syscall_post_impl___acl_aclcheck_fd(long long res, + long long filedes, + long long type, + long long aclp); +void __sanitizer_syscall_pre_impl_lpathconf(long long path, long long name); +void __sanitizer_syscall_post_impl_lpathconf(long long res, long long path, + long long name); + +#ifdef __cplusplus +} // extern "C" +#endif + +// DO NOT EDIT! THIS FILE HAS BEEN GENERATED! + +#endif // SANITIZER_NETBSD_SYSCALL_HOOKS_H diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/scudo_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/scudo_interface.h new file mode 100644 index 0000000..dd522c1 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/scudo_interface.h @@ -0,0 +1,38 @@ +//===-- sanitizer/scudo_interface.h -----------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// Public Scudo interface header. 
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SCUDO_INTERFACE_H_
+#define SANITIZER_SCUDO_INTERFACE_H_
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  // This function may optionally be provided by a user and should return
+  // a string containing Scudo runtime options. See scudo_flags.h for details.
+  const char* __scudo_default_options(void);
+
+  // This function allows setting the RSS limit at runtime. This can be either
+  // the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
+  // can be removed by setting LimitMb to 0. This function's parameters should
+  // be fully trusted to avoid security mishaps.
+  void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
+
+  // This function outputs various allocator statistics for both the Primary
+  // and Secondary allocators, including memory usage and the number of
+  // allocations and deallocations.
+  void __scudo_print_stats(void);
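+
+  // As a minimal illustrative sketch, a program could pin its Scudo options
+  // at build time by defining __scudo_default_options itself (the flag
+  // string below is only an example; see scudo_flags.h for the real list):
+  //
+  //   const char *__scudo_default_options(void) {
+  //     return "QuarantineSizeKb=256:DeallocationTypeMismatch=false";
+  //   }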
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_SCUDO_INTERFACE_H_
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/tsan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/tsan_interface.h
new file mode 100644
index 0000000..565aa39
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/tsan_interface.h
@@ -0,0 +1,176 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Public interface header for TSan.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_TSAN_INTERFACE_H
+#define SANITIZER_TSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// __tsan_acquire establishes a happens-before relation with a preceding
+// __tsan_release on the same address.
+void __tsan_acquire(void *addr);
+void __tsan_release(void *addr);
+
+// Annotations for custom mutexes.
+// The annotations allow getting better reports (with sets of locked mutexes),
+// detecting more types of bugs (e.g. mutex misuses, races between lock/unlock
+// and destruction, and potential deadlocks) and improving precision and
+// performance (by ignoring individual atomic operations in mutex code).
+// However, the downside is that annotated mutex code itself is not checked
+// for correctness.
+
+// Mutex creation flags are passed to the __tsan_mutex_create annotation.
+// If a mutex has no constructor and __tsan_mutex_create is not called,
+// the flags may be passed to the __tsan_mutex_pre_lock/__tsan_mutex_post_lock
+// annotations.
+
+// The mutex has static storage duration and a no-op constructor and
+// destructor. This effectively makes tsan ignore the destroy annotation.
+static const unsigned __tsan_mutex_linker_init = 1 << 0;
+// Mutex is write reentrant.
+static const unsigned __tsan_mutex_write_reentrant = 1 << 1;
+// Mutex is read reentrant.
+static const unsigned __tsan_mutex_read_reentrant = 1 << 2;
+// Mutex does not have static storage duration, and must not be used after
+// its destructor runs. The opposite of __tsan_mutex_linker_init.
+// If this flag is passed to __tsan_mutex_destroy, then the destruction
+// is ignored unless this flag was previously set on the mutex.
+static const unsigned __tsan_mutex_not_static = 1 << 8;
+
+// Mutex operation flags:
+
+// Denotes a read lock operation.
+static const unsigned __tsan_mutex_read_lock = 1 << 3;
+// Denotes a try lock operation.
+static const unsigned __tsan_mutex_try_lock = 1 << 4;
+// Denotes that a try lock operation has failed to acquire the mutex.
+static const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
+// Denotes that the lock operation acquires multiple recursion levels.
+// The number of levels is passed in the recursion parameter.
+// This is useful for annotating e.g. Java builtin monitors, for which the
+// wait operation releases all recursive acquisitions of the mutex.
+static const unsigned __tsan_mutex_recursive_lock = 1 << 6;
+// Denotes that the unlock operation releases all recursion levels.
+// The number of released levels is returned and must later be passed to
+// the corresponding __tsan_mutex_post_lock annotation.
+static const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
+
+// Convenient composed constants.
+static const unsigned __tsan_mutex_try_read_lock =
+    __tsan_mutex_read_lock | __tsan_mutex_try_lock;
+static const unsigned __tsan_mutex_try_read_lock_failed =
+    __tsan_mutex_try_read_lock | __tsan_mutex_try_lock_failed;
+
+// Annotate creation of a mutex.
+// Supported flags: mutex creation flags.
+void __tsan_mutex_create(void *addr, unsigned flags);
+
+// Annotate destruction of a mutex.
+// Supported flags:
+//   - __tsan_mutex_linker_init
+//   - __tsan_mutex_not_static
+void __tsan_mutex_destroy(void *addr, unsigned flags);
+
+// Annotate start of lock operation.
+// Supported flags:
+//   - __tsan_mutex_read_lock
+//   - __tsan_mutex_try_lock
+//   - all mutex creation flags
+void __tsan_mutex_pre_lock(void *addr, unsigned flags);
+
+// Annotate end of lock operation.
+// Supported flags:
+//   - __tsan_mutex_read_lock (must match __tsan_mutex_pre_lock)
+//   - __tsan_mutex_try_lock (must match __tsan_mutex_pre_lock)
+//   - __tsan_mutex_try_lock_failed
+//   - __tsan_mutex_recursive_lock
+//   - all mutex creation flags
+void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);
+
+// Annotate start of unlock operation.
+// Supported flags:
+//   - __tsan_mutex_read_lock
+//   - __tsan_mutex_recursive_unlock
+int __tsan_mutex_pre_unlock(void *addr, unsigned flags);
+
+// Annotate end of unlock operation.
+// Supported flags:
+//   - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
+void __tsan_mutex_post_unlock(void *addr, unsigned flags);
+
+// Annotate start/end of a notify/signal/broadcast operation.
+// Supported flags: none.
+void __tsan_mutex_pre_signal(void *addr, unsigned flags);
+void __tsan_mutex_post_signal(void *addr, unsigned flags);
+
+// Annotate start/end of a region of code where a lock/unlock/signal operation
+// diverts to do something else unrelated to the mutex. This can be used to
+// annotate, for example, calls into a cooperative scheduler or contention
+// profiling code.
+// These annotations must be called only from within
+// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock, and
+// __tsan_mutex_pre/post_signal regions.
+// Supported flags: none.
+void __tsan_mutex_pre_divert(void *addr, unsigned flags);
+void __tsan_mutex_post_divert(void *addr, unsigned flags);
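+
+// As a minimal illustrative sketch (spinlock_t and its atomic `locked` field
+// are hypothetical, not part of this interface), a hand-rolled lock could be
+// annotated as follows:
+//
+//   void spin_lock(spinlock_t *m) {
+//     __tsan_mutex_pre_lock(m, 0);
+//     while (atomic_exchange(&m->locked, 1)) {
+//     }
+//     __tsan_mutex_post_lock(m, 0, 0);
+//   }
+//   void spin_unlock(spinlock_t *m) {
+//     __tsan_mutex_pre_unlock(m, 0);
+//     atomic_store(&m->locked, 0);
+//     __tsan_mutex_post_unlock(m, 0);
+//   }
+//
+// with __tsan_mutex_create(m, 0) called once when the lock is initialized.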
+
+// External race detection API.
+// Can be used by non-instrumented libraries to detect when their objects are
+// being used in an unsafe manner.
+//   - __tsan_external_read/__tsan_external_write annotate the logical reads
+//     and writes of the object at the specified address. 'caller_pc' should
+//     be the PC of the library user, which the library can obtain with e.g.
+//     `__builtin_return_address(0)`.
+//   - __tsan_external_register_tag registers a 'tag' with the specified name,
+//     which is later used in read/write annotations to denote the object type.
+//   - __tsan_external_assign_tag can optionally mark a heap object with a tag.
+void *__tsan_external_register_tag(const char *object_type);
+void __tsan_external_register_header(void *tag, const char *header);
+void __tsan_external_assign_tag(void *addr, void *tag);
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
+// Fiber switching API.
+//   - A TSan context for a fiber can be created with __tsan_create_fiber
+//     and freed with __tsan_destroy_fiber.
+//   - The TSan context of the current fiber or thread can be obtained
+//     by calling __tsan_get_current_fiber.
+//   - __tsan_switch_to_fiber should be called immediately before switching
+//     to a fiber, e.g. before a call to swapcontext.
+//   - A fiber's name can be set with __tsan_set_fiber_name.
+void *__tsan_get_current_fiber(void);
+void *__tsan_create_fiber(unsigned flags);
+void __tsan_destroy_fiber(void *fiber);
+void __tsan_switch_to_fiber(void *fiber, unsigned flags);
+void __tsan_set_fiber_name(void *fiber, const char *name);
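+
+// As an illustrative sketch, a ucontext-based scheduler might drive these
+// hooks as follows (main_ctx/worker_ctx are hypothetical application state;
+// error handling omitted):
+//
+//   void *worker = __tsan_create_fiber(0);
+//   __tsan_set_fiber_name(worker, "worker");
+//   __tsan_switch_to_fiber(worker, 0);    // hook first,
+//   swapcontext(&main_ctx, &worker_ctx);  // then the actual switch
+//   ...
+//   __tsan_destroy_fiber(worker);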
+
+// Flags for __tsan_switch_to_fiber:
+// Do not establish a happens-before relation between fibers.
+static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
+
+// User-provided callback invoked on TSan initialization.
+void __tsan_on_initialize();
+
+// User-provided callback invoked on TSan shutdown.
+// `failed` - Nonzero if TSan detected issues, zero otherwise.
+// Return `0` if TSan should exit as if no issues were detected. Return nonzero
+// if TSan should exit as if issues were detected.
+int __tsan_on_finalize(int failed);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_TSAN_INTERFACE_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/tsan_interface_atomic.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/tsan_interface_atomic.h
new file mode 100644
index 0000000..5e41e22
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/tsan_interface_atomic.h
@@ -0,0 +1,221 @@
+//===-- tsan_interface_atomic.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Public interface header for TSan atomics.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16;
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64;
+#if defined(__SIZEOF_INT128__) \
+    || (__clang_major__ * 100 + __clang_minor__ >= 302)
+__extension__ typedef __int128 __tsan_atomic128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
+// (The enumerator order mirrors the C/C++ memory_order values.)
+typedef enum {
+  __tsan_memory_order_relaxed,
+  __tsan_memory_order_consume,
+  __tsan_memory_order_acquire,
+  __tsan_memory_order_release,
+  __tsan_memory_order_acq_rel,
+  __tsan_memory_order_seq_cst
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+    __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+    __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+    __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+    __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+    __tsan_memory_order mo);
+#endif
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+    __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+    __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+    __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+    __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+    __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+
__tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a, + __tsan_atomic64 v, __tsan_memory_order mo); +#if __TSAN_HAS_INT128 +__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo); +#endif + +__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a, + __tsan_atomic64 v, __tsan_memory_order mo); +#if __TSAN_HAS_INT128 +__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo); +#endif + +__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a, + __tsan_atomic64 v, __tsan_memory_order mo); +#if __TSAN_HAS_INT128 +__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo); +#endif + +__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a, + __tsan_atomic64 v, __tsan_memory_order mo); +#if __TSAN_HAS_INT128 +__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a, + __tsan_atomic128 v, __tsan_memory_order mo); +#endif + +int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a, + __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a, + __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a, + __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a, + __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +#if __TSAN_HAS_INT128 +int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a, + __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +#endif + +int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a, + __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a, + __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo, + __tsan_memory_order fail_mo); +int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a, + __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo, + 
__tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#endif
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ATOMIC_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/ubsan_interface.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/ubsan_interface.h
new file mode 100644
index 0000000..59fc6c3
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sanitizer/ubsan_interface.h
@@ -0,0 +1,32 @@
+//===-- sanitizer/ubsan_interface.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of UBSanitizer (UBSan).
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_UBSAN_INTERFACE_H
+#define SANITIZER_UBSAN_INTERFACE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/// User-provided default option settings.
+///
+/// You can provide your own implementation of this function to return a string
+/// containing UBSan runtime options (for example,
+/// verbosity=1:halt_on_error=0).
+///
+/// \returns Default options string.
+const char* __ubsan_default_options(void);
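+
+/// As an illustrative sketch, a binary that wants every UBSan report to be
+/// fatal and to carry a stack trace could define (option names as documented
+/// for the UBSan runtime; adjust as needed):
+///
+/// \code
+///   const char *__ubsan_default_options(void) {
+///     return "halt_on_error=1:print_stacktrace=1";
+///   }
+/// \endcode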
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_UBSAN_INTERFACE_H
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/serializeintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/serializeintrin.h
new file mode 100644
index 0000000..b774e5a
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/serializeintrin.h
@@ -0,0 +1,30 @@
+/*===--------------- serializeintrin.h - serialize intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <serializeintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SERIALIZEINTRIN_H
+#define __SERIALIZEINTRIN_H
+
+/// Serialize instruction fetch and execution.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the SERIALIZE instruction.
+///
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("serialize")))
+_serialize (void)
+{
+  __builtin_ia32_serialize ();
+}
+
+#endif /* __SERIALIZEINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sgxintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sgxintrin.h
new file mode 100644
index 0000000..303a21f
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/sgxintrin.h
@@ -0,0 +1,60 @@
+/*===---- sgxintrin.h - X86 SGX intrinsics configuration -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <sgxintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __SGXINTRIN_H
+#define __SGXINTRIN_H
+
+#if __has_extension(gnu_asm)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("sgx")))
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_enclu_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+  unsigned int __result;
+  __asm__ ("enclu"
+           : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+           : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+           : "cc");
+  return __result;
+}
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_encls_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+  unsigned int __result;
+  __asm__ ("encls"
+           : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+           : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+           : "cc");
+  return __result;
+}
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_enclv_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+  unsigned int __result;
+  __asm__ ("enclv"
+           : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+           : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+           : "cc");
+  return __result;
+}
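+
+/* As an illustrative sketch, entering an enclave via the EENTER leaf of
+   ENCLU (leaf number and register layout per the Intel SDM: EENTER is
+   leaf 2, RBX = TCS address, RCX = AEP address; `tcs` and `aep` are
+   hypothetical pointers owned by the caller):
+
+     __SIZE_TYPE__ regs[3] = {(__SIZE_TYPE__)tcs, (__SIZE_TYPE__)aep, 0};
+     unsigned int leaf_out = _enclu_u32(2, regs);
+*/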
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __has_extension(gnu_asm) */
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/shaintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/shaintrin.h
new file mode 100644
index 0000000..08b1fb1d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/shaintrin.h
@@ -0,0 +1,61 @@
+/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <shaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SHAINTRIN_H
+#define __SHAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128)))
+
+#define _mm_sha1rnds4_epu32(V1, V2, M) \
+  __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
+{
+  return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __SHAINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/smmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/smmintrin.h
new file mode 100644
index 0000000..025830a
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/smmintrin.h
@@ -0,0 +1,2430 @@
+/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __SMMINTRIN_H
+#define __SMMINTRIN_H
+
+#include <tmmintrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
+
+/* SSE4 Rounding macros. */
+#define _MM_FROUND_TO_NEAREST_INT    0x00
+#define _MM_FROUND_TO_NEG_INF        0x01
+#define _MM_FROUND_TO_POS_INF        0x02
+#define _MM_FROUND_TO_ZERO           0x03
+#define _MM_FROUND_CUR_DIRECTION     0x04
+
+#define _MM_FROUND_RAISE_EXC         0x00
+#define _MM_FROUND_NO_EXC            0x08
+
+#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
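+
+/* As an illustrative sketch of how these macros compose:
+   _MM_FROUND_RAISE_EXC is 0, so _MM_FROUND_FLOOR is just
+   _MM_FROUND_TO_NEG_INF, and
+
+     __m128 down  = _mm_round_ps(v, _MM_FROUND_FLOOR);
+     __m128 quiet = _mm_round_ps(v, _MM_FROUND_NO_EXC | _MM_FROUND_TO_ZERO);
+
+   round toward negative infinity (what _mm_floor_ps expands to below) and
+   truncate without raising a precision exception, respectively. */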
+/// +/// \param X +/// A 128-bit vector of [4 x float] values to be rounded up. +/// \returns A 128-bit vector of [4 x float] containing the rounded values. +#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL) + +/// Rounds up each element of the 128-bit vector of [2 x double] to an +/// integer and returns the rounded values in a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_ceil_pd(__m128d X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double] values to be rounded up. +/// \returns A 128-bit vector of [2 x double] containing the rounded values. +#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL) + +/// Copies three upper elements of the first 128-bit vector operand to +/// the corresponding three upper elements of the 128-bit result vector of +/// [4 x float]. Rounds up the lowest element of the second 128-bit vector +/// operand to an integer and copies it to the lowest element of the 128-bit +/// result vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_ceil_ss(__m128 X, __m128 Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is +/// rounded up to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [4 x float] containing the copied and rounded +/// values. +#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL) + +/// Copies the upper element of the first 128-bit vector operand to the +/// corresponding upper element of the 128-bit result vector of [2 x double]. +/// Rounds up the lower element of the second 128-bit vector operand to an +/// integer and copies it to the lower element of the 128-bit result vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_ceil_sd(__m128d X, __m128d Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [2 x double]. The value stored in bits [63:0] is +/// rounded up to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [2 x double] containing the copied and rounded +/// values. +#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL) + +/// Rounds down each element of the 128-bit vector of [4 x float] to an +/// an integer and returns the rounded values in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_floor_ps(__m128 X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float] values to be rounded down. +/// \returns A 128-bit vector of [4 x float] containing the rounded values. +#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR) + +/// Rounds down each element of the 128-bit vector of [2 x double] to an +/// integer and returns the rounded values in a 128-bit vector of +/// [2 x double]. 
+/// +/// \headerfile +/// +/// \code +/// __m128d _mm_floor_pd(__m128d X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [2 x double] containing the rounded values. +#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR) + +/// Copies three upper elements of the first 128-bit vector operand to +/// the corresponding three upper elements of the 128-bit result vector of +/// [4 x float]. Rounds down the lowest element of the second 128-bit vector +/// operand to an integer and copies it to the lowest element of the 128-bit +/// result vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_floor_ss(__m128 X, __m128 Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is +/// rounded down to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [4 x float] containing the copied and rounded +/// values. +#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR) + +/// Copies the upper element of the first 128-bit vector operand to the +/// corresponding upper element of the 128-bit result vector of [2 x double]. +/// Rounds down the lower element of the second 128-bit vector operand to an +/// integer and copies it to the lower element of the 128-bit result vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_floor_sd(__m128d X, __m128d Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [2 x double]. The value stored in bits [63:0] is +/// rounded down to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [2 x double] containing the copied and rounded +/// values. +#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR) + +/// Rounds each element of the 128-bit vector of [4 x float] to an +/// integer value according to the rounding control specified by the second +/// argument and returns the rounded values in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_round_ps(__m128 X, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [4 x float] containing the rounded values. 
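+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build (e.g. compiled with -msse4.1):
+/// \code
+/// __m128 v = _mm_set_ps(3.7f, -1.2f, 0.5f, 2.0f);
+/// __m128 r = _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+/// // Lanes become 4.0f, -1.0f, 0.0f, 2.0f; 0.5f rounds to 0.0f under
+/// // round-to-nearest-even, and no precision exception is raised.
+/// \endcode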
+#define _mm_round_ps(X, M) \ + (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)) + +/// Copies three upper elements of the first 128-bit vector operand to +/// the corresponding three upper elements of the 128-bit result vector of +/// [4 x float]. Rounds the lowest element of the second 128-bit vector +/// operand to an integer value according to the rounding control specified +/// by the third argument and copies it to the lowest element of the 128-bit +/// result vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_round_ss(__m128 X, __m128 Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is +/// rounded to the nearest integer using the specified rounding control and +/// copied to the corresponding bits of the result. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [4 x float] containing the copied and rounded +/// values. +#define _mm_round_ss(X, Y, M) \ + (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (M)) + +/// Rounds each element of the 128-bit vector of [2 x double] to an +/// integer value according to the rounding control specified by the second +/// argument and returns the rounded values in a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_round_pd(__m128d X, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [2 x double] containing the rounded values. +#define _mm_round_pd(X, M) \ + (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)) + +/// Copies the upper element of the first 128-bit vector operand to the +/// corresponding upper element of the 128-bit result vector of [2 x double]. +/// Rounds the lower element of the second 128-bit vector operand to an +/// integer value according to the rounding control specified by the third +/// argument and copies it to the lower element of the 128-bit result vector +/// of [2 x double]. 
+/// +/// \headerfile +/// +/// \code +/// __m128d _mm_round_sd(__m128d X, __m128d Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [2 x double]. The value stored in bits [63:0] is +/// rounded to the nearest integer using the specified rounding control and +/// copied to the corresponding bits of the result. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [2 x double] containing the copied and rounded +/// values. +#define _mm_round_sd(X, Y, M) \ + (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (M)) + +/* SSE4 Packed Blending Intrinsics. */ +/// Returns a 128-bit vector of [2 x double] where the values are +/// selected from either the first or second operand as specified by the +/// third operand, the control mask. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_blend_pd(__m128d V1, __m128d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction. +/// +/// \param V1 +/// A 128-bit vector of [2 x double]. +/// \param V2 +/// A 128-bit vector of [2 x double]. +/// \param M +/// An immediate integer operand, with mask bits [1:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 64-bit +/// element in operand \a V1 is copied to the same position in the result. +/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2 +/// is copied to the same position in the result. +/// \returns A 128-bit vector of [2 x double] containing the copied values. +#define _mm_blend_pd(V1, V2, M) \ + (__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \ + (__v2df)(__m128d)(V2), (int)(M)) + +/// Returns a 128-bit vector of [4 x float] where the values are selected +/// from either the first or second operand as specified by the third +/// operand, the control mask. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_blend_ps(__m128 V1, __m128 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPS / BLENDPS instruction. +/// +/// \param V1 +/// A 128-bit vector of [4 x float]. +/// \param V2 +/// A 128-bit vector of [4 x float]. +/// \param M +/// An immediate integer operand, with mask bits [3:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 32-bit +/// element in operand \a V1 is copied to the same position in the result. +/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2 +/// is copied to the same position in the result. +/// \returns A 128-bit vector of [4 x float] containing the copied values. 
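+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build: with mask 0xA (binary 1010), lanes 1 and 3 come
+/// from \a V2 and lanes 0 and 2 from \a V1:
+/// \code
+/// __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes 3..0
+/// __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
+/// __m128 r = _mm_blend_ps(a, b, 0xA);             // lanes 3..0: 8, 3, 6, 1
+/// \endcode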
+#define _mm_blend_ps(V1, V2, M) \ + (__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \ + (__v4sf)(__m128)(V2), (int)(M)) + +/// Returns a 128-bit vector of [2 x double] where the values are +/// selected from either the first or second operand as specified by the +/// third operand, the control mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPD / BLENDVPD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [2 x double]. +/// \param __V2 +/// A 128-bit vector of [2 x double]. +/// \param __M +/// A 128-bit vector operand, with mask bits 127 and 63 specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 64-bit element in operand \a __V1 is copied to the same +/// position in the result. When a mask bit is 1, the corresponding 64-bit +/// element in operand \a __V2 is copied to the same position in the result. +/// \returns A 128-bit vector of [2 x double] containing the copied values. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M) +{ + return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2, + (__v2df)__M); +} + +/// Returns a 128-bit vector of [4 x float] where the values are +/// selected from either the first or second operand as specified by the +/// third operand, the control mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPS / BLENDVPS instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x float]. +/// \param __V2 +/// A 128-bit vector of [4 x float]. +/// \param __M +/// A 128-bit vector operand, with mask bits 127, 95, 63, and 31 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 32-bit element in operand \a __V1 is copied to the same +/// position in the result. When a mask bit is 1, the corresponding 32-bit +/// element in operand \a __V2 is copied to the same position in the result. +/// \returns A 128-bit vector of [4 x float] containing the copied values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M) +{ + return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2, + (__v4sf)__M); +} + +/// Returns a 128-bit vector of [16 x i8] where the values are selected +/// from either of the first or second operand as specified by the third +/// operand, the control mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPBLENDVB / PBLENDVB instruction. +/// +/// \param __V1 +/// A 128-bit vector of [16 x i8]. +/// \param __V2 +/// A 128-bit vector of [16 x i8]. +/// \param __M +/// A 128-bit vector operand, with mask bits 127, 119, 111...7 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 8-bit element in operand \a __V1 is copied to the same +/// position in the result. When a mask bit is 1, the corresponding 8-bit +/// element in operand \a __V2 is copied to the same position in the result. +/// \returns A 128-bit vector of [16 x i8] containing the copied values. 
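+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build and previously initialized vectors x, y, a, b:
+/// \code
+/// __m128i mask = _mm_cmpeq_epi8(a, b);       // 0xFF in each byte where a == b
+/// __m128i r = _mm_blendv_epi8(x, y, mask);   // byte from y where a == b, else from x
+/// \endcode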
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M) +{ + return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2, + (__v16qi)__M); +} + +/// Returns a 128-bit vector of [8 x i16] where the values are selected +/// from either of the first or second operand as specified by the third +/// operand, the control mask. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_blend_epi16(__m128i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPBLENDW / PBLENDW instruction. +/// +/// \param V1 +/// A 128-bit vector of [8 x i16]. +/// \param V2 +/// A 128-bit vector of [8 x i16]. +/// \param M +/// An immediate integer operand, with mask bits [7:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 16-bit +/// element in operand \a V1 is copied to the same position in the result. +/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2 +/// is copied to the same position in the result. +/// \returns A 128-bit vector of [8 x i16] containing the copied values. +#define _mm_blend_epi16(V1, V2, M) \ + (__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \ + (__v8hi)(__m128i)(V2), (int)(M)) + +/* SSE4 Dword Multiply Instructions. */ +/// Multiples corresponding elements of two 128-bit vectors of [4 x i32] +/// and returns the lower 32 bits of the each product in a 128-bit vector of +/// [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULLD / PMULLD instruction. +/// +/// \param __V1 +/// A 128-bit integer vector. +/// \param __V2 +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the products of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mullo_epi32 (__m128i __V1, __m128i __V2) +{ + return (__m128i) ((__v4su)__V1 * (__v4su)__V2); +} + +/// Multiplies corresponding even-indexed elements of two 128-bit +/// vectors of [4 x i32] and returns a 128-bit vector of [2 x i64] +/// containing the products. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULDQ / PMULDQ instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. +/// \param __V2 +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [2 x i64] containing the products of both +/// operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mul_epi32 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2); +} + +/* SSE4 Floating Point Dot Product Instructions. */ +/// Computes the dot product of the two 128-bit vectors of [4 x float] +/// and returns it in the elements of the 128-bit result vector of +/// [4 x float]. +/// +/// The immediate integer operand controls which input elements +/// will contribute to the dot product, and where the final results are +/// returned. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_dp_ps(__m128 X, __m128 Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPS / DPPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. +/// \param Y +/// A 128-bit vector of [4 x float]. +/// \param M +/// An immediate integer operand. Mask bits [7:4] determine which elements +/// of the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [7] corresponding to the highest element of each [4 x +/// float] vector. 
If a bit is set, the corresponding elements from the two +/// input vectors are used as an input for dot product; otherwise that input +/// is treated as zero. Bits [3:0] determine which elements of the result +/// will receive a copy of the final dot product, with bit [0] corresponding +/// to the lowest element and bit [3] corresponding to the highest element of +/// each [4 x float] subvector. If a bit is set, the dot product is returned +/// in the corresponding element; otherwise that element is set to zero. +/// \returns A 128-bit vector of [4 x float] containing the dot product. +#define _mm_dp_ps(X, Y, M) \ + (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (M)) + +/// Computes the dot product of the two 128-bit vectors of [2 x double] +/// and returns it in the elements of the 128-bit result vector of +/// [2 x double]. +/// +/// The immediate integer operand controls which input +/// elements will contribute to the dot product, and where the final results +/// are returned. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_dp_pd(__m128d X, __m128d Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPD / DPPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. +/// \param Y +/// A 128-bit vector of [2 x double]. +/// \param M +/// An immediate integer operand. Mask bits [5:4] determine which elements +/// of the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [5] corresponding to the highest element of each of [2 x +/// double] vector. If a bit is set, the corresponding elements from the two +/// input vectors are used as an input for dot product; otherwise that input +/// is treated as zero. Bits [1:0] determine which elements of the result +/// will receive a copy of the final dot product, with bit [0] corresponding +/// to the lowest element and bit [1] corresponding to the highest element of +/// each [2 x double] vector. If a bit is set, the dot product is returned in +/// the corresponding element; otherwise that element is set to zero. +#define _mm_dp_pd(X, Y, M) \ + (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (M)) + +/* SSE4 Streaming Load Hint Instruction. */ +/// Loads integer values from a 128-bit aligned memory location to a +/// 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTDQA / MOVNTDQA instruction. +/// +/// \param __V +/// A pointer to a 128-bit aligned memory location that contains the integer +/// values. +/// \returns A 128-bit integer vector containing the data stored at the +/// specified memory location. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_stream_load_si128 (__m128i const *__V) +{ + return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V); +} + +/* SSE4 Packed Integer Min/Max Instructions. */ +/// Compares the corresponding elements of two 128-bit vectors of +/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the lesser +/// of the two values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSB / PMINSB instruction. +/// +/// \param __V1 +/// A 128-bit vector of [16 x i8]. +/// \param __V2 +/// A 128-bit vector of [16 x i8] +/// \returns A 128-bit vector of [16 x i8] containing the lesser values. 
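+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build; the comparison is signed, so -5 is the minimum:
+/// \code
+/// __m128i a = _mm_set1_epi8(-5);
+/// __m128i b = _mm_set1_epi8(3);
+/// __m128i m = _mm_min_epi8(a, b);   // every byte of m is -5
+/// \endcode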
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epi8 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSB / PMAXSB instruction. +/// +/// \param __V1 +/// A 128-bit vector of [16 x i8]. +/// \param __V2 +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit vector of [16 x i8] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epi8 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the lesser +/// value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINUW / PMINUW instruction. +/// +/// \param __V1 +/// A 128-bit vector of [8 x u16]. +/// \param __V2 +/// A 128-bit vector of [8 x u16]. +/// \returns A 128-bit vector of [8 x u16] containing the lesser values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epu16 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXUW / PMAXUW instruction. +/// +/// \param __V1 +/// A 128-bit vector of [8 x u16]. +/// \param __V2 +/// A 128-bit vector of [8 x u16]. +/// \returns A 128-bit vector of [8 x u16] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epu16 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser +/// value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSD / PMINSD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. +/// \param __V2 +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the lesser values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epi32 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSD / PMAXSD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. +/// \param __V2 +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epi32 (__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser +/// value of the two. 
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPMINUD / PMINUD instruction.
+///
+/// \param __V1
+/// A 128-bit vector of [4 x u32].
+/// \param __V2
+/// A 128-bit vector of [4 x u32].
+/// \returns A 128-bit vector of [4 x u32] containing the lesser values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu32 (__m128i __V1, __m128i __V2)
+{
+  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+}
+
+/// Compares the corresponding elements of two 128-bit vectors of
+/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the
+/// greater value of the two.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPMAXUD / PMAXUD instruction.
+///
+/// \param __V1
+/// A 128-bit vector of [4 x u32].
+/// \param __V2
+/// A 128-bit vector of [4 x u32].
+/// \returns A 128-bit vector of [4 x u32] containing the greater values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu32 (__m128i __V1, __m128i __V2)
+{
+  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+}
+
+/* SSE4 Insertion and Extraction from XMM Register Instructions. */
+/// Takes the first argument \a X and inserts an element from the second
+/// argument \a Y as selected by the third argument \a N. That result then
+/// has elements zeroed out also as selected by the third argument \a N. The
+/// resulting 128-bit vector of [4 x float] is then returned.
+///
+/// \headerfile
+///
+/// \code
+/// __m128 _mm_insert_ps(__m128 X, __m128 Y, const int N);
+/// \endcode
+///
+/// This intrinsic corresponds to the VINSERTPS instruction.
+///
+/// \param X
+/// A 128-bit vector source operand of [4 x float]. With the exception of
+/// those bits in the result copied from parameter \a Y and zeroed by bits
+/// [3:0] of \a N, all bits from this parameter are copied to the result.
+/// \param Y
+/// A 128-bit vector source operand of [4 x float]. One single-precision
+/// floating-point element from this source, as determined by the immediate
+/// parameter, is copied to the result.
+/// \param N
+/// Specifies which bits from operand \a Y will be copied, which bits in the
+/// result they will be copied to, and which bits in the result will be
+/// cleared. The following assignments are made: \n
+/// Bits [7:6] specify the bits to copy from operand \a Y: \n
+/// 00: Selects bits [31:0] from operand \a Y. \n
+/// 01: Selects bits [63:32] from operand \a Y. \n
+/// 10: Selects bits [95:64] from operand \a Y. \n
+/// 11: Selects bits [127:96] from operand \a Y. \n
+/// Bits [5:4] specify the bits in the result to which the selected bits
+/// from operand \a Y are copied: \n
+/// 00: Copies the selected bits from \a Y to result bits [31:0]. \n
+/// 01: Copies the selected bits from \a Y to result bits [63:32]. \n
+/// 10: Copies the selected bits from \a Y to result bits [95:64]. \n
+/// 11: Copies the selected bits from \a Y to result bits [127:96]. \n
+/// Bits [3:0]: If any of these bits are set, the corresponding result
+/// element is cleared.
+/// \returns A 128-bit vector of [4 x float] containing the copied
+/// single-precision floating point elements from the operands.
+#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
+
+/// Extracts a 32-bit integer from a 128-bit vector of [4 x float] and
+/// returns it, using the immediate value parameter \a N as a selector.
+/// +/// \headerfile +/// +/// \code +/// int _mm_extract_ps(__m128 X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTPS / EXTRACTPS +/// instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. +/// \param N +/// An immediate value. Bits [1:0] determines which bits from the argument +/// \a X are extracted and returned: \n +/// 00: Bits [31:0] of parameter \a X are returned. \n +/// 01: Bits [63:32] of parameter \a X are returned. \n +/// 10: Bits [95:64] of parameter \a X are returned. \n +/// 11: Bits [127:96] of parameter \a X are returned. +/// \returns A 32-bit integer containing the extracted 32 bits of float data. +#define _mm_extract_ps(X, N) (__extension__ \ + ({ union { int __i; float __f; } __t; \ + __t.__f = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \ + __t.__i;})) + +/* Miscellaneous insert and extract macros. */ +/* Extract a single-precision float from X at index N into D. */ +#define _MM_EXTRACT_FLOAT(D, X, N) \ + { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); } + +/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create + an index suitable for _mm_insert_ps. */ +#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z)) + +/* Extract a float from X at index N into the first index of the return. */ +#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \ + _MM_MK_INSERTPS_NDX((N), 0, 0x0e)) + +/* Insert int into packed integer array at index. */ +/// Constructs a 128-bit vector of [16 x i8] by first making a copy of +/// the 128-bit integer vector parameter, and then inserting the lower 8 bits +/// of an integer parameter \a I into an offset specified by the immediate +/// value parameter \a N. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi8(__m128i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRB / PINSRB instruction. +/// +/// \param X +/// A 128-bit integer vector of [16 x i8]. This vector is copied to the +/// result and then one of the sixteen elements in the result vector is +/// replaced by the lower 8 bits of \a I. +/// \param I +/// An integer. The lower 8 bits of this operand are written to the result +/// beginning at the offset specified by \a N. +/// \param N +/// An immediate value. Bits [3:0] specify the bit offset in the result at +/// which the lower 8 bits of \a I are written. \n +/// 0000: Bits [7:0] of the result are used for insertion. \n +/// 0001: Bits [15:8] of the result are used for insertion. \n +/// 0010: Bits [23:16] of the result are used for insertion. \n +/// 0011: Bits [31:24] of the result are used for insertion. \n +/// 0100: Bits [39:32] of the result are used for insertion. \n +/// 0101: Bits [47:40] of the result are used for insertion. \n +/// 0110: Bits [55:48] of the result are used for insertion. \n +/// 0111: Bits [63:56] of the result are used for insertion. \n +/// 1000: Bits [71:64] of the result are used for insertion. \n +/// 1001: Bits [79:72] of the result are used for insertion. \n +/// 1010: Bits [87:80] of the result are used for insertion. \n +/// 1011: Bits [95:88] of the result are used for insertion. \n +/// 1100: Bits [103:96] of the result are used for insertion. \n +/// 1101: Bits [111:104] of the result are used for insertion. \n +/// 1110: Bits [119:112] of the result are used for insertion. \n +/// 1111: Bits [127:120] of the result are used for insertion. 
+/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi8(X, I, N) \ + (__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \ + (int)(I), (int)(N)) + +/// Constructs a 128-bit vector of [4 x i32] by first making a copy of +/// the 128-bit integer vector parameter, and then inserting the 32-bit +/// integer parameter \a I at the offset specified by the immediate value +/// parameter \a N. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi32(__m128i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRD / PINSRD instruction. +/// +/// \param X +/// A 128-bit integer vector of [4 x i32]. This vector is copied to the +/// result and then one of the four elements in the result vector is +/// replaced by \a I. +/// \param I +/// A 32-bit integer that is written to the result beginning at the offset +/// specified by \a N. +/// \param N +/// An immediate value. Bits [1:0] specify the bit offset in the result at +/// which the integer \a I is written. \n +/// 00: Bits [31:0] of the result are used for insertion. \n +/// 01: Bits [63:32] of the result are used for insertion. \n +/// 10: Bits [95:64] of the result are used for insertion. \n +/// 11: Bits [127:96] of the result are used for insertion. +/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi32(X, I, N) \ + (__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \ + (int)(I), (int)(N)) + +#ifdef __x86_64__ +/// Constructs a 128-bit vector of [2 x i64] by first making a copy of +/// the 128-bit integer vector parameter, and then inserting the 64-bit +/// integer parameter \a I, using the immediate value parameter \a N as an +/// insertion location selector. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi64(__m128i X, long long I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRQ / PINSRQ instruction. +/// +/// \param X +/// A 128-bit integer vector of [2 x i64]. This vector is copied to the +/// result and then one of the two elements in the result vector is replaced +/// by \a I. +/// \param I +/// A 64-bit integer that is written to the result beginning at the offset +/// specified by \a N. +/// \param N +/// An immediate value. Bit [0] specifies the bit offset in the result at +/// which the integer \a I is written. \n +/// 0: Bits [63:0] of the result are used for insertion. \n +/// 1: Bits [127:64] of the result are used for insertion. \n +/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi64(X, I, N) \ + (__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \ + (long long)(I), (int)(N)) +#endif /* __x86_64__ */ + +/* Extract int from packed integer array at index. This returns the element + * as a zero extended value, so it is unsigned. + */ +/// Extracts an 8-bit element from the 128-bit integer vector of +/// [16 x i8], using the immediate value parameter \a N as a selector. +/// +/// \headerfile +/// +/// \code +/// int _mm_extract_epi8(__m128i X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPEXTRB / PEXTRB instruction. +/// +/// \param X +/// A 128-bit integer vector. +/// \param N +/// An immediate value. Bits [3:0] specify which 8-bit vector element from +/// the argument \a X to extract and copy to the result. \n +/// 0000: Bits [7:0] of parameter \a X are extracted. \n +/// 0001: Bits [15:8] of the parameter \a X are extracted. 
\n
+/// 0010: Bits [23:16] of the parameter \a X are extracted. \n
+/// 0011: Bits [31:24] of the parameter \a X are extracted. \n
+/// 0100: Bits [39:32] of the parameter \a X are extracted. \n
+/// 0101: Bits [47:40] of the parameter \a X are extracted. \n
+/// 0110: Bits [55:48] of the parameter \a X are extracted. \n
+/// 0111: Bits [63:56] of the parameter \a X are extracted. \n
+/// 1000: Bits [71:64] of the parameter \a X are extracted. \n
+/// 1001: Bits [79:72] of the parameter \a X are extracted. \n
+/// 1010: Bits [87:80] of the parameter \a X are extracted. \n
+/// 1011: Bits [95:88] of the parameter \a X are extracted. \n
+/// 1100: Bits [103:96] of the parameter \a X are extracted. \n
+/// 1101: Bits [111:104] of the parameter \a X are extracted. \n
+/// 1110: Bits [119:112] of the parameter \a X are extracted. \n
+/// 1111: Bits [127:120] of the parameter \a X are extracted.
+/// \returns An unsigned integer, whose lower 8 bits are selected from the
+/// 128-bit integer vector parameter and the remaining bits are assigned
+/// zeros.
+#define _mm_extract_epi8(X, N) \
+  (int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+                                                   (int)(N))
+
+/// Extracts a 32-bit element from the 128-bit integer vector of
+/// [4 x i32], using the immediate value parameter \a N as a selector.
+///
+/// \headerfile
+///
+/// \code
+/// int _mm_extract_epi32(__m128i X, const int N);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRD / PEXTRD instruction.
+///
+/// \param X
+/// A 128-bit integer vector.
+/// \param N
+/// An immediate value. Bits [1:0] specify which 32-bit vector element from
+/// the argument \a X to extract and copy to the result. \n
+/// 00: Bits [31:0] of the parameter \a X are extracted. \n
+/// 01: Bits [63:32] of the parameter \a X are extracted. \n
+/// 10: Bits [95:64] of the parameter \a X are extracted. \n
+/// 11: Bits [127:96] of the parameter \a X are extracted.
+/// \returns An integer, whose lower 32 bits are selected from the 128-bit
+/// integer vector parameter and the remaining bits are assigned zeros.
+#define _mm_extract_epi32(X, N) \
+  (int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N))
+
+#ifdef __x86_64__
+/// Extracts a 64-bit element from the 128-bit integer vector of
+/// [2 x i64], using the immediate value parameter \a N as a selector.
+///
+/// \headerfile
+///
+/// \code
+/// long long _mm_extract_epi64(__m128i X, const int N);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRQ / PEXTRQ instruction.
+///
+/// \param X
+/// A 128-bit integer vector.
+/// \param N
+/// An immediate value. Bit [0] specifies which 64-bit vector element from
+/// the argument \a X to return. \n
+/// 0: Bits [63:0] are returned. \n
+/// 1: Bits [127:64] are returned. \n
+/// \returns A 64-bit integer.
+#define _mm_extract_epi64(X, N) \
+  (long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N))
+#endif /* __x86_64 */
+
+/* SSE4 128-bit Packed Integer Comparisons. */
+/// Tests whether the specified bits in a 128-bit integer vector are all
+/// zeros.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPTEST / PTEST instruction.
+///
+/// \param __M
+/// A 128-bit integer vector containing the bits to be tested.
+/// \param __V
+/// A 128-bit integer vector selecting which bits to test in operand \a __M.
+/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
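+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build and previously initialized vectors a and b; since
+/// PTEST sets ZF when (__M & __V) == 0, this tests for bitwise equality:
+/// \code
+/// __m128i diff = _mm_xor_si128(a, b);
+/// if (_mm_testz_si128(diff, diff)) {
+///   /* a and b are bit-for-bit identical */
+/// }
+/// \endcode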
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_testz_si128(__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V); +} + +/// Tests whether the specified bits in a 128-bit integer vector are all +/// ones. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param __M +/// A 128-bit integer vector containing the bits to be tested. +/// \param __V +/// A 128-bit integer vector selecting which bits to test in operand \a __M. +/// \returns TRUE if the specified bits are all ones; FALSE otherwise. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_testc_si128(__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V); +} + +/// Tests whether the specified bits in a 128-bit integer vector are +/// neither all zeros nor all ones. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param __M +/// A 128-bit integer vector containing the bits to be tested. +/// \param __V +/// A 128-bit integer vector selecting which bits to test in operand \a __M. +/// \returns TRUE if the specified bits are neither all zeros nor all ones; +/// FALSE otherwise. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_testnzc_si128(__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V); +} + +/// Tests whether the specified bits in a 128-bit integer vector are all +/// ones. +/// +/// \headerfile +/// +/// \code +/// int _mm_test_all_ones(__m128i V); +/// \endcode +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param V +/// A 128-bit integer vector containing the bits to be tested. +/// \returns TRUE if the bits specified in the operand are all set to 1; FALSE +/// otherwise. +#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V))) + +/// Tests whether the specified bits in a 128-bit integer vector are +/// neither all zeros nor all ones. +/// +/// \headerfile +/// +/// \code +/// int _mm_test_mix_ones_zeros(__m128i M, __m128i V); +/// \endcode +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param M +/// A 128-bit integer vector containing the bits to be tested. +/// \param V +/// A 128-bit integer vector selecting which bits to test in operand \a M. +/// \returns TRUE if the specified bits are neither all zeros nor all ones; +/// FALSE otherwise. +#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V)) + +/// Tests whether the specified bits in a 128-bit integer vector are all +/// zeros. +/// +/// \headerfile +/// +/// \code +/// int _mm_test_all_zeros(__m128i M, __m128i V); +/// \endcode +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param M +/// A 128-bit integer vector containing the bits to be tested. +/// \param V +/// A 128-bit integer vector selecting which bits to test in operand \a M. +/// \returns TRUE if the specified bits are all zeros; FALSE otherwise. +#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V)) + +/* SSE4 64-bit Packed Integer Comparisons. */ +/// Compares each of the corresponding 64-bit values of the 128-bit +/// integer vectors for equality. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQQ / PCMPEQQ instruction. +/// +/// \param __V1 +/// A 128-bit integer vector. +/// \param __V2 +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. 
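+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build: each 64-bit lane becomes all-ones on equality and
+/// all-zeros otherwise, so the two sign bits summarize the result:
+/// \code
+/// __m128i eq = _mm_cmpeq_epi64(a, b);
+/// int both_equal = _mm_movemask_pd(_mm_castsi128_pd(eq)) == 0x3;
+/// \endcode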
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmpeq_epi64(__m128i __V1, __m128i __V2) +{ + return (__m128i)((__v2di)__V1 == (__v2di)__V2); +} + +/* SSE4 Packed Integer Sign-Extension. */ +/// Sign-extends each of the lower eight 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a +/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXBW / PMOVSXBW instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign- +/// extended to 16-bit values. +/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi8_epi16(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi); +} + +/// Sign-extends each of the lower four 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a +/// 128-bit vector of [4 x i32]. The upper twelve elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXBD / PMOVSXBD instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are +/// sign-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi8_epi32(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si); +} + +/// Sign-extends each of the lower two 8-bit integer elements of a +/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXBQ / PMOVSXBQ instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are +/// sign-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi8_epi64(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di); +} + +/// Sign-extends each of the lower four 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in +/// a 128-bit vector of [4 x i32]. The upper four elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXWD / PMOVSXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are +/// sign-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values. 
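+///
+/// A minimal usage sketch (not part of the upstream documentation), assuming
+/// an SSE4.1-enabled build; the four low 16-bit lanes widen to 32 bits with
+/// their sign preserved:
+/// \code
+/// __m128i w = _mm_set_epi16(0, 0, 0, 0, 7, -2, 5, -8);
+/// __m128i d = _mm_cvtepi16_epi32(w);   // 32-bit lanes 3..0: 7, -2, 5, -8
+/// \endcode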
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi16_epi32(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si); +} + +/// Sign-extends each of the lower two 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper six elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXWQ / PMOVSXWQ instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are +/// sign-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi16_epi64(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di); +} + +/// Sign-extends each of the lower two 32-bit integer elements of a +/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXDQ / PMOVSXDQ instruction. +/// +/// \param __V +/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are +/// sign-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi32_epi64(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di); +} + +/* SSE4 Packed Integer Zero-Extension. */ +/// Zero-extends each of the lower eight 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a +/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXBW / PMOVZXBW instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are +/// zero-extended to 16-bit values. +/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepu8_epi16(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi); +} + +/// Zero-extends each of the lower four 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a +/// 128-bit vector of [4 x i32]. The upper twelve elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXBD / PMOVZXBD instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are +/// zero-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepu8_epi32(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si); +} + +/// Zero-extends each of the lower two 8-bit integer elements of a +/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input +/// vector are unused. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXBQ / PMOVZXBQ instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are +/// zero-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepu8_epi64(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di); +} + +/// Zero-extends each of the lower four 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in +/// a 128-bit vector of [4 x i32]. The upper four elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXWD / PMOVZXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are +/// zero-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepu16_epi32(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si); +} + +/// Zero-extends each of the lower two 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper six elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXWQ / PMOVZXWQ instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are +/// zero-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepu16_epi64(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di); +} + +/// Zero-extends each of the lower two 32-bit integer elements of a +/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXDQ / PMOVZXDQ instruction. +/// +/// \param __V +/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are +/// zero-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepu32_epi64(__m128i __V) +{ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di); +} + +/* SSE4 Pack with Unsigned Saturation. */ +/// Converts 32-bit signed integers from both 128-bit integer vector +/// operands into 16-bit unsigned integers, and returns the packed result. +/// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than +/// 0x0000 are saturated to 0x0000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKUSDW / PACKUSDW instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a +/// signed integer and is converted to a 16-bit unsigned integer with +/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values +/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values +/// are written to the lower 64 bits of the result. 
+/// \param __V2 +/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a +/// signed integer and is converted to a 16-bit unsigned integer with +/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values +/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values +/// are written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [8 x i16] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_packus_epi32(__m128i __V1, __m128i __V2) +{ + return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2); +} + +/* SSE4 Multiple Packed Sums of Absolute Difference. */ +/// Subtracts 8-bit unsigned integer values and computes the absolute +/// values of the differences to the corresponding bits in the destination. +/// Then sums of the absolute differences are returned according to the bit +/// fields in the immediate operand. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mpsadbw_epu8(__m128i X, __m128i Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VMPSADBW / MPSADBW instruction. +/// +/// \param X +/// A 128-bit vector of [16 x i8]. +/// \param Y +/// A 128-bit vector of [16 x i8]. +/// \param M +/// An 8-bit immediate operand specifying how the absolute differences are to +/// be calculated, according to the following algorithm: +/// \code +/// // M2 represents bit 2 of the immediate operand +/// // M10 represents bits [1:0] of the immediate operand +/// i = M2 * 4; +/// j = M10 * 4; +/// for (k = 0; k < 8; k = k + 1) { +/// d0 = abs(X[i + k + 0] - Y[j + 0]); +/// d1 = abs(X[i + k + 1] - Y[j + 1]); +/// d2 = abs(X[i + k + 2] - Y[j + 2]); +/// d3 = abs(X[i + k + 3] - Y[j + 3]); +/// r[k] = d0 + d1 + d2 + d3; +/// } +/// \endcode +/// \returns A 128-bit integer vector containing the sums of the sets of +/// absolute differences between both operands. +#define _mm_mpsadbw_epu8(X, Y, M) \ + (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (M)) + +/// Finds the minimum unsigned 16-bit element in the input 128-bit +/// vector of [8 x u16] and returns it and along with its index. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPHMINPOSUW / PHMINPOSUW +/// instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x u16]. +/// \returns A 128-bit value where bits [15:0] contain the minimum value found +/// in parameter \a __V, bits [18:16] contain the index of the minimum value +/// and the remaining bits are set to 0. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_minpos_epu16(__m128i __V) +{ + return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V); +} + +/* Handle the sse4.2 definitions here. */ + +/* These definitions are normally in nmmintrin.h, but gcc puts them in here + so we'll do the same. */ + +#undef __DEFAULT_FN_ATTRS +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2"))) + +/* These specify the type of data that we're comparing. */ +#define _SIDD_UBYTE_OPS 0x00 +#define _SIDD_UWORD_OPS 0x01 +#define _SIDD_SBYTE_OPS 0x02 +#define _SIDD_SWORD_OPS 0x03 + +/* These specify the type of comparison operation. */ +#define _SIDD_CMP_EQUAL_ANY 0x00 +#define _SIDD_CMP_RANGES 0x04 +#define _SIDD_CMP_EQUAL_EACH 0x08 +#define _SIDD_CMP_EQUAL_ORDERED 0x0c + +/* These macros specify the polarity of the operation. 
 */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros are used in _mm_cmpXstrm() to specify the return. */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
+
+/* SSE4.2 Packed Comparison Intrinsics. */
+/// Uses the immediate operand \a M to perform a comparison of string
+/// data with implicitly defined lengths that is contained in source operands
+/// \a A and \a B. Returns a 128-bit integer vector representing the result
+/// mask of the comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_cmpistrm(__m128i A, __m128i B, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPCMPISTRM / PCMPISTRM
+/// instruction.
+///
+/// \param A
+///    A 128-bit integer vector containing one of the source operands to be
+///    compared.
+/// \param B
+///    A 128-bit integer vector containing one of the source operands to be
+///    compared.
+/// \param M
+///    An 8-bit immediate operand specifying whether the characters are bytes or
+///    words, the type of comparison to perform, and the format of the return
+///    value. \n
+///    Bits [1:0]: Determine source data format. \n
+///      00: 16 unsigned bytes \n
+///      01: 8 unsigned words \n
+///      10: 16 signed bytes \n
+///      11: 8 signed words \n
+///    Bits [3:2]: Determine comparison type and aggregation method. \n
+///      00: Subset: Each character in \a B is compared for equality with all
+///          the characters in \a A. \n
+///      01: Ranges: Each character in \a B is compared to \a A. The comparison
+///          basis is greater than or equal for even-indexed elements in \a A,
+///          and less than or equal for odd-indexed elements in \a A. \n
+///      10: Match: Compare each pair of corresponding characters in \a A and
+///          \a B for equality. \n
+///      11: Substring: Search \a B for substring matches of \a A. \n
+///    Bits [5:4]: Determine whether to perform a one's complement on the bit
+///                mask of the comparison results. \n
+///      00: No effect. \n
+///      01: Negate the bit mask. \n
+///      10: No effect. \n
+///      11: Negate the bit mask only for bits with an index less than or equal
+///          to the size of \a A or \a B. \n
+///    Bit [6]: Determines whether the result is zero-extended or expanded to 16
+///             bytes. \n
+///      0: The result is zero-extended to 16 bytes. \n
+///      1: The result is expanded to 16 bytes (this expansion is performed by
+///         repeating each bit 8 or 16 times).
+/// \returns Returns a 128-bit integer vector representing the result mask of
+///    the comparison.
+#define _mm_cmpistrm(A, B, M) \
+  (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+                                       (__v16qi)(__m128i)(B), (int)(M))
+
+/// Uses the immediate operand \a M to perform a comparison of string
+/// data with implicitly defined lengths that is contained in source operands
+/// \a A and \a B. Returns an integer representing the result index of the
+/// comparison.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// int _mm_cmpistri(__m128i A, __m128i B, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI
+/// instruction.
+///
+/// \param A
+///    A 128-bit integer vector containing one of the source operands to be
+///    compared.
+/// \param B
+///    A 128-bit integer vector containing one of the source operands to be
+///    compared.
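+/// (Editor's hedged note, not upstream text: \a M is built by OR-ing together
+/// one macro from each _SIDD_ group defined above; for example, a byte-wise
+/// any-character search that returns the lowest matching index might use
+/// \code
+///   _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT
+/// \endcode
+/// while _SIDD_BIT_MASK / _SIDD_UNIT_MASK apply only to the _mm_cmpistrm
+/// mask-returning form.)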
+/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words, the type of comparison to perform, and the format of the return +/// value. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// Bit [6]: Determines whether the index of the lowest set bit or the +/// highest set bit is returned. \n +/// 0: The index of the least significant set bit. \n +/// 1: The index of the most significant set bit. \n +/// \returns Returns an integer representing the result index of the comparison. +#define _mm_cmpistri(A, B, M) \ + (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns a 128-bit integer vector representing the result +/// mask of the comparison. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_cmpestrm(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRM / PCMPESTRM +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words, the type of comparison to perform, and the format of the return +/// value. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. 
\n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// Bit [6]: Determines whether the result is zero-extended or expanded to 16 +/// bytes. \n +/// 0: The result is zero-extended to 16 bytes. \n +/// 1: The result is expanded to 16 bytes (this expansion is performed by +/// repeating each bit 8 or 16 times). \n +/// \returns Returns a 128-bit integer vector representing the result mask of +/// the comparison. +#define _mm_cmpestrm(A, LA, B, LB, M) \ + (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns an integer representing the result index of the +/// comparison. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestri(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words, the type of comparison to perform, and the format of the return +/// value. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// Bit [6]: Determines whether the index of the lowest set bit or the +/// highest set bit is returned. \n +/// 0: The index of the least significant set bit. \n +/// 1: The index of the most significant set bit. \n +/// \returns Returns an integer representing the result index of the comparison. +#define _mm_cmpestri(A, LA, B, LB, M) \ + (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) + +/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */ +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the bit mask is zero and the length of the +/// string in \a B is the maximum, otherwise, returns 0. 
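+///
+/// (Editor's hedged sketch, not upstream text: this "a" form is commonly the
+/// continue condition of a search loop -- true while the current 16-byte
+/// chunk was full and contained no match:
+/// \code
+///   while (_mm_cmpistra(needle, chunk,
+///                       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY)) {
+///     p += 16;
+///     chunk = _mm_loadu_si128((const __m128i *)p);
+///   }
+/// \endcode
+/// needle, chunk, and p are assumed to be set up by the caller, with p
+/// readable in 16-byte steps.)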
+/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistra(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the bit mask is zero and the length of the string in +/// \a B is the maximum; otherwise, returns 0. +#define _mm_cmpistra(A, B, M) \ + (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the bit mask is non-zero, otherwise, returns +/// 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistrc(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. 
\n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0. +#define _mm_cmpistrc(A, B, M) \ + (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns bit 0 of the resulting bit mask. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistro(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns bit 0 of the resulting bit mask. +#define _mm_cmpistro(A, B, M) \ + (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a A is less than +/// the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistrs(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. 
The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the length of the string in \a A is less than the +/// maximum, otherwise, returns 0. +#define _mm_cmpistrs(A, B, M) \ + (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a B is less than +/// the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistrz(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the length of the string in \a B is less than the +/// maximum, otherwise, returns 0. +#define _mm_cmpistrz(A, B, M) \ + (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the bit mask is zero and the length of the +/// string in \a B is the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestra(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. 
+/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the bit mask is zero and the length of the string in +/// \a B is the maximum, otherwise, returns 0. +#define _mm_cmpestra(A, LA, B, LB, M) \ + (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the resulting mask is non-zero, otherwise, +/// returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestrc(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. 
\n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0. +#define _mm_cmpestrc(A, LA, B, LB, M) \ + (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns bit 0 of the resulting bit mask. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestro(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns bit 0 of the resulting bit mask. +#define _mm_cmpestro(A, LA, B, LB, M) \ + (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a A is less than +/// the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestrs(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. 
+/// \param LB
+///    An integer that specifies the length of the string in \a B.
+/// \param M
+///    An 8-bit immediate operand specifying whether the characters are bytes or
+///    words and the type of comparison to perform. \n
+///    Bits [1:0]: Determine source data format. \n
+///      00: 16 unsigned bytes \n
+///      01: 8 unsigned words \n
+///      10: 16 signed bytes \n
+///      11: 8 signed words \n
+///    Bits [3:2]: Determine comparison type and aggregation method. \n
+///      00: Subset: Each character in \a B is compared for equality with all
+///          the characters in \a A. \n
+///      01: Ranges: Each character in \a B is compared to \a A. The comparison
+///          basis is greater than or equal for even-indexed elements in \a A,
+///          and less than or equal for odd-indexed elements in \a A. \n
+///      10: Match: Compare each pair of corresponding characters in \a A and
+///          \a B for equality. \n
+///      11: Substring: Search \a B for substring matches of \a A. \n
+///    Bits [5:4]: Determine whether to perform a one's complement on the bit
+///                mask of the comparison results. \n
+///      00: No effect. \n
+///      01: Negate the bit mask. \n
+///      10: No effect. \n
+///      11: Negate the bit mask only for bits with an index less than or equal
+///          to the size of \a A or \a B. \n
+/// \returns Returns 1 if the length of the string in \a A is less than the
+///    maximum, otherwise, returns 0.
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+  (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+                                    (__v16qi)(__m128i)(B), (int)(LB), \
+                                    (int)(M))
+
+/// Uses the immediate operand \a M to perform a comparison of string
+/// data with explicitly defined lengths that is contained in source operands
+/// \a A and \a B. Returns 1 if the length of the string in \a B is less than
+/// the maximum, otherwise, returns 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// int _mm_cmpestrz(__m128i A, int LA, __m128i B, int LB, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI
+/// instruction.
+///
+/// \param A
+///    A 128-bit integer vector containing one of the source operands to be
+///    compared.
+/// \param LA
+///    An integer that specifies the length of the string in \a A.
+/// \param B
+///    A 128-bit integer vector containing one of the source operands to be
+///    compared.
+/// \param LB
+///    An integer that specifies the length of the string in \a B.
+/// \param M
+///    An 8-bit immediate operand specifying whether the characters are bytes or
+///    words and the type of comparison to perform. \n
+///    Bits [1:0]: Determine source data format. \n
+///      00: 16 unsigned bytes \n
+///      01: 8 unsigned words \n
+///      10: 16 signed bytes \n
+///      11: 8 signed words \n
+///    Bits [3:2]: Determine comparison type and aggregation method. \n
+///      00: Subset: Each character in \a B is compared for equality with all
+///          the characters in \a A. \n
+///      01: Ranges: Each character in \a B is compared to \a A. The comparison
+///          basis is greater than or equal for even-indexed elements in \a A,
+///          and less than or equal for odd-indexed elements in \a A. \n
+///      10: Match: Compare each pair of corresponding characters in \a A and
+///          \a B for equality. \n
+///      11: Substring: Search \a B for substring matches of \a A. \n
+///    Bits [5:4]: Determine whether to perform a one's complement on the bit
+///                mask of the comparison results. \n
+///      00: No effect. \n
+///      01: Negate the bit mask. \n
+///      10: No effect. \n
+///      11: Negate the bit mask only for bits with an index less than or equal
+///          to the size of \a A or \a B.
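+/// (Editor's hedged sketch, not upstream text: the explicit-length forms take
+/// buffer lengths as arguments, so they also work on data that is not
+/// NUL-terminated; for example, comparing two counted byte buffers:
+/// \code
+///   int idx = _mm_cmpestri(a, la, b, lb,
+///                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
+///                          _SIDD_NEGATIVE_POLARITY);
+///   // idx is the position of the first mismatch, or 16 if none was found
+/// \endcode
+/// a and b are __m128i chunks loaded by the caller; la and lb are the counts
+/// of valid bytes in each.)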
+/// \returns Returns 1 if the length of the string in \a B is less than the
+///    maximum, otherwise, returns 0.
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+  (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+                                    (__v16qi)(__m128i)(B), (int)(LB), \
+                                    (int)(M))
+
+/* SSE4.2 Compare Packed Data -- Greater Than. */
+/// Compares each of the corresponding 64-bit values of the 128-bit
+/// integer vectors to determine if the values in the first operand are
+/// greater than those in the second operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VPCMPGTQ / PCMPGTQ instruction.
+///
+/// \param __V1
+///    A 128-bit integer vector.
+/// \param __V2
+///    A 128-bit integer vector.
+/// \returns A 128-bit integer vector containing the comparison results.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
+{
+  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
+}
+
+/* SSE4.2 Accumulate CRC32. */
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned char operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the CRC32B instruction.
+///
+/// \param __C
+///    An unsigned integer operand to add to the CRC-32C checksum of operand
+///    \a __D.
+/// \param __D
+///    An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+///    operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u8(unsigned int __C, unsigned char __D)
+{
+  return __builtin_ia32_crc32qi(__C, __D);
+}
+
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned short operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the CRC32W instruction.
+///
+/// \param __C
+///    An unsigned integer operand to add to the CRC-32C checksum of operand
+///    \a __D.
+/// \param __D
+///    An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+///    operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u16(unsigned int __C, unsigned short __D)
+{
+  return __builtin_ia32_crc32hi(__C, __D);
+}
+
+/// Adds the first unsigned integer operand to the CRC-32C checksum of
+/// the second unsigned integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the CRC32L instruction.
+///
+/// \param __C
+///    An unsigned integer operand to add to the CRC-32C checksum of operand
+///    \a __D.
+/// \param __D
+///    An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+///    operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u32(unsigned int __C, unsigned int __D)
+{
+  return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned 64-bit integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the CRC32Q instruction.
+///
+/// \param __C
+///    An unsigned integer operand to add to the CRC-32C checksum of operand
+///    \a __D.
+/// \param __D
+///    An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+///    operand \a __D.
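+///
+/// (Editor's hedged sketch, not upstream text: a CRC-32C over a whole buffer
+/// is typically built by chaining these intrinsics across the data:
+/// \code
+///   unsigned int crc = 0xFFFFFFFFU;   // customary initial value
+///   for (size_t i = 0; i < n; ++i)    // byte-at-a-time for clarity
+///     crc = _mm_crc32_u8(crc, ((const unsigned char *)buf)[i]);
+///   crc = ~crc;                       // customary final inversion
+/// \endcode
+/// buf and n are assumed to be supplied by the caller; wider steps via
+/// _mm_crc32_u32 / _mm_crc32_u64 are the usual optimization.)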
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
+{
+  return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#include <popcntintrin.h>
+
+#endif /* __SMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdalign.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdalign.h
new file mode 100644
index 0000000..6ad25db
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdalign.h
@@ -0,0 +1,21 @@
+/*===---- stdalign.h - Standard header for alignment ------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDALIGN_H
+#define __STDALIGN_H
+
+#ifndef __cplusplus
+#define alignas _Alignas
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#endif /* __STDALIGN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdarg.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdarg.h
new file mode 100644
index 0000000..0bc3940
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdarg.h
@@ -0,0 +1,35 @@
+/*===---- stdarg.h - Variable argument handling ----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+
+#ifndef _VA_LIST
+typedef __builtin_va_list va_list;
+#define _VA_LIST
+#endif
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
+
+/* GCC always defines __va_copy, but does not define va_copy unless in c99 mode
+ * or -ansi is not specified, since it was not part of C90.
+ */
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST 1
+typedef __builtin_va_list __gnuc_va_list;
+#endif
+
+#endif /* __STDARG_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdatomic.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdatomic.h
new file mode 100644
index 0000000..665551e
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdatomic.h
@@ -0,0 +1,176 @@
+/*===---- stdatomic.h - Standard header for atomic types and operations -----===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_STDATOMIC_H
+#define __CLANG_STDATOMIC_H
+
+/* If we're hosted, fall back to the system's stdatomic.h. FreeBSD, for
+ * example, already has a Clang-compatible stdatomic.h header.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>)
+# include_next <stdatomic.h>
+#else
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 7.17.1 Introduction */
+
+#define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
+
+/* 7.17.2 Initialization */
+
+#define ATOMIC_VAR_INIT(value) (value)
+#define atomic_init __c11_atomic_init
+
+/* 7.17.3 Order and consistency */
+
+typedef enum memory_order {
+  memory_order_relaxed = __ATOMIC_RELAXED,
+  memory_order_consume = __ATOMIC_CONSUME,
+  memory_order_acquire = __ATOMIC_ACQUIRE,
+  memory_order_release = __ATOMIC_RELEASE,
+  memory_order_acq_rel = __ATOMIC_ACQ_REL,
+  memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+#define kill_dependency(y) (y)
+
+/* 7.17.4 Fences */
+
+/* These should be provided by the libc implementation. */
+void atomic_thread_fence(memory_order);
+void atomic_signal_fence(memory_order);
+
+#define atomic_thread_fence(order) __c11_atomic_thread_fence(order)
+#define atomic_signal_fence(order) __c11_atomic_signal_fence(order)
+
+/* 7.17.5 Lock-free property */
+
+#define atomic_is_lock_free(obj) __c11_atomic_is_lock_free(sizeof(*(obj)))
+
+/* 7.17.6 Atomic integer types */
+
+#ifdef __cplusplus
+typedef _Atomic(bool) atomic_bool;
+#else
+typedef _Atomic(_Bool) atomic_bool;
+#endif
+typedef _Atomic(char) atomic_char;
+typedef _Atomic(signed char) atomic_schar;
+typedef _Atomic(unsigned char) atomic_uchar;
+typedef _Atomic(short) atomic_short;
+typedef _Atomic(unsigned short) atomic_ushort;
+typedef _Atomic(int) atomic_int;
+typedef _Atomic(unsigned int) atomic_uint;
+typedef _Atomic(long) atomic_long;
+typedef _Atomic(unsigned long) atomic_ulong;
+typedef _Atomic(long long) atomic_llong;
+typedef _Atomic(unsigned long long) atomic_ullong;
+typedef _Atomic(uint_least16_t) atomic_char16_t;
+typedef _Atomic(uint_least32_t) atomic_char32_t;
+typedef _Atomic(wchar_t) atomic_wchar_t;
+typedef _Atomic(int_least8_t) atomic_int_least8_t;
+typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
+typedef _Atomic(int_least16_t) atomic_int_least16_t;
+typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
+typedef _Atomic(int_least32_t) atomic_int_least32_t;
+typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
+typedef _Atomic(int_least64_t) atomic_int_least64_t;
+typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
+typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
+typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
+typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
+typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
+typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
+typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
+typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
+typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
+typedef _Atomic(intptr_t) atomic_intptr_t;
+typedef _Atomic(uintptr_t) atomic_uintptr_t;
+typedef _Atomic(size_t) atomic_size_t;
+typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
+typedef _Atomic(intmax_t) atomic_intmax_t;
+typedef _Atomic(uintmax_t) atomic_uintmax_t; + +/* 7.17.7 Operations on atomic types */ + +#define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST) +#define atomic_store_explicit __c11_atomic_store + +#define atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST) +#define atomic_load_explicit __c11_atomic_load + +#define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST) +#define atomic_exchange_explicit __c11_atomic_exchange + +#define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) +#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong + +#define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) +#define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak + +#define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST) +#define atomic_fetch_add_explicit __c11_atomic_fetch_add + +#define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub + +#define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST) +#define atomic_fetch_or_explicit __c11_atomic_fetch_or + +#define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor + +#define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST) +#define atomic_fetch_and_explicit __c11_atomic_fetch_and + +/* 7.17.8 Atomic flag type and operations */ + +typedef struct atomic_flag { atomic_bool _Value; } atomic_flag; + +#define ATOMIC_FLAG_INIT { 0 } + +/* These should be provided by the libc implementation. */ +#ifdef __cplusplus +bool atomic_flag_test_and_set(volatile atomic_flag *); +bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order); +#else +_Bool atomic_flag_test_and_set(volatile atomic_flag *); +_Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order); +#endif +void atomic_flag_clear(volatile atomic_flag *); +void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order); + +#define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST) +#define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order) + +#define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST) +#define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order) + +#ifdef __cplusplus +} +#endif + +#endif /* __STDC_HOSTED__ */ +#endif /* __CLANG_STDATOMIC_H */ + diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdbool.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdbool.h new file mode 100644 index 0000000..2525363 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdbool.h @@ -0,0 +1,31 @@ +/*===---- stdbool.h - Standard header for booleans -------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __STDBOOL_H +#define __STDBOOL_H + +/* Don't define bool, true, and false in C++, except as a GNU extension. */ +#ifndef __cplusplus +#define bool _Bool +#define true 1 +#define false 0 +#elif defined(__GNUC__) && !defined(__STRICT_ANSI__) +/* Define _Bool as a GNU extension. */ +#define _Bool bool +#if __cplusplus < 201103L +/* For C++98, define bool, false, true as a GNU extension. */ +#define bool bool +#define false false +#define true true +#endif +#endif + +#define __bool_true_false_are_defined 1 + +#endif /* __STDBOOL_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stddef.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stddef.h new file mode 100644 index 0000000..15acd44 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stddef.h @@ -0,0 +1,121 @@ +/*===---- stddef.h - Basic type definitions --------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined(__STDDEF_H) || defined(__need_ptrdiff_t) || \ + defined(__need_size_t) || defined(__need_wchar_t) || \ + defined(__need_NULL) || defined(__need_wint_t) + +#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \ + !defined(__need_wchar_t) && !defined(__need_NULL) && \ + !defined(__need_wint_t) +/* Always define miscellaneous pieces when modules are available. */ +#if !__has_feature(modules) +#define __STDDEF_H +#endif +#define __need_ptrdiff_t +#define __need_size_t +#define __need_wchar_t +#define __need_NULL +#define __need_STDDEF_H_misc +/* __need_wint_t is intentionally not defined here. */ +#endif + +#if defined(__need_ptrdiff_t) +#if !defined(_PTRDIFF_T) || __has_feature(modules) +/* Always define ptrdiff_t when modules are available. */ +#if !__has_feature(modules) +#define _PTRDIFF_T +#endif +typedef __PTRDIFF_TYPE__ ptrdiff_t; +#endif +#undef __need_ptrdiff_t +#endif /* defined(__need_ptrdiff_t) */ + +#if defined(__need_size_t) +#if !defined(_SIZE_T) || __has_feature(modules) +/* Always define size_t when modules are available. */ +#if !__has_feature(modules) +#define _SIZE_T +#endif +typedef __SIZE_TYPE__ size_t; +#endif +#undef __need_size_t +#endif /*defined(__need_size_t) */ + +#if defined(__need_STDDEF_H_misc) +/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is + * enabled. */ +#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \ + !defined(_RSIZE_T)) || __has_feature(modules) +/* Always define rsize_t when modules are available. */ +#if !__has_feature(modules) +#define _RSIZE_T +#endif +typedef __SIZE_TYPE__ rsize_t; +#endif +#endif /* defined(__need_STDDEF_H_misc) */ + +#if defined(__need_wchar_t) +#ifndef __cplusplus +/* Always define wchar_t when modules are available. 
 */
+#if !defined(_WCHAR_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WCHAR_T
+#if defined(_MSC_EXTENSIONS)
+#define _WCHAR_T_DEFINED
+#endif
+#endif
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#undef __need_wchar_t
+#endif /* defined(__need_wchar_t) */
+
+#if defined(__need_NULL)
+#undef NULL
+#ifdef __cplusplus
+# if !defined(__MINGW32__) && !defined(_MSC_VER)
+#  define NULL __null
+# else
+#  define NULL 0
+# endif
+#else
+# define NULL ((void*)0)
+#endif
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std { typedef decltype(nullptr) nullptr_t; }
+using ::std::nullptr_t;
+#endif
+#endif
+#undef __need_NULL
+#endif /* defined(__need_NULL) */
+
+#if defined(__need_STDDEF_H_misc)
+#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
+#include "__stddef_max_align_t.h"
+#endif
+#define offsetof(t, d) __builtin_offsetof(t, d)
+#undef __need_STDDEF_H_misc
+#endif /* defined(__need_STDDEF_H_misc) */
+
+/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
+__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
+#if defined(__need_wint_t)
+/* Always define wint_t when modules are available. */
+#if !defined(_WINT_T) || __has_feature(modules)
+#if !__has_feature(modules)
+#define _WINT_T
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif /* __need_wint_t */
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdint.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdint.h
new file mode 100644
index 0000000..192f653
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdint.h
@@ -0,0 +1,693 @@
+/*===---- stdint.h - Standard header for sized integer types --------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_STDINT_H
+// AIX system headers need stdint.h to be re-enterable while _STD_TYPES_T
+// is defined until an inclusion of it without _STD_TYPES_T occurs, in which
+// case the header guard macro is defined.
+#if !defined(_AIX) || !defined(_STD_TYPES_T) || !defined(__STDC_HOSTED__)
+#define __CLANG_STDINT_H
+#endif
+
+/* If we're hosted, fall back to the system's stdint.h, which might have
+ * additional definitions.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdint.h>)
+
+// C99 7.18.3 Limits of other integer types
+//
+//  Footnote 219, 220: C++ implementations should define these macros only when
+//  __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
+//
+//  Footnote 222: C++ implementations should define these macros only when
+//  __STDC_CONSTANT_MACROS is defined before <stdint.h> is included.
+//
+// C++11 [cstdint.syn]p2:
+//
+//  The macros defined by <cstdint> are provided unconditionally. In particular,
+//  the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in
+//  footnotes 219, 220, and 222 in the C standard) play no role in C++.
+//
+// C11 removed the problematic footnotes.
+//
+// Work around this inconsistency by always defining those macros in C++ mode,
+// so that a C library implementation which follows the C99 standard can be
+// used in C++.
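+/* (Editor's illustration, not upstream text: the effect of the workaround
+ * below is that C++ translation units see the limit macros without any
+ * feature-test dance, e.g.
+ *
+ *   #include <stdint.h>
+ *   static_assert(INT32_MAX == 2147483647, "limit macros visible in C++");
+ *
+ * compiles even though the user never defined __STDC_LIMIT_MACROS.)
+ */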
+# ifdef __cplusplus
+#  if !defined(__STDC_LIMIT_MACROS)
+#   define __STDC_LIMIT_MACROS
+#   define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+#  endif
+#  if !defined(__STDC_CONSTANT_MACROS)
+#   define __STDC_CONSTANT_MACROS
+#   define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+#  endif
+# endif
+
+# include_next <stdint.h>
+
+# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+#  undef __STDC_LIMIT_MACROS
+#  undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG
+# endif
+# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+#  undef __STDC_CONSTANT_MACROS
+#  undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG
+# endif
+
+#else
+
+/* C99 7.18.1.1 Exact-width integer types.
+ * C99 7.18.1.2 Minimum-width integer types.
+ * C99 7.18.1.3 Fastest minimum-width integer types.
+ *
+ * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
+ * 64-bit types if they are implemented. Other exact-width types are optional.
+ * This implementation defines an exact-width type for every integer width
+ * that is represented in the standard integer types.
+ *
+ * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
+ * and 64-bit widths regardless of whether there are corresponding exact-width
+ * types.
+ *
+ * To accommodate targets that are missing types that are exactly 8, 16, 32, or
+ * 64 bits wide, this implementation takes an approach of cascading
+ * redefinitions, redefining __int_leastN_t to successively smaller exact-width
+ * types. It is therefore important that the types are defined in order of
+ * descending widths.
+ *
+ * We currently assume that the minimum-width types and the fastest
+ * minimum-width types are the same. This is allowed by the standard, but is
+ * suboptimal.
+ *
+ * In violation of the standard, some targets do not implement a type that is
+ * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
+ * To accommodate these targets, a required minimum-width type is only
+ * defined if there exists an exact-width type of equal or greater width.
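+ *
+ * (Editor's illustration, not upstream text: on a hypothetical target that
+ * provides only __INT32_TYPE__ and __INT16_TYPE__, the cascade first sets
+ * __int_least8_t to int32_t and then, in the 16-bit block below, narrows it
+ * to int16_t; int_least8_t and int_fast8_t therefore end up as int16_t, the
+ * narrowest exact-width type available.)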
+ */ + +#ifdef __INT64_TYPE__ +# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/ +typedef __INT64_TYPE__ int64_t; +# endif /* __int8_t_defined */ +typedef __UINT64_TYPE__ uint64_t; +# define __int_least64_t int64_t +# define __uint_least64_t uint64_t +# define __int_least32_t int64_t +# define __uint_least32_t uint64_t +# define __int_least16_t int64_t +# define __uint_least16_t uint64_t +# define __int_least8_t int64_t +# define __uint_least8_t uint64_t +#endif /* __INT64_TYPE__ */ + +#ifdef __int_least64_t +typedef __int_least64_t int_least64_t; +typedef __uint_least64_t uint_least64_t; +typedef __int_least64_t int_fast64_t; +typedef __uint_least64_t uint_fast64_t; +#endif /* __int_least64_t */ + +#ifdef __INT56_TYPE__ +typedef __INT56_TYPE__ int56_t; +typedef __UINT56_TYPE__ uint56_t; +typedef int56_t int_least56_t; +typedef uint56_t uint_least56_t; +typedef int56_t int_fast56_t; +typedef uint56_t uint_fast56_t; +# define __int_least32_t int56_t +# define __uint_least32_t uint56_t +# define __int_least16_t int56_t +# define __uint_least16_t uint56_t +# define __int_least8_t int56_t +# define __uint_least8_t uint56_t +#endif /* __INT56_TYPE__ */ + + +#ifdef __INT48_TYPE__ +typedef __INT48_TYPE__ int48_t; +typedef __UINT48_TYPE__ uint48_t; +typedef int48_t int_least48_t; +typedef uint48_t uint_least48_t; +typedef int48_t int_fast48_t; +typedef uint48_t uint_fast48_t; +# define __int_least32_t int48_t +# define __uint_least32_t uint48_t +# define __int_least16_t int48_t +# define __uint_least16_t uint48_t +# define __int_least8_t int48_t +# define __uint_least8_t uint48_t +#endif /* __INT48_TYPE__ */ + + +#ifdef __INT40_TYPE__ +typedef __INT40_TYPE__ int40_t; +typedef __UINT40_TYPE__ uint40_t; +typedef int40_t int_least40_t; +typedef uint40_t uint_least40_t; +typedef int40_t int_fast40_t; +typedef uint40_t uint_fast40_t; +# define __int_least32_t int40_t +# define __uint_least32_t uint40_t +# define __int_least16_t int40_t +# define __uint_least16_t uint40_t +# define __int_least8_t int40_t +# define __uint_least8_t uint40_t +#endif /* __INT40_TYPE__ */ + + +#ifdef __INT32_TYPE__ + +# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/ +typedef __INT32_TYPE__ int32_t; +# endif /* __int8_t_defined */ + +# ifndef __uint32_t_defined /* more glibc compatibility */ +# define __uint32_t_defined +typedef __UINT32_TYPE__ uint32_t; +# endif /* __uint32_t_defined */ + +# define __int_least32_t int32_t +# define __uint_least32_t uint32_t +# define __int_least16_t int32_t +# define __uint_least16_t uint32_t +# define __int_least8_t int32_t +# define __uint_least8_t uint32_t +#endif /* __INT32_TYPE__ */ + +#ifdef __int_least32_t +typedef __int_least32_t int_least32_t; +typedef __uint_least32_t uint_least32_t; +typedef __int_least32_t int_fast32_t; +typedef __uint_least32_t uint_fast32_t; +#endif /* __int_least32_t */ + +#ifdef __INT24_TYPE__ +typedef __INT24_TYPE__ int24_t; +typedef __UINT24_TYPE__ uint24_t; +typedef int24_t int_least24_t; +typedef uint24_t uint_least24_t; +typedef int24_t int_fast24_t; +typedef uint24_t uint_fast24_t; +# define __int_least16_t int24_t +# define __uint_least16_t uint24_t +# define __int_least8_t int24_t +# define __uint_least8_t uint24_t +#endif /* __INT24_TYPE__ */ + +#ifdef __INT16_TYPE__ +#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/ +typedef __INT16_TYPE__ int16_t; +#endif /* __int8_t_defined */ +typedef __UINT16_TYPE__ uint16_t; +# define __int_least16_t int16_t +# define __uint_least16_t uint16_t +# 
define __int_least8_t int16_t
+# define __uint_least8_t uint16_t
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+typedef __int_least16_t int_least16_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __int_least16_t int_fast16_t;
+typedef __uint_least16_t uint_fast16_t;
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t*/
+typedef __INT8_TYPE__ int8_t;
+#endif /* __int8_t_defined */
+typedef __UINT8_TYPE__ uint8_t;
+# define __int_least8_t int8_t
+# define __uint_least8_t uint8_t
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+typedef __int_least8_t int_least8_t;
+typedef __uint_least8_t uint_least8_t;
+typedef __int_least8_t int_fast8_t;
+typedef __uint_least8_t uint_fast8_t;
+#endif /* __int_least8_t */
+
+/* prevent glibc sys/types.h from defining conflicting types */
+#ifndef __int8_t_defined
+# define __int8_t_defined
+#endif /* __int8_t_defined */
+
+/* C99 7.18.1.4 Integer types capable of holding object pointers.
+ */
+#define __stdint_join3(a,b,c) a ## b ## c
+
+#ifndef _INTPTR_T
+#ifndef __intptr_t_defined
+typedef __INTPTR_TYPE__ intptr_t;
+#define __intptr_t_defined
+#define _INTPTR_T
+#endif
+#endif
+
+#ifndef _UINTPTR_T
+typedef __UINTPTR_TYPE__ uintptr_t;
+#define _UINTPTR_T
+#endif
+
+/* C99 7.18.1.5 Greatest-width integer types.
+ */
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+/* C99 7.18.4 Macros for minimum-width integer constants.
+ *
+ * The standard requires that integer constant macros be defined for all the
+ * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
+ * types are required, the corresponding integer constant macros are defined
+ * here. This implementation also defines minimum-width types for every other
+ * integer width that the target implements, so corresponding macros are
+ * defined below, too.
+ *
+ * These macros are defined using the same successive-shrinking approach as
+ * the type definitions above. It is likewise important that macros are defined
+ * in order of descending width.
+ *
+ * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */ + +#define __int_c_join(a, b) a ## b +#define __int_c(v, suffix) __int_c_join(v, suffix) +#define __uint_c(v, suffix) __int_c_join(v##U, suffix) + + +#ifdef __INT64_TYPE__ +# ifdef __INT64_C_SUFFIX__ +# define __int64_c_suffix __INT64_C_SUFFIX__ +# define __int32_c_suffix __INT64_C_SUFFIX__ +# define __int16_c_suffix __INT64_C_SUFFIX__ +# define __int8_c_suffix __INT64_C_SUFFIX__ +# else +# undef __int64_c_suffix +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix +# endif /* __INT64_C_SUFFIX__ */ +#endif /* __INT64_TYPE__ */ + +#ifdef __int_least64_t +# ifdef __int64_c_suffix +# define INT64_C(v) __int_c(v, __int64_c_suffix) +# define UINT64_C(v) __uint_c(v, __int64_c_suffix) +# else +# define INT64_C(v) v +# define UINT64_C(v) v ## U +# endif /* __int64_c_suffix */ +#endif /* __int_least64_t */ + + +#ifdef __INT56_TYPE__ +# ifdef __INT56_C_SUFFIX__ +# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__) +# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__) +# define __int32_c_suffix __INT56_C_SUFFIX__ +# define __int16_c_suffix __INT56_C_SUFFIX__ +# define __int8_c_suffix __INT56_C_SUFFIX__ +# else +# define INT56_C(v) v +# define UINT56_C(v) v ## U +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix +# endif /* __INT56_C_SUFFIX__ */ +#endif /* __INT56_TYPE__ */ + + +#ifdef __INT48_TYPE__ +# ifdef __INT48_C_SUFFIX__ +# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__) +# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__) +# define __int32_c_suffix __INT48_C_SUFFIX__ +# define __int16_c_suffix __INT48_C_SUFFIX__ +# define __int8_c_suffix __INT48_C_SUFFIX__ +# else +# define INT48_C(v) v +# define UINT48_C(v) v ## U +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix +# endif /* __INT48_C_SUFFIX__ */ +#endif /* __INT48_TYPE__ */ + + +#ifdef __INT40_TYPE__ +# ifdef __INT40_C_SUFFIX__ +# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__) +# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__) +# define __int32_c_suffix __INT40_C_SUFFIX__ +# define __int16_c_suffix __INT40_C_SUFFIX__ +# define __int8_c_suffix __INT40_C_SUFFIX__ +# else +# define INT40_C(v) v +# define UINT40_C(v) v ## U +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix +# endif /* __INT40_C_SUFFIX__ */ +#endif /* __INT40_TYPE__ */ + + +#ifdef __INT32_TYPE__ +# ifdef __INT32_C_SUFFIX__ +# define __int32_c_suffix __INT32_C_SUFFIX__ +# define __int16_c_suffix __INT32_C_SUFFIX__ +# define __int8_c_suffix __INT32_C_SUFFIX__ +#else +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix +# endif /* __INT32_C_SUFFIX__ */ +#endif /* __INT32_TYPE__ */ + +#ifdef __int_least32_t +# ifdef __int32_c_suffix +# define INT32_C(v) __int_c(v, __int32_c_suffix) +# define UINT32_C(v) __uint_c(v, __int32_c_suffix) +# else +# define INT32_C(v) v +# define UINT32_C(v) v ## U +# endif /* __int32_c_suffix */ +#endif /* __int_least32_t */ + + +#ifdef __INT24_TYPE__ +# ifdef __INT24_C_SUFFIX__ +# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__) +# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__) +# define __int16_c_suffix __INT24_C_SUFFIX__ +# define __int8_c_suffix __INT24_C_SUFFIX__ +# else +# define INT24_C(v) v +# define UINT24_C(v) v ## U +# undef __int16_c_suffix +# undef __int8_c_suffix +# endif /* __INT24_C_SUFFIX__ */ +#endif /* __INT24_TYPE__ */ + + +#ifdef __INT16_TYPE__ +# ifdef __INT16_C_SUFFIX__ +# define __int16_c_suffix __INT16_C_SUFFIX__ +# define __int8_c_suffix __INT16_C_SUFFIX__ +#else +# undef __int16_c_suffix +# 
undef __int8_c_suffix
+# endif /* __INT16_C_SUFFIX__ */
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+# ifdef __int16_c_suffix
+# define INT16_C(v) __int_c(v, __int16_c_suffix)
+# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
+# else
+# define INT16_C(v) v
+# define UINT16_C(v) v ## U
+# endif /* __int16_c_suffix */
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+# ifdef __INT8_C_SUFFIX__
+# define __int8_c_suffix __INT8_C_SUFFIX__
+#else
+# undef __int8_c_suffix
+# endif /* __INT8_C_SUFFIX__ */
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+# ifdef __int8_c_suffix
+# define INT8_C(v) __int_c(v, __int8_c_suffix)
+# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
+# else
+# define INT8_C(v) v
+# define UINT8_C(v) v ## U
+# endif /* __int8_c_suffix */
+#endif /* __int_least8_t */
+
+
+/* C99 7.18.2.1 Limits of exact-width integer types.
+ * C99 7.18.2.2 Limits of minimum-width integer types.
+ * C99 7.18.2.3 Limits of fastest minimum-width integer types.
+ *
+ * The presence of limit macros is completely optional in C99. This
+ * implementation defines limits for all of the types (exact- and
+ * minimum-width) that it defines above, using the limits of the minimum-width
+ * type for any types that do not have exact-width representations.
+ *
+ * As in the type definitions, this section takes an approach of
+ * successive-shrinking to determine which limits to use for the standard (8,
+ * 16, 32, 64) bit widths when they don't have exact representations. It is
+ * therefore important that the definitions be kept in order of descending
+ * widths.
+ *
+ * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#ifdef __INT64_TYPE__
+# define INT64_MAX INT64_C( 9223372036854775807)
+# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
+# define UINT64_MAX UINT64_C(18446744073709551615)
+# define __INT_LEAST64_MIN INT64_MIN
+# define __INT_LEAST64_MAX INT64_MAX
+# define __UINT_LEAST64_MAX UINT64_MAX
+# define __INT_LEAST32_MIN INT64_MIN
+# define __INT_LEAST32_MAX INT64_MAX
+# define __UINT_LEAST32_MAX UINT64_MAX
+# define __INT_LEAST16_MIN INT64_MIN
+# define __INT_LEAST16_MAX INT64_MAX
+# define __UINT_LEAST16_MAX UINT64_MAX
+# define __INT_LEAST8_MIN INT64_MIN
+# define __INT_LEAST8_MAX INT64_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __INT64_TYPE__ */
+
+#ifdef __INT_LEAST64_MIN
+# define INT_LEAST64_MIN __INT_LEAST64_MIN
+# define INT_LEAST64_MAX __INT_LEAST64_MAX
+# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
+# define INT_FAST64_MIN __INT_LEAST64_MIN
+# define INT_FAST64_MAX __INT_LEAST64_MAX
+# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+#endif /* __INT_LEAST64_MIN */
+
+
+#ifdef __INT56_TYPE__
+# define INT56_MAX INT56_C(36028797018963967)
+# define INT56_MIN (-INT56_C(36028797018963967)-1)
+# define UINT56_MAX UINT56_C(72057594037927935)
+# define INT_LEAST56_MIN INT56_MIN
+# define INT_LEAST56_MAX INT56_MAX
+# define UINT_LEAST56_MAX UINT56_MAX
+# define INT_FAST56_MIN INT56_MIN
+# define INT_FAST56_MAX INT56_MAX
+# define UINT_FAST56_MAX UINT56_MAX
+# define __INT_LEAST32_MIN INT56_MIN
+# define __INT_LEAST32_MAX INT56_MAX
+# define __UINT_LEAST32_MAX UINT56_MAX
+# define __INT_LEAST16_MIN INT56_MIN
+# define __INT_LEAST16_MAX INT56_MAX
+# define __UINT_LEAST16_MAX UINT56_MAX
+# define __INT_LEAST8_MIN INT56_MIN
+# define __INT_LEAST8_MAX INT56_MAX
+# define __UINT_LEAST8_MAX UINT56_MAX
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# define 
INT48_MAX INT48_C(140737488355327) +# define INT48_MIN (-INT48_C(140737488355327)-1) +# define UINT48_MAX UINT48_C(281474976710655) +# define INT_LEAST48_MIN INT48_MIN +# define INT_LEAST48_MAX INT48_MAX +# define UINT_LEAST48_MAX UINT48_MAX +# define INT_FAST48_MIN INT48_MIN +# define INT_FAST48_MAX INT48_MAX +# define UINT_FAST48_MAX UINT48_MAX +# define __INT_LEAST32_MIN INT48_MIN +# define __INT_LEAST32_MAX INT48_MAX +# define __UINT_LEAST32_MAX UINT48_MAX +# define __INT_LEAST16_MIN INT48_MIN +# define __INT_LEAST16_MAX INT48_MAX +# define __UINT_LEAST16_MAX UINT48_MAX +# define __INT_LEAST8_MIN INT48_MIN +# define __INT_LEAST8_MAX INT48_MAX +# define __UINT_LEAST8_MAX UINT48_MAX +#endif /* __INT48_TYPE__ */ + + +#ifdef __INT40_TYPE__ +# define INT40_MAX INT40_C(549755813887) +# define INT40_MIN (-INT40_C(549755813887)-1) +# define UINT40_MAX UINT40_C(1099511627775) +# define INT_LEAST40_MIN INT40_MIN +# define INT_LEAST40_MAX INT40_MAX +# define UINT_LEAST40_MAX UINT40_MAX +# define INT_FAST40_MIN INT40_MIN +# define INT_FAST40_MAX INT40_MAX +# define UINT_FAST40_MAX UINT40_MAX +# define __INT_LEAST32_MIN INT40_MIN +# define __INT_LEAST32_MAX INT40_MAX +# define __UINT_LEAST32_MAX UINT40_MAX +# define __INT_LEAST16_MIN INT40_MIN +# define __INT_LEAST16_MAX INT40_MAX +# define __UINT_LEAST16_MAX UINT40_MAX +# define __INT_LEAST8_MIN INT40_MIN +# define __INT_LEAST8_MAX INT40_MAX +# define __UINT_LEAST8_MAX UINT40_MAX +#endif /* __INT40_TYPE__ */ + + +#ifdef __INT32_TYPE__ +# define INT32_MAX INT32_C(2147483647) +# define INT32_MIN (-INT32_C(2147483647)-1) +# define UINT32_MAX UINT32_C(4294967295) +# define __INT_LEAST32_MIN INT32_MIN +# define __INT_LEAST32_MAX INT32_MAX +# define __UINT_LEAST32_MAX UINT32_MAX +# define __INT_LEAST16_MIN INT32_MIN +# define __INT_LEAST16_MAX INT32_MAX +# define __UINT_LEAST16_MAX UINT32_MAX +# define __INT_LEAST8_MIN INT32_MIN +# define __INT_LEAST8_MAX INT32_MAX +# define __UINT_LEAST8_MAX UINT32_MAX +#endif /* __INT32_TYPE__ */ + +#ifdef __INT_LEAST32_MIN +# define INT_LEAST32_MIN __INT_LEAST32_MIN +# define INT_LEAST32_MAX __INT_LEAST32_MAX +# define UINT_LEAST32_MAX __UINT_LEAST32_MAX +# define INT_FAST32_MIN __INT_LEAST32_MIN +# define INT_FAST32_MAX __INT_LEAST32_MAX +# define UINT_FAST32_MAX __UINT_LEAST32_MAX +#endif /* __INT_LEAST32_MIN */ + + +#ifdef __INT24_TYPE__ +# define INT24_MAX INT24_C(8388607) +# define INT24_MIN (-INT24_C(8388607)-1) +# define UINT24_MAX UINT24_C(16777215) +# define INT_LEAST24_MIN INT24_MIN +# define INT_LEAST24_MAX INT24_MAX +# define UINT_LEAST24_MAX UINT24_MAX +# define INT_FAST24_MIN INT24_MIN +# define INT_FAST24_MAX INT24_MAX +# define UINT_FAST24_MAX UINT24_MAX +# define __INT_LEAST16_MIN INT24_MIN +# define __INT_LEAST16_MAX INT24_MAX +# define __UINT_LEAST16_MAX UINT24_MAX +# define __INT_LEAST8_MIN INT24_MIN +# define __INT_LEAST8_MAX INT24_MAX +# define __UINT_LEAST8_MAX UINT24_MAX +#endif /* __INT24_TYPE__ */ + + +#ifdef __INT16_TYPE__ +#define INT16_MAX INT16_C(32767) +#define INT16_MIN (-INT16_C(32767)-1) +#define UINT16_MAX UINT16_C(65535) +# define __INT_LEAST16_MIN INT16_MIN +# define __INT_LEAST16_MAX INT16_MAX +# define __UINT_LEAST16_MAX UINT16_MAX +# define __INT_LEAST8_MIN INT16_MIN +# define __INT_LEAST8_MAX INT16_MAX +# define __UINT_LEAST8_MAX UINT16_MAX +#endif /* __INT16_TYPE__ */ + +#ifdef __INT_LEAST16_MIN +# define INT_LEAST16_MIN __INT_LEAST16_MIN +# define INT_LEAST16_MAX __INT_LEAST16_MAX +# define UINT_LEAST16_MAX __UINT_LEAST16_MAX +# define INT_FAST16_MIN __INT_LEAST16_MIN +# 
define INT_FAST16_MAX __INT_LEAST16_MAX +# define UINT_FAST16_MAX __UINT_LEAST16_MAX +#endif /* __INT_LEAST16_MIN */ + + +#ifdef __INT8_TYPE__ +# define INT8_MAX INT8_C(127) +# define INT8_MIN (-INT8_C(127)-1) +# define UINT8_MAX UINT8_C(255) +# define __INT_LEAST8_MIN INT8_MIN +# define __INT_LEAST8_MAX INT8_MAX +# define __UINT_LEAST8_MAX UINT8_MAX +#endif /* __INT8_TYPE__ */ + +#ifdef __INT_LEAST8_MIN +# define INT_LEAST8_MIN __INT_LEAST8_MIN +# define INT_LEAST8_MAX __INT_LEAST8_MAX +# define UINT_LEAST8_MAX __UINT_LEAST8_MAX +# define INT_FAST8_MIN __INT_LEAST8_MIN +# define INT_FAST8_MAX __INT_LEAST8_MAX +# define UINT_FAST8_MAX __UINT_LEAST8_MAX +#endif /* __INT_LEAST8_MIN */ + +/* Some utility macros */ +#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN) +#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX) +#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX) +#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v)) +#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v)) + +/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */ +/* C99 7.18.3 Limits of other integer types. */ + +#define INTPTR_MIN (-__INTPTR_MAX__-1) +#define INTPTR_MAX __INTPTR_MAX__ +#define UINTPTR_MAX __UINTPTR_MAX__ +#define PTRDIFF_MIN (-__PTRDIFF_MAX__-1) +#define PTRDIFF_MAX __PTRDIFF_MAX__ +#define SIZE_MAX __SIZE_MAX__ + +/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__ + * is enabled. */ +#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 +#define RSIZE_MAX (SIZE_MAX >> 1) +#endif + +/* C99 7.18.2.5 Limits of greatest-width integer types. */ +#define INTMAX_MIN (-__INTMAX_MAX__-1) +#define INTMAX_MAX __INTMAX_MAX__ +#define UINTMAX_MAX __UINTMAX_MAX__ + +/* C99 7.18.3 Limits of other integer types. */ +#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__) +#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__) +#ifdef __WINT_UNSIGNED__ +# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0) +# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__) +#else +# define WINT_MIN __INTN_MIN(__WINT_WIDTH__) +# define WINT_MAX __INTN_MAX(__WINT_WIDTH__) +#endif + +#ifndef WCHAR_MAX +# define WCHAR_MAX __WCHAR_MAX__ +#endif +#ifndef WCHAR_MIN +# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__) +# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__) +# else +# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0) +# endif +#endif + +/* 7.18.4.2 Macros for greatest-width integer constants. */ +#define INTMAX_C(v) __int_c(v, __INTMAX_C_SUFFIX__) +#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__) + +#endif /* __STDC_HOSTED__ */ +#endif /* __CLANG_STDINT_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdnoreturn.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdnoreturn.h new file mode 100644 index 0000000..e83cd81 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/stdnoreturn.h @@ -0,0 +1,16 @@ +/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDNORETURN_H
+#define __STDNORETURN_H
+
+#define noreturn _Noreturn
+#define __noreturn_is_defined 1
+
+#endif /* __STDNORETURN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tbmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tbmintrin.h
new file mode 100644
index 0000000..f4e848a
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tbmintrin.h
@@ -0,0 +1,140 @@
+/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __TBMINTRIN_H
+#define __TBMINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm")))
+
+#define __bextri_u32(a, b) \
+  ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \
+                                           (unsigned int)(b)))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcfill_u32(unsigned int __a)
+{
+  return __a & (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blci_u32(unsigned int __a)
+{
+  return __a | ~(__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcic_u32(unsigned int __a)
+{
+  return ~__a & (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcmsk_u32(unsigned int __a)
+{
+  return __a ^ (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blcs_u32(unsigned int __a)
+{
+  return __a | (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsfill_u32(unsigned int __a)
+{
+  return __a | (__a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__blsic_u32(unsigned int __a)
+{
+  return ~__a | (__a - 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__t1mskc_u32(unsigned int __a)
+{
+  return ~__a | (__a + 1);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+__tzmsk_u32(unsigned int __a)
+{
+  return ~__a & (__a - 1);
+}
+
+#ifdef __x86_64__
+#define __bextri_u64(a, b) \
+  ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \
+                                                 (unsigned long long)(b)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcfill_u64(unsigned long long __a)
+{
+  return __a & (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blci_u64(unsigned long long __a)
+{
+  return __a | ~(__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcic_u64(unsigned long long __a)
+{
+  return ~__a & (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcmsk_u64(unsigned long long __a)
+{
+  return __a ^ (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blcs_u64(unsigned long long __a)
+{
+  return __a | (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsfill_u64(unsigned long long __a)
+{
+  return __a | (__a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__blsic_u64(unsigned long long __a)
+{
+  return ~__a | (__a - 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__t1mskc_u64(unsigned long long __a)
+{
+  return ~__a | (__a + 1);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__tzmsk_u64(unsigned long long __a)
+{
+  return ~__a & (__a - 1);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __TBMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tgmath.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tgmath.h
new file mode 100644
index 0000000..7acf18b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tgmath.h
@@ -0,0 +1,1368 @@
+/*===---- tgmath.h - Standard header for type generic math ----------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_TGMATH_H
+#define __CLANG_TGMATH_H
+
+/* C99 7.22 Type-generic math <tgmath.h>. */
+#include <math.h>
+
+/*
+ * Allow additional definitions and implementation-defined values on Apple
+ * platforms. This is done after #include <math.h> to avoid depcycle conflicts
+ * between libcxx and darwin in C++ modules builds.
+ */
+#if defined(__APPLE__) && __STDC_HOSTED__ && __has_include_next(<tgmath.h>)
+# include_next <tgmath.h>
+#else
+
+/* C++ handles type genericity with overloading in math.h. */
+#ifndef __cplusplus
+#include <complex.h>
+
+#define _TG_ATTRSp __attribute__((__overloadable__))
+#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))
+
+// promotion
+
+typedef void _Argument_type_is_not_arithmetic;
+static _Argument_type_is_not_arithmetic __tg_promote(...)
+  __attribute__((__unavailable__,__overloadable__));
+static double _TG_ATTRSp __tg_promote(int);
+static double _TG_ATTRSp __tg_promote(unsigned int);
+static double _TG_ATTRSp __tg_promote(long);
+static double _TG_ATTRSp __tg_promote(unsigned long);
+static double _TG_ATTRSp __tg_promote(long long);
+static double _TG_ATTRSp __tg_promote(unsigned long long);
+static float _TG_ATTRSp __tg_promote(float);
+static double _TG_ATTRSp __tg_promote(double);
+static long double _TG_ATTRSp __tg_promote(long double);
+static float _Complex _TG_ATTRSp __tg_promote(float _Complex);
+static double _Complex _TG_ATTRSp __tg_promote(double _Complex);
+static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);
+
+#define __tg_promote1(__x) (__typeof__(__tg_promote(__x)))
+#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \
+                                            __tg_promote(__y)))
+#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \
+                                                 __tg_promote(__y) + \
+                                                 __tg_promote(__z)))
+
+// acos
+
+static float
+    _TG_ATTRS
+    __tg_acos(float __x) {return acosf(__x);}
+
+static double
+    _TG_ATTRS
+    __tg_acos(double __x) {return acos(__x);}
+
+static long double
+    _TG_ATTRS
+    __tg_acos(long double __x) {return acosl(__x);}
+
+static float _Complex
+    _TG_ATTRS
+    __tg_acos(float _Complex __x) {return cacosf(__x);}
+
+static double _Complex
+    _TG_ATTRS
+    __tg_acos(double _Complex __x) {return cacos(__x);}
+
+static long double _Complex
+    _TG_ATTRS
+    __tg_acos(long double _Complex __x) {return cacosl(__x);}
+
+#undef acos
+#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))
+
+// asin
+
+static float
+    _TG_ATTRS
+    __tg_asin(float __x) {return asinf(__x);}
+
+static double
+    _TG_ATTRS
+    __tg_asin(double __x) {return asin(__x);}
+
+static long double
+    _TG_ATTRS
+    __tg_asin(long double __x) 
{return asinl(__x);} + +static float _Complex + _TG_ATTRS + __tg_asin(float _Complex __x) {return casinf(__x);} + +static double _Complex + _TG_ATTRS + __tg_asin(double _Complex __x) {return casin(__x);} + +static long double _Complex + _TG_ATTRS + __tg_asin(long double _Complex __x) {return casinl(__x);} + +#undef asin +#define asin(__x) __tg_asin(__tg_promote1((__x))(__x)) + +// atan + +static float + _TG_ATTRS + __tg_atan(float __x) {return atanf(__x);} + +static double + _TG_ATTRS + __tg_atan(double __x) {return atan(__x);} + +static long double + _TG_ATTRS + __tg_atan(long double __x) {return atanl(__x);} + +static float _Complex + _TG_ATTRS + __tg_atan(float _Complex __x) {return catanf(__x);} + +static double _Complex + _TG_ATTRS + __tg_atan(double _Complex __x) {return catan(__x);} + +static long double _Complex + _TG_ATTRS + __tg_atan(long double _Complex __x) {return catanl(__x);} + +#undef atan +#define atan(__x) __tg_atan(__tg_promote1((__x))(__x)) + +// acosh + +static float + _TG_ATTRS + __tg_acosh(float __x) {return acoshf(__x);} + +static double + _TG_ATTRS + __tg_acosh(double __x) {return acosh(__x);} + +static long double + _TG_ATTRS + __tg_acosh(long double __x) {return acoshl(__x);} + +static float _Complex + _TG_ATTRS + __tg_acosh(float _Complex __x) {return cacoshf(__x);} + +static double _Complex + _TG_ATTRS + __tg_acosh(double _Complex __x) {return cacosh(__x);} + +static long double _Complex + _TG_ATTRS + __tg_acosh(long double _Complex __x) {return cacoshl(__x);} + +#undef acosh +#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x)) + +// asinh + +static float + _TG_ATTRS + __tg_asinh(float __x) {return asinhf(__x);} + +static double + _TG_ATTRS + __tg_asinh(double __x) {return asinh(__x);} + +static long double + _TG_ATTRS + __tg_asinh(long double __x) {return asinhl(__x);} + +static float _Complex + _TG_ATTRS + __tg_asinh(float _Complex __x) {return casinhf(__x);} + +static double _Complex + _TG_ATTRS + __tg_asinh(double _Complex __x) {return casinh(__x);} + +static long double _Complex + _TG_ATTRS + __tg_asinh(long double _Complex __x) {return casinhl(__x);} + +#undef asinh +#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x)) + +// atanh + +static float + _TG_ATTRS + __tg_atanh(float __x) {return atanhf(__x);} + +static double + _TG_ATTRS + __tg_atanh(double __x) {return atanh(__x);} + +static long double + _TG_ATTRS + __tg_atanh(long double __x) {return atanhl(__x);} + +static float _Complex + _TG_ATTRS + __tg_atanh(float _Complex __x) {return catanhf(__x);} + +static double _Complex + _TG_ATTRS + __tg_atanh(double _Complex __x) {return catanh(__x);} + +static long double _Complex + _TG_ATTRS + __tg_atanh(long double _Complex __x) {return catanhl(__x);} + +#undef atanh +#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x)) + +// cos + +static float + _TG_ATTRS + __tg_cos(float __x) {return cosf(__x);} + +static double + _TG_ATTRS + __tg_cos(double __x) {return cos(__x);} + +static long double + _TG_ATTRS + __tg_cos(long double __x) {return cosl(__x);} + +static float _Complex + _TG_ATTRS + __tg_cos(float _Complex __x) {return ccosf(__x);} + +static double _Complex + _TG_ATTRS + __tg_cos(double _Complex __x) {return ccos(__x);} + +static long double _Complex + _TG_ATTRS + __tg_cos(long double _Complex __x) {return ccosl(__x);} + +#undef cos +#define cos(__x) __tg_cos(__tg_promote1((__x))(__x)) + +// sin + +static float + _TG_ATTRS + __tg_sin(float __x) {return sinf(__x);} + +static double + _TG_ATTRS + __tg_sin(double __x) {return sin(__x);} + 
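The overload family above is easiest to read with a concrete caller in mind. Below is a minimal sketch (illustrative only, not part of this patch; the demo() function name and a hosted C99 toolchain are assumptions) of how the type-generic sin() macro defined just below dispatches through these __tg_sin overloads:

    #include <tgmath.h>   /* the header being added in this diff */
    #include <complex.h>

    void demo(void) {
      float f = sin(0.5f);                  /* float argument: resolves to sinf()      */
      double d = sin(1);                    /* int promotes to double: resolves to sin() */
      double _Complex z = sin(1.0 + 2.0*I); /* complex argument: resolves to csin()    */
      (void)f; (void)d; (void)z;            /* silence unused-variable warnings        */
    }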
+static long double + _TG_ATTRS + __tg_sin(long double __x) {return sinl(__x);} + +static float _Complex + _TG_ATTRS + __tg_sin(float _Complex __x) {return csinf(__x);} + +static double _Complex + _TG_ATTRS + __tg_sin(double _Complex __x) {return csin(__x);} + +static long double _Complex + _TG_ATTRS + __tg_sin(long double _Complex __x) {return csinl(__x);} + +#undef sin +#define sin(__x) __tg_sin(__tg_promote1((__x))(__x)) + +// tan + +static float + _TG_ATTRS + __tg_tan(float __x) {return tanf(__x);} + +static double + _TG_ATTRS + __tg_tan(double __x) {return tan(__x);} + +static long double + _TG_ATTRS + __tg_tan(long double __x) {return tanl(__x);} + +static float _Complex + _TG_ATTRS + __tg_tan(float _Complex __x) {return ctanf(__x);} + +static double _Complex + _TG_ATTRS + __tg_tan(double _Complex __x) {return ctan(__x);} + +static long double _Complex + _TG_ATTRS + __tg_tan(long double _Complex __x) {return ctanl(__x);} + +#undef tan +#define tan(__x) __tg_tan(__tg_promote1((__x))(__x)) + +// cosh + +static float + _TG_ATTRS + __tg_cosh(float __x) {return coshf(__x);} + +static double + _TG_ATTRS + __tg_cosh(double __x) {return cosh(__x);} + +static long double + _TG_ATTRS + __tg_cosh(long double __x) {return coshl(__x);} + +static float _Complex + _TG_ATTRS + __tg_cosh(float _Complex __x) {return ccoshf(__x);} + +static double _Complex + _TG_ATTRS + __tg_cosh(double _Complex __x) {return ccosh(__x);} + +static long double _Complex + _TG_ATTRS + __tg_cosh(long double _Complex __x) {return ccoshl(__x);} + +#undef cosh +#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x)) + +// sinh + +static float + _TG_ATTRS + __tg_sinh(float __x) {return sinhf(__x);} + +static double + _TG_ATTRS + __tg_sinh(double __x) {return sinh(__x);} + +static long double + _TG_ATTRS + __tg_sinh(long double __x) {return sinhl(__x);} + +static float _Complex + _TG_ATTRS + __tg_sinh(float _Complex __x) {return csinhf(__x);} + +static double _Complex + _TG_ATTRS + __tg_sinh(double _Complex __x) {return csinh(__x);} + +static long double _Complex + _TG_ATTRS + __tg_sinh(long double _Complex __x) {return csinhl(__x);} + +#undef sinh +#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x)) + +// tanh + +static float + _TG_ATTRS + __tg_tanh(float __x) {return tanhf(__x);} + +static double + _TG_ATTRS + __tg_tanh(double __x) {return tanh(__x);} + +static long double + _TG_ATTRS + __tg_tanh(long double __x) {return tanhl(__x);} + +static float _Complex + _TG_ATTRS + __tg_tanh(float _Complex __x) {return ctanhf(__x);} + +static double _Complex + _TG_ATTRS + __tg_tanh(double _Complex __x) {return ctanh(__x);} + +static long double _Complex + _TG_ATTRS + __tg_tanh(long double _Complex __x) {return ctanhl(__x);} + +#undef tanh +#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x)) + +// exp + +static float + _TG_ATTRS + __tg_exp(float __x) {return expf(__x);} + +static double + _TG_ATTRS + __tg_exp(double __x) {return exp(__x);} + +static long double + _TG_ATTRS + __tg_exp(long double __x) {return expl(__x);} + +static float _Complex + _TG_ATTRS + __tg_exp(float _Complex __x) {return cexpf(__x);} + +static double _Complex + _TG_ATTRS + __tg_exp(double _Complex __x) {return cexp(__x);} + +static long double _Complex + _TG_ATTRS + __tg_exp(long double _Complex __x) {return cexpl(__x);} + +#undef exp +#define exp(__x) __tg_exp(__tg_promote1((__x))(__x)) + +// log + +static float + _TG_ATTRS + __tg_log(float __x) {return logf(__x);} + +static double + _TG_ATTRS + __tg_log(double __x) {return log(__x);} + +static long double 
+ _TG_ATTRS + __tg_log(long double __x) {return logl(__x);} + +static float _Complex + _TG_ATTRS + __tg_log(float _Complex __x) {return clogf(__x);} + +static double _Complex + _TG_ATTRS + __tg_log(double _Complex __x) {return clog(__x);} + +static long double _Complex + _TG_ATTRS + __tg_log(long double _Complex __x) {return clogl(__x);} + +#undef log +#define log(__x) __tg_log(__tg_promote1((__x))(__x)) + +// pow + +static float + _TG_ATTRS + __tg_pow(float __x, float __y) {return powf(__x, __y);} + +static double + _TG_ATTRS + __tg_pow(double __x, double __y) {return pow(__x, __y);} + +static long double + _TG_ATTRS + __tg_pow(long double __x, long double __y) {return powl(__x, __y);} + +static float _Complex + _TG_ATTRS + __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);} + +static double _Complex + _TG_ATTRS + __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);} + +static long double _Complex + _TG_ATTRS + __tg_pow(long double _Complex __x, long double _Complex __y) + {return cpowl(__x, __y);} + +#undef pow +#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// sqrt + +static float + _TG_ATTRS + __tg_sqrt(float __x) {return sqrtf(__x);} + +static double + _TG_ATTRS + __tg_sqrt(double __x) {return sqrt(__x);} + +static long double + _TG_ATTRS + __tg_sqrt(long double __x) {return sqrtl(__x);} + +static float _Complex + _TG_ATTRS + __tg_sqrt(float _Complex __x) {return csqrtf(__x);} + +static double _Complex + _TG_ATTRS + __tg_sqrt(double _Complex __x) {return csqrt(__x);} + +static long double _Complex + _TG_ATTRS + __tg_sqrt(long double _Complex __x) {return csqrtl(__x);} + +#undef sqrt +#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x)) + +// fabs + +static float + _TG_ATTRS + __tg_fabs(float __x) {return fabsf(__x);} + +static double + _TG_ATTRS + __tg_fabs(double __x) {return fabs(__x);} + +static long double + _TG_ATTRS + __tg_fabs(long double __x) {return fabsl(__x);} + +static float + _TG_ATTRS + __tg_fabs(float _Complex __x) {return cabsf(__x);} + +static double + _TG_ATTRS + __tg_fabs(double _Complex __x) {return cabs(__x);} + +static long double + _TG_ATTRS + __tg_fabs(long double _Complex __x) {return cabsl(__x);} + +#undef fabs +#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x)) + +// atan2 + +static float + _TG_ATTRS + __tg_atan2(float __x, float __y) {return atan2f(__x, __y);} + +static double + _TG_ATTRS + __tg_atan2(double __x, double __y) {return atan2(__x, __y);} + +static long double + _TG_ATTRS + __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);} + +#undef atan2 +#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// cbrt + +static float + _TG_ATTRS + __tg_cbrt(float __x) {return cbrtf(__x);} + +static double + _TG_ATTRS + __tg_cbrt(double __x) {return cbrt(__x);} + +static long double + _TG_ATTRS + __tg_cbrt(long double __x) {return cbrtl(__x);} + +#undef cbrt +#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x)) + +// ceil + +static float + _TG_ATTRS + __tg_ceil(float __x) {return ceilf(__x);} + +static double + _TG_ATTRS + __tg_ceil(double __x) {return ceil(__x);} + +static long double + _TG_ATTRS + __tg_ceil(long double __x) {return ceill(__x);} + +#undef ceil +#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x)) + +// copysign + +static float + _TG_ATTRS + __tg_copysign(float __x, float __y) {return copysignf(__x, __y);} + +static double + _TG_ATTRS + __tg_copysign(double 
__x, double __y) {return copysign(__x, __y);} + +static long double + _TG_ATTRS + __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);} + +#undef copysign +#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// erf + +static float + _TG_ATTRS + __tg_erf(float __x) {return erff(__x);} + +static double + _TG_ATTRS + __tg_erf(double __x) {return erf(__x);} + +static long double + _TG_ATTRS + __tg_erf(long double __x) {return erfl(__x);} + +#undef erf +#define erf(__x) __tg_erf(__tg_promote1((__x))(__x)) + +// erfc + +static float + _TG_ATTRS + __tg_erfc(float __x) {return erfcf(__x);} + +static double + _TG_ATTRS + __tg_erfc(double __x) {return erfc(__x);} + +static long double + _TG_ATTRS + __tg_erfc(long double __x) {return erfcl(__x);} + +#undef erfc +#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x)) + +// exp2 + +static float + _TG_ATTRS + __tg_exp2(float __x) {return exp2f(__x);} + +static double + _TG_ATTRS + __tg_exp2(double __x) {return exp2(__x);} + +static long double + _TG_ATTRS + __tg_exp2(long double __x) {return exp2l(__x);} + +#undef exp2 +#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x)) + +// expm1 + +static float + _TG_ATTRS + __tg_expm1(float __x) {return expm1f(__x);} + +static double + _TG_ATTRS + __tg_expm1(double __x) {return expm1(__x);} + +static long double + _TG_ATTRS + __tg_expm1(long double __x) {return expm1l(__x);} + +#undef expm1 +#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x)) + +// fdim + +static float + _TG_ATTRS + __tg_fdim(float __x, float __y) {return fdimf(__x, __y);} + +static double + _TG_ATTRS + __tg_fdim(double __x, double __y) {return fdim(__x, __y);} + +static long double + _TG_ATTRS + __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);} + +#undef fdim +#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// floor + +static float + _TG_ATTRS + __tg_floor(float __x) {return floorf(__x);} + +static double + _TG_ATTRS + __tg_floor(double __x) {return floor(__x);} + +static long double + _TG_ATTRS + __tg_floor(long double __x) {return floorl(__x);} + +#undef floor +#define floor(__x) __tg_floor(__tg_promote1((__x))(__x)) + +// fma + +static float + _TG_ATTRS + __tg_fma(float __x, float __y, float __z) + {return fmaf(__x, __y, __z);} + +static double + _TG_ATTRS + __tg_fma(double __x, double __y, double __z) + {return fma(__x, __y, __z);} + +static long double + _TG_ATTRS + __tg_fma(long double __x,long double __y, long double __z) + {return fmal(__x, __y, __z);} + +#undef fma +#define fma(__x, __y, __z) \ + __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \ + __tg_promote3((__x), (__y), (__z))(__y), \ + __tg_promote3((__x), (__y), (__z))(__z)) + +// fmax + +static float + _TG_ATTRS + __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);} + +static double + _TG_ATTRS + __tg_fmax(double __x, double __y) {return fmax(__x, __y);} + +static long double + _TG_ATTRS + __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);} + +#undef fmax +#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// fmin + +static float + _TG_ATTRS + __tg_fmin(float __x, float __y) {return fminf(__x, __y);} + +static double + _TG_ATTRS + __tg_fmin(double __x, double __y) {return fmin(__x, __y);} + +static long double + _TG_ATTRS + __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);} + +#undef fmin +#define fmin(__x, 
__y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// fmod + +static float + _TG_ATTRS + __tg_fmod(float __x, float __y) {return fmodf(__x, __y);} + +static double + _TG_ATTRS + __tg_fmod(double __x, double __y) {return fmod(__x, __y);} + +static long double + _TG_ATTRS + __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);} + +#undef fmod +#define fmod(__x, __y) __tg_fmod(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// frexp + +static float + _TG_ATTRS + __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);} + +static double + _TG_ATTRS + __tg_frexp(double __x, int* __y) {return frexp(__x, __y);} + +static long double + _TG_ATTRS + __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);} + +#undef frexp +#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y) + +// hypot + +static float + _TG_ATTRS + __tg_hypot(float __x, float __y) {return hypotf(__x, __y);} + +static double + _TG_ATTRS + __tg_hypot(double __x, double __y) {return hypot(__x, __y);} + +static long double + _TG_ATTRS + __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);} + +#undef hypot +#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// ilogb + +static int + _TG_ATTRS + __tg_ilogb(float __x) {return ilogbf(__x);} + +static int + _TG_ATTRS + __tg_ilogb(double __x) {return ilogb(__x);} + +static int + _TG_ATTRS + __tg_ilogb(long double __x) {return ilogbl(__x);} + +#undef ilogb +#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x)) + +// ldexp + +static float + _TG_ATTRS + __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);} + +static double + _TG_ATTRS + __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);} + +static long double + _TG_ATTRS + __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);} + +#undef ldexp +#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y) + +// lgamma + +static float + _TG_ATTRS + __tg_lgamma(float __x) {return lgammaf(__x);} + +static double + _TG_ATTRS + __tg_lgamma(double __x) {return lgamma(__x);} + +static long double + _TG_ATTRS + __tg_lgamma(long double __x) {return lgammal(__x);} + +#undef lgamma +#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x)) + +// llrint + +static long long + _TG_ATTRS + __tg_llrint(float __x) {return llrintf(__x);} + +static long long + _TG_ATTRS + __tg_llrint(double __x) {return llrint(__x);} + +static long long + _TG_ATTRS + __tg_llrint(long double __x) {return llrintl(__x);} + +#undef llrint +#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x)) + +// llround + +static long long + _TG_ATTRS + __tg_llround(float __x) {return llroundf(__x);} + +static long long + _TG_ATTRS + __tg_llround(double __x) {return llround(__x);} + +static long long + _TG_ATTRS + __tg_llround(long double __x) {return llroundl(__x);} + +#undef llround +#define llround(__x) __tg_llround(__tg_promote1((__x))(__x)) + +// log10 + +static float + _TG_ATTRS + __tg_log10(float __x) {return log10f(__x);} + +static double + _TG_ATTRS + __tg_log10(double __x) {return log10(__x);} + +static long double + _TG_ATTRS + __tg_log10(long double __x) {return log10l(__x);} + +#undef log10 +#define log10(__x) __tg_log10(__tg_promote1((__x))(__x)) + +// log1p + +static float + _TG_ATTRS + __tg_log1p(float __x) {return log1pf(__x);} + +static double + _TG_ATTRS + __tg_log1p(double __x) {return log1p(__x);} + +static long double + _TG_ATTRS + __tg_log1p(long double __x) {return 
log1pl(__x);} + +#undef log1p +#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x)) + +// log2 + +static float + _TG_ATTRS + __tg_log2(float __x) {return log2f(__x);} + +static double + _TG_ATTRS + __tg_log2(double __x) {return log2(__x);} + +static long double + _TG_ATTRS + __tg_log2(long double __x) {return log2l(__x);} + +#undef log2 +#define log2(__x) __tg_log2(__tg_promote1((__x))(__x)) + +// logb + +static float + _TG_ATTRS + __tg_logb(float __x) {return logbf(__x);} + +static double + _TG_ATTRS + __tg_logb(double __x) {return logb(__x);} + +static long double + _TG_ATTRS + __tg_logb(long double __x) {return logbl(__x);} + +#undef logb +#define logb(__x) __tg_logb(__tg_promote1((__x))(__x)) + +// lrint + +static long + _TG_ATTRS + __tg_lrint(float __x) {return lrintf(__x);} + +static long + _TG_ATTRS + __tg_lrint(double __x) {return lrint(__x);} + +static long + _TG_ATTRS + __tg_lrint(long double __x) {return lrintl(__x);} + +#undef lrint +#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x)) + +// lround + +static long + _TG_ATTRS + __tg_lround(float __x) {return lroundf(__x);} + +static long + _TG_ATTRS + __tg_lround(double __x) {return lround(__x);} + +static long + _TG_ATTRS + __tg_lround(long double __x) {return lroundl(__x);} + +#undef lround +#define lround(__x) __tg_lround(__tg_promote1((__x))(__x)) + +// nearbyint + +static float + _TG_ATTRS + __tg_nearbyint(float __x) {return nearbyintf(__x);} + +static double + _TG_ATTRS + __tg_nearbyint(double __x) {return nearbyint(__x);} + +static long double + _TG_ATTRS + __tg_nearbyint(long double __x) {return nearbyintl(__x);} + +#undef nearbyint +#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x)) + +// nextafter + +static float + _TG_ATTRS + __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);} + +static double + _TG_ATTRS + __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);} + +static long double + _TG_ATTRS + __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);} + +#undef nextafter +#define nextafter(__x, __y) __tg_nextafter(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// nexttoward + +static float + _TG_ATTRS + __tg_nexttoward(float __x, long double __y) {return nexttowardf(__x, __y);} + +static double + _TG_ATTRS + __tg_nexttoward(double __x, long double __y) {return nexttoward(__x, __y);} + +static long double + _TG_ATTRS + __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);} + +#undef nexttoward +#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote1((__x))(__x), (__y)) + +// remainder + +static float + _TG_ATTRS + __tg_remainder(float __x, float __y) {return remainderf(__x, __y);} + +static double + _TG_ATTRS + __tg_remainder(double __x, double __y) {return remainder(__x, __y);} + +static long double + _TG_ATTRS + __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);} + +#undef remainder +#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \ + __tg_promote2((__x), (__y))(__y)) + +// remquo + +static float + _TG_ATTRS + __tg_remquo(float __x, float __y, int* __z) + {return remquof(__x, __y, __z);} + +static double + _TG_ATTRS + __tg_remquo(double __x, double __y, int* __z) + {return remquo(__x, __y, __z);} + +static long double + _TG_ATTRS + __tg_remquo(long double __x,long double __y, int* __z) + {return remquol(__x, __y, __z);} + +#undef remquo +#define remquo(__x, __y, __z) \ + __tg_remquo(__tg_promote2((__x), (__y))(__x), \ + 
__tg_promote2((__x), (__y))(__y), \ + (__z)) + +// rint + +static float + _TG_ATTRS + __tg_rint(float __x) {return rintf(__x);} + +static double + _TG_ATTRS + __tg_rint(double __x) {return rint(__x);} + +static long double + _TG_ATTRS + __tg_rint(long double __x) {return rintl(__x);} + +#undef rint +#define rint(__x) __tg_rint(__tg_promote1((__x))(__x)) + +// round + +static float + _TG_ATTRS + __tg_round(float __x) {return roundf(__x);} + +static double + _TG_ATTRS + __tg_round(double __x) {return round(__x);} + +static long double + _TG_ATTRS + __tg_round(long double __x) {return roundl(__x);} + +#undef round +#define round(__x) __tg_round(__tg_promote1((__x))(__x)) + +// scalbn + +static float + _TG_ATTRS + __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);} + +static double + _TG_ATTRS + __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);} + +static long double + _TG_ATTRS + __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);} + +#undef scalbn +#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y) + +// scalbln + +static float + _TG_ATTRS + __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);} + +static double + _TG_ATTRS + __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);} + +static long double + _TG_ATTRS + __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);} + +#undef scalbln +#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y) + +// tgamma + +static float + _TG_ATTRS + __tg_tgamma(float __x) {return tgammaf(__x);} + +static double + _TG_ATTRS + __tg_tgamma(double __x) {return tgamma(__x);} + +static long double + _TG_ATTRS + __tg_tgamma(long double __x) {return tgammal(__x);} + +#undef tgamma +#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x)) + +// trunc + +static float + _TG_ATTRS + __tg_trunc(float __x) {return truncf(__x);} + +static double + _TG_ATTRS + __tg_trunc(double __x) {return trunc(__x);} + +static long double + _TG_ATTRS + __tg_trunc(long double __x) {return truncl(__x);} + +#undef trunc +#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x)) + +// carg + +static float + _TG_ATTRS + __tg_carg(float __x) {return atan2f(0.F, __x);} + +static double + _TG_ATTRS + __tg_carg(double __x) {return atan2(0., __x);} + +static long double + _TG_ATTRS + __tg_carg(long double __x) {return atan2l(0.L, __x);} + +static float + _TG_ATTRS + __tg_carg(float _Complex __x) {return cargf(__x);} + +static double + _TG_ATTRS + __tg_carg(double _Complex __x) {return carg(__x);} + +static long double + _TG_ATTRS + __tg_carg(long double _Complex __x) {return cargl(__x);} + +#undef carg +#define carg(__x) __tg_carg(__tg_promote1((__x))(__x)) + +// cimag + +static float + _TG_ATTRS + __tg_cimag(float __x) {return 0;} + +static double + _TG_ATTRS + __tg_cimag(double __x) {return 0;} + +static long double + _TG_ATTRS + __tg_cimag(long double __x) {return 0;} + +static float + _TG_ATTRS + __tg_cimag(float _Complex __x) {return cimagf(__x);} + +static double + _TG_ATTRS + __tg_cimag(double _Complex __x) {return cimag(__x);} + +static long double + _TG_ATTRS + __tg_cimag(long double _Complex __x) {return cimagl(__x);} + +#undef cimag +#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x)) + +// conj + +static float _Complex + _TG_ATTRS + __tg_conj(float __x) {return __x;} + +static double _Complex + _TG_ATTRS + __tg_conj(double __x) {return __x;} + +static long double _Complex + _TG_ATTRS + __tg_conj(long double __x) {return __x;} + +static float _Complex + _TG_ATTRS + __tg_conj(float 
_Complex __x) {return conjf(__x);} + +static double _Complex + _TG_ATTRS + __tg_conj(double _Complex __x) {return conj(__x);} + +static long double _Complex + _TG_ATTRS + __tg_conj(long double _Complex __x) {return conjl(__x);} + +#undef conj +#define conj(__x) __tg_conj(__tg_promote1((__x))(__x)) + +// cproj + +static float _Complex + _TG_ATTRS + __tg_cproj(float __x) {return cprojf(__x);} + +static double _Complex + _TG_ATTRS + __tg_cproj(double __x) {return cproj(__x);} + +static long double _Complex + _TG_ATTRS + __tg_cproj(long double __x) {return cprojl(__x);} + +static float _Complex + _TG_ATTRS + __tg_cproj(float _Complex __x) {return cprojf(__x);} + +static double _Complex + _TG_ATTRS + __tg_cproj(double _Complex __x) {return cproj(__x);} + +static long double _Complex + _TG_ATTRS + __tg_cproj(long double _Complex __x) {return cprojl(__x);} + +#undef cproj +#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x)) + +// creal + +static float + _TG_ATTRS + __tg_creal(float __x) {return __x;} + +static double + _TG_ATTRS + __tg_creal(double __x) {return __x;} + +static long double + _TG_ATTRS + __tg_creal(long double __x) {return __x;} + +static float + _TG_ATTRS + __tg_creal(float _Complex __x) {return crealf(__x);} + +static double + _TG_ATTRS + __tg_creal(double _Complex __x) {return creal(__x);} + +static long double + _TG_ATTRS + __tg_creal(long double _Complex __x) {return creall(__x);} + +#undef creal +#define creal(__x) __tg_creal(__tg_promote1((__x))(__x)) + +#undef _TG_ATTRSp +#undef _TG_ATTRS + +#endif /* __cplusplus */ +#endif /* __has_include_next */ +#endif /* __CLANG_TGMATH_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tmmintrin.h new file mode 100644 index 0000000..35533e1 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tmmintrin.h @@ -0,0 +1,771 @@ +/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __TMMINTRIN_H +#define __TMMINTRIN_H + +#include + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3"), __min_vector_width__(64))) +#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,ssse3"), __min_vector_width__(64))) + +/// Computes the absolute value of each of the packed 8-bit signed +/// integers in the source operand and stores the 8-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSB instruction. +/// +/// \param __a +/// A 64-bit vector of [8 x i8]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_abs_pi8(__m64 __a) +{ + return (__m64)__builtin_ia32_pabsb((__v8qi)__a); +} + +/// Computes the absolute value of each of the packed 8-bit signed +/// integers in the source operand and stores the 8-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSB instruction. 
+/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi8(__m128i __a) +{ + return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a); +} + +/// Computes the absolute value of each of the packed 16-bit signed +/// integers in the source operand and stores the 16-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_abs_pi16(__m64 __a) +{ + return (__m64)__builtin_ia32_pabsw((__v4hi)__a); +} + +/// Computes the absolute value of each of the packed 16-bit signed +/// integers in the source operand and stores the 16-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi16(__m128i __a) +{ + return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a); +} + +/// Computes the absolute value of each of the packed 32-bit signed +/// integers in the source operand and stores the 32-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_abs_pi32(__m64 __a) +{ + return (__m64)__builtin_ia32_pabsd((__v2si)__a); +} + +/// Computes the absolute value of each of the packed 32-bit signed +/// integers in the source operand and stores the 32-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi32(__m128i __a) +{ + return (__m128i)__builtin_ia32_pabsd128((__v4si)__a); +} + +/// Concatenates the two 128-bit integer vector operands, and +/// right-shifts the result by the number of bytes specified in the immediate +/// operand. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_alignr_epi8(__m128i a, __m128i b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c PALIGNR instruction. +/// +/// \param a +/// A 128-bit vector of [16 x i8] containing one of the source operands. +/// \param b +/// A 128-bit vector of [16 x i8] containing one of the source operands. +/// \param n +/// An immediate operand specifying how many bytes to right-shift the result. +/// \returns A 128-bit integer vector containing the concatenated right-shifted +/// value. +#define _mm_alignr_epi8(a, b, n) \ + (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (n)) + +/// Concatenates the two 64-bit integer vector operands, and right-shifts +/// the result by the number of bytes specified in the immediate operand. 
+/// +/// \headerfile +/// +/// \code +/// __m64 _mm_alignr_pi8(__m64 a, __m64 b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c PALIGNR instruction. +/// +/// \param a +/// A 64-bit vector of [8 x i8] containing one of the source operands. +/// \param b +/// A 64-bit vector of [8 x i8] containing one of the source operands. +/// \param n +/// An immediate operand specifying how many bytes to right-shift the result. +/// \returns A 64-bit integer vector containing the concatenated right-shifted +/// value. +#define _mm_alignr_pi8(a, b, n) \ + (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)) + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal sums of +/// both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hadd_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [4 x i32] containing the horizontal sums of +/// both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hadd_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both +/// operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hadd_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. 
+/// \param __b +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both +/// operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hadd_pi32(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are +/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to +/// 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated +/// sums of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hadds_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are +/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to +/// 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated +/// sums of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hadds_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal differences +/// of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsub_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32] containing one of the source operands. 
The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [4 x i32] containing the horizontal differences +/// of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsub_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences +/// of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hsub_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences +/// of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hsub_pi32(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [8 x i16]. Positive differences greater than +/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are +/// saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated +/// differences of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [4 x i16]. Positive differences greater than +/// 0x7FFF are saturated to 0x7FFF. 
Negative differences less than 0x8000 are +/// saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated +/// differences of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hsubs_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b); +} + +/// Multiplies corresponding pairs of packed 8-bit unsigned integer +/// values contained in the first source operand and packed 8-bit signed +/// integer values contained in the second source operand, adds pairs of +/// contiguous products with signed saturation, and writes the 16-bit sums to +/// the corresponding bits in the destination. +/// +/// For example, bits [7:0] of both operands are multiplied, bits [15:8] of +/// both operands are multiplied, and the sum of both results is written to +/// bits [15:0] of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDUBSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the first source operand. +/// \param __b +/// A 128-bit integer vector containing the second source operand. +/// \returns A 128-bit integer vector containing the sums of products of both +/// operands: \n +/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n +/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n +/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n +/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) \n +/// \a R4 := (\a __a8 * \a __b8) + (\a __a9 * \a __b9) \n +/// \a R5 := (\a __a10 * \a __b10) + (\a __a11 * \a __b11) \n +/// \a R6 := (\a __a12 * \a __b12) + (\a __a13 * \a __b13) \n +/// \a R7 := (\a __a14 * \a __b14) + (\a __a15 * \a __b15) +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maddubs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b); +} + +/// Multiplies corresponding pairs of packed 8-bit unsigned integer +/// values contained in the first source operand and packed 8-bit signed +/// integer values contained in the second source operand, adds pairs of +/// contiguous products with signed saturation, and writes the 16-bit sums to +/// the corresponding bits in the destination. +/// +/// For example, bits [7:0] of both operands are multiplied, bits [15:8] of +/// both operands are multiplied, and the sum of both results is written to +/// bits [15:0] of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PMADDUBSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the first source operand. +/// \param __b +/// A 64-bit integer vector containing the second source operand. 
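+/// (Editorial worked example, not part of the upstream header: with
+/// __a = {200, 100, 0, ...} treated as unsigned and __b = {2, -1, 0, ...}
+/// treated as signed, the first destination word is
+/// 200*2 + 100*(-1) = 300.)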
+/// \returns A 64-bit integer vector containing the sums of products of both +/// operands: \n +/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n +/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n +/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n +/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_maddubs_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b); +} + +/// Multiplies packed 16-bit signed integer values, truncates the 32-bit +/// products to the 18 most significant bits by right-shifting, rounds the +/// truncated value by adding 1, and writes bits [16:1] to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHRSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. +/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled +/// products of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mulhrs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies packed 16-bit signed integer values, truncates the 32-bit +/// products to the 18 most significant bits by right-shifting, rounds the +/// truncated value by adding 1, and writes bits [16:1] to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PMULHRSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. +/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled +/// products of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_mulhrs_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b); +} + +/// Copies the 8-bit integers from a 128-bit integer vector to the +/// destination or clears 8-bit values in the destination, as specified by +/// the second source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSHUFB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control bytes corresponding to +/// positions in the destination: +/// Bit 7: \n +/// 1: Clear the corresponding byte in the destination. \n +/// 0: Copy the selected source byte to the corresponding byte in the +/// destination. \n +/// Bits [6:4] Reserved. \n +/// Bits [3:0] select the source byte to be copied. +/// \returns A 128-bit integer vector containing the copied or cleared values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shuffle_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b); +} + +/// Copies the 8-bit integers from a 64-bit integer vector to the +/// destination or clears 8-bit values in the destination, as specified by +/// the second source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSHUFB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. 
+/// \param __b +/// A 64-bit integer vector containing control bytes corresponding to +/// positions in the destination: +/// Bit 7: \n +/// 1: Clear the corresponding byte in the destination. \n +/// 0: Copy the selected source byte to the corresponding byte in the +/// destination. \n +/// Bits [3:0] select the source byte to be copied. +/// \returns A 64-bit integer vector containing the copied or cleared values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_shuffle_pi8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b); +} + +/// For each 8-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the byte in the second source is negative, calculate the two's +/// complement of the corresponding byte in the first source, and write that +/// value to the destination. If the byte in the second source is positive, +/// copy the corresponding byte from the first source to the destination. If +/// the byte in the second source is zero, clear the corresponding byte in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control bytes corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sign_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b); +} + +/// For each 16-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the word in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. If the word in the second source is positive, +/// copy the corresponding word from the first source to the destination. If +/// the word in the second source is zero, clear the corresponding word in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control words corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sign_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b); +} + +/// For each 32-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the doubleword in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. If the doubleword in the second source is +/// positive, copy the corresponding word from the first source to the +/// destination. If the doubleword in the second source is zero, clear the +/// corresponding word in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGND instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. 
+/// \param __b +/// A 128-bit integer vector containing control doublewords corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sign_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b); +} + +/// For each 8-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the byte in the second source is negative, calculate the two's +/// complement of the corresponding byte in the first source, and write that +/// value to the destination. If the byte in the second source is positive, +/// copy the corresponding byte from the first source to the destination. If +/// the byte in the second source is zero, clear the corresponding byte in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGNB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control bytes corresponding to +/// positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sign_pi8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b); +} + +/// For each 16-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the word in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. If the word in the second source is positive, +/// copy the corresponding word from the first source to the destination. If +/// the word in the second source is zero, clear the corresponding word in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGNW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control words corresponding to +/// positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sign_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b); +} + +/// For each 32-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the doubleword in the second source is negative, calculate the two's +/// complement of the corresponding doubleword in the first source, and +/// write that value to the destination. If the doubleword in the second +/// source is positive, copy the corresponding doubleword from the first +/// source to the destination. If the doubleword in the second source is +/// zero, clear the corresponding doubleword in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGND instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing two control doublewords corresponding +/// to positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. 
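+///
+/// (Editorial example, not part of the upstream header: with
+/// __a = {5, -7} and __b = {-1, 0}, the result is {-5, 0}: the first
+/// element is negated because __b[0] < 0 and the second is cleared
+/// because __b[1] == 0.)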
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_sign_pi32(__m64 __a, __m64 __b)
+{
+    return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_MMX
+
+#endif /* __TMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tsxldtrkintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tsxldtrkintrin.h
new file mode 100644
index 0000000..491823e9
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/tsxldtrkintrin.h
@@ -0,0 +1,56 @@
+/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <tsxldtrkintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __TSXLDTRKINTRIN_H
+#define __TSXLDTRKINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("tsxldtrk")))
+
+/// Marks the start of a TSX (RTM) suspend load address tracking region. If
+/// this intrinsic is used inside a transactional region, subsequent loads
+/// are not added to the read set of the transaction. If it's used inside a
+/// suspend load address tracking region it will cause a transaction abort.
+/// If it's used outside of a transactional region it behaves like a NOP.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c XSUSLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xsusldtrk (void)
+{
+  __builtin_ia32_xsusldtrk();
+}
+
+/// Marks the end of a TSX (RTM) suspend load address tracking region. If this
+/// intrinsic is used inside a suspend load address tracking region it will
+/// end the suspend region and all following load addresses will be added to
+/// the transaction read set. If it's used inside an active transaction but
+/// not in a suspend region it will cause a transaction abort. If it's used
+/// outside of a transactional region it behaves like a NOP.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c XRESLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xresldtrk (void)
+{
+  __builtin_ia32_xresldtrk();
+}
+
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __TSXLDTRKINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/uintrintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/uintrintrin.h
new file mode 100644
index 0000000..e3839dc
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/uintrintrin.h
@@ -0,0 +1,157 @@
+/*===------------------ uintrintrin.h - UINTR intrinsics -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#error "Never use <uintrintrin.h> directly; include <x86gprintrin.h> instead."
+#endif + +#ifndef __UINTRINTRIN_H +#define __UINTRINTRIN_H + +/* Define the default attributes for the functions in this file */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("uintr"))) + +#ifdef __x86_64__ + +struct __uintr_frame +{ + unsigned long long rip; + unsigned long long rflags; + unsigned long long rsp; +}; + +/// Clears the user interrupt flag (UIF). Its effect takes place immediately: a +/// user interrupt cannot be delivered on the instruction boundary following +/// CLUI. Can be executed only if CR4.UINT = 1, the logical processor is in +/// 64-bit mode, and software is not executing inside an enclave; otherwise, +/// each causes an invalid-opcode exception. Causes a transactional abort if +/// executed inside a transactional region; the abort loads EAX as it would +/// had it been due to an execution of CLI. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLUI instruction. +/// +/// \operation +/// UIF := 0 +/// \endoperation +static __inline__ void __DEFAULT_FN_ATTRS +_clui (void) +{ + __builtin_ia32_clui(); +} + +/// Sets the user interrupt flag (UIF). Its effect takes place immediately; a +/// user interrupt may be delivered on the instruction boundary following +/// STUI. Can be executed only if CR4.UINT = 1, the logical processor is in +/// 64-bit mode, and software is not executing inside an enclave; otherwise, +/// each causes an invalid-opcode exception. Causes a transactional abort if +/// executed inside a transactional region; the abort loads EAX as it would +/// had it been due to an execution of STI. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the STUI instruction. +/// +/// \operation +/// UIF := 1 +/// \endoperation +static __inline__ void __DEFAULT_FN_ATTRS +_stui (void) +{ + __builtin_ia32_stui(); +} + +/// Get the current value of the user interrupt flag (UIF). Can be executed +/// regardless of CPL and inside a transactional region. Can be executed only +/// if CR4.UINT = 1, the logical processor is in 64-bit mode, and software is +/// not executing inside an enclave; otherwise, it causes an invalid-opcode +/// exception. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TESTUI instruction. +/// +/// \returns The current value of the user interrupt flag (UIF). +/// +/// \operation +/// CF := UIF +/// ZF := 0 +/// AF := 0 +/// OF := 0 +/// PF := 0 +/// SF := 0 +/// dst := CF +/// \endoperation +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_testui (void) +{ + return __builtin_ia32_testui(); +} + +/// Send interprocessor user interrupt. Can be executed only if +/// CR4.UINT = IA32_UINT_TT[0] = 1, the logical processor is in 64-bit mode, +/// and software is not executing inside an enclave; otherwise, it causes an +/// invalid-opcode exception. May be executed at any privilege level, all of +/// its memory accesses are performed with supervisor privilege. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the SENDUIPI instruction +/// +/// \param __a +/// Index of user-interrupt target table entry in user-interrupt target +/// table. 
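+///
+/// (Editorial note, not part of the upstream header: e.g. _senduipi(3)
+/// posts the user interrupt described by entry 3 of the user-interrupt
+/// target table, possibly sending a notification IPI as detailed in the
+/// operation below.)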
+///
+/// \operation
+///   IF __a > UITTSZ
+///     GP (0)
+///   FI
+///   tempUITTE := MEM[UITTADDR + (a<<4)]
+///   // tempUITTE must be valid, and can't have any reserved bit set
+///   IF (tempUITTE.V == 0 OR tempUITTE[7:1] != 0)
+///     GP (0)
+///   FI
+///   tempUPID := MEM[tempUITTE.UPIDADDR] // under lock
+///   // tempUPID can't have any reserved bit set
+///   IF (tempUPID[15:2] != 0 OR tempUPID[31:24] != 0)
+///     GP (0) // release lock
+///   FI
+///   tempUPID.PIR[tempUITTE.UV] := 1;
+///   IF (tempUPID.SN == 0 AND tempUPID.ON == 0)
+///     tempUPID.ON := 1
+///     sendNotify := 1
+///   ELSE
+///     sendNotify := 0
+///   FI
+///   MEM[tempUITTE.UPIDADDR] := tempUPID // release lock
+///   IF sendNotify == 1
+///     IF IA32_APIC_BASE[10] == 1 // local APIC is in x2APIC mode
+///       // send ordinary IPI with vector tempUPID.NV to 32-bit physical APIC
+///       // ID tempUPID.NDST
+///       SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST)
+///     ELSE
+///       // send ordinary IPI with vector tempUPID.NV to 8-bit physical APIC
+///       // ID tempUPID.NDST[15:8]
+///       SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])
+///     FI
+///   FI
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_senduipi (unsigned long long __a)
+{
+  __builtin_ia32_senduipi(__a);
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __UINTRINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/unwind.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/unwind.h
new file mode 100644
index 0000000..029524b
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/unwind.h
@@ -0,0 +1,327 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base. */
+
+#ifndef __CLANG_UNWIND_H
+#define __CLANG_UNWIND_H
+
+#if defined(__APPLE__) && __has_include_next(<unwind.h>)
+/* Darwin (from 11.x on) provides an unwind.h. If that's available,
+ * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include. */
+# ifndef _GNU_SOURCE
+#  define _SHOULD_UNDEFINE_GNU_SOURCE
+#  define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own).
+// We imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+#  include_next <unwind.h>
+# else
+#  pragma GCC visibility push(default)
+#  include_next <unwind.h>
+#  pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+#  undef _GNU_SOURCE
+#  undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+   symbols it declares, but this matches gcc's behavior and some programs
+   depend on it */
+#ifndef HIDE_EXPORTS
+#pragma GCC visibility push(default)
+#endif
+
+typedef uintptr_t _Unwind_Word __attribute__((__mode__(__unwind_word__)));
+typedef intptr_t _Unwind_Sword __attribute__((__mode__(__unwind_word__)));
+typedef uintptr_t _Unwind_Ptr;
+typedef uintptr_t _Unwind_Internal_Ptr;
+typedef uint64_t _Unwind_Exception_Class;
+
+typedef intptr_t _sleb128_t;
+typedef uintptr_t _uleb128_t;
+
+struct _Unwind_Context;
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+struct _Unwind_Control_Block;
+typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
+#else
+struct _Unwind_Exception;
+typedef struct _Unwind_Exception _Unwind_Exception;
+#endif
+typedef enum {
+  _URC_NO_REASON = 0,
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+    !defined(__ARM_DWARF_EH__)
+  _URC_OK = 0, /* used by ARM EHABI */
+#endif
+  _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+  _URC_FATAL_PHASE2_ERROR = 2,
+  _URC_FATAL_PHASE1_ERROR = 3,
+  _URC_NORMAL_STOP = 4,
+
+  _URC_END_OF_STACK = 5,
+  _URC_HANDLER_FOUND = 6,
+  _URC_INSTALL_CONTEXT = 7,
+  _URC_CONTINUE_UNWIND = 8,
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+    !defined(__ARM_DWARF_EH__)
+  _URC_FAILURE = 9 /* used by ARM EHABI */
+#endif
+} _Unwind_Reason_Code;
+
+typedef enum {
+  _UA_SEARCH_PHASE = 1,
+  _UA_CLEANUP_PHASE = 2,
+
+  _UA_HANDLER_FRAME = 4,
+  _UA_FORCE_UNWIND = 8,
+  _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
+} _Unwind_Action;
+
+typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
+                                             _Unwind_Exception *);
+
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+typedef uint32_t _Unwind_EHT_Header;
+
+struct _Unwind_Control_Block {
+  uint64_t exception_class;
+  void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
+  /* unwinder cache (private fields for the unwinder's use) */
+  struct {
+    uint32_t reserved1; /* forced unwind stop function, 0 if not forced */
+    uint32_t reserved2; /* personality routine */
+    uint32_t reserved3; /* callsite */
+    uint32_t reserved4; /* forced unwind stop argument */
+    uint32_t reserved5;
+  } unwinder_cache;
+  /* propagation barrier cache (valid after phase 1) */
+  struct {
+    uint32_t sp;
+    uint32_t bitpattern[5];
+  } barrier_cache;
+  /* cleanup cache (preserved over cleanup) */
+  struct {
+    uint32_t bitpattern[4];
+  } cleanup_cache;
+  /* personality cache (for personality's benefit) */
+  struct {
+    uint32_t fnstart;         /* function start address */
+    _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
+    uint32_t additional;      /* additional data */
+    uint32_t reserved1;
+  } pr_cache;
+  long long int : 0; /* force alignment of next item to 8-byte boundary */
+} __attribute__((__aligned__(8)));
+#else
+struct _Unwind_Exception {
+  _Unwind_Exception_Class exception_class;
+  _Unwind_Exception_Cleanup_Fn exception_cleanup;
+#if !defined (__USING_SJLJ_EXCEPTIONS__) && defined (__SEH__)
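+  /* Editorial note (not in the upstream header): Windows SEH unwinding
+   * reserves six private words here, versus the two-word private area
+   * declared for other targets just below. */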
_Unwind_Word private_[6]; +#else + _Unwind_Word private_1; + _Unwind_Word private_2; +#endif + /* The Itanium ABI requires that _Unwind_Exception objects are "double-word + * aligned". GCC has interpreted this to mean "use the maximum useful + * alignment for the target"; so do we. */ +} __attribute__((__aligned__)); +#endif + +typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action, + _Unwind_Exception_Class, + _Unwind_Exception *, + struct _Unwind_Context *, + void *); + +typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(int, _Unwind_Action, + _Unwind_Exception_Class, + _Unwind_Exception *, + struct _Unwind_Context *); +typedef _Unwind_Personality_Fn __personality_routine; + +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *, + void *); + +#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__)) +typedef enum { + _UVRSC_CORE = 0, /* integer register */ + _UVRSC_VFP = 1, /* vfp */ + _UVRSC_WMMXD = 3, /* Intel WMMX data register */ + _UVRSC_WMMXC = 4 /* Intel WMMX control register */ +} _Unwind_VRS_RegClass; + +typedef enum { + _UVRSD_UINT32 = 0, + _UVRSD_VFPX = 1, + _UVRSD_UINT64 = 3, + _UVRSD_FLOAT = 4, + _UVRSD_DOUBLE = 5 +} _Unwind_VRS_DataRepresentation; + +typedef enum { + _UVRSR_OK = 0, + _UVRSR_NOT_IMPLEMENTED = 1, + _UVRSR_FAILED = 2 +} _Unwind_VRS_Result; + +typedef uint32_t _Unwind_State; +#define _US_VIRTUAL_UNWIND_FRAME ((_Unwind_State)0) +#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1) +#define _US_UNWIND_FRAME_RESUME ((_Unwind_State)2) +#define _US_ACTION_MASK ((_Unwind_State)3) +#define _US_FORCE_UNWIND ((_Unwind_State)8) + +_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context, + _Unwind_VRS_RegClass __regclass, + uint32_t __regno, + _Unwind_VRS_DataRepresentation __representation, + void *__valuep); + +_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context, + _Unwind_VRS_RegClass __regclass, + uint32_t __regno, + _Unwind_VRS_DataRepresentation __representation, + void *__valuep); + +static __inline__ +_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) { + _Unwind_Word __value; + _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value); + return __value; +} + +static __inline__ +void _Unwind_SetGR(struct _Unwind_Context *__context, int __index, + _Unwind_Word __value) { + _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value); +} + +static __inline__ +_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) { + _Unwind_Word __ip = _Unwind_GetGR(__context, 15); + return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. 
*/ +} + +static __inline__ +void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) { + _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1; + _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit); +} +#else +_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int); +void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word); + +_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *); +void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word); +#endif + + +_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *); + +_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *); + +_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *); + +void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *); + +_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *); + +/* DWARF EH functions; currently not available on Darwin/ARM */ +#if !defined(__APPLE__) || !defined(__arm__) +_Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Exception *); +_Unwind_Reason_Code _Unwind_ForcedUnwind(_Unwind_Exception *, _Unwind_Stop_Fn, + void *); +void _Unwind_DeleteException(_Unwind_Exception *); +void _Unwind_Resume(_Unwind_Exception *); +_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(_Unwind_Exception *); + +#endif + +_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *); + +/* setjmp(3)/longjmp(3) stuff */ +typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t; + +void _Unwind_SjLj_Register(_Unwind_FunctionContext_t); +void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t); +_Unwind_Reason_Code _Unwind_SjLj_RaiseException(_Unwind_Exception *); +_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(_Unwind_Exception *, + _Unwind_Stop_Fn, void *); +void _Unwind_SjLj_Resume(_Unwind_Exception *); +_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(_Unwind_Exception *); + +void *_Unwind_FindEnclosingFunction(void *); + +#ifdef __APPLE__ + +_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *) + __attribute__((__unavailable__)); +_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *) + __attribute__((__unavailable__)); + +/* Darwin-specific functions */ +void __register_frame(const void *); +void __deregister_frame(const void *); + +struct dwarf_eh_bases { + uintptr_t tbase; + uintptr_t dbase; + uintptr_t func; +}; +void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *); + +void __register_frame_info_bases(const void *, void *, void *, void *) + __attribute__((__unavailable__)); +void __register_frame_info(const void *, void *) __attribute__((__unavailable__)); +void __register_frame_info_table_bases(const void *, void*, void *, void *) + __attribute__((__unavailable__)); +void __register_frame_info_table(const void *, void *) + __attribute__((__unavailable__)); +void __register_frame_table(const void *) __attribute__((__unavailable__)); +void __deregister_frame_info(const void *) __attribute__((__unavailable__)); +void __deregister_frame_info_bases(const void *)__attribute__((__unavailable__)); + +#else + +_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *); +_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *); + +#endif + + +#ifndef HIDE_EXPORTS +#pragma GCC visibility pop +#endif + +#ifdef __cplusplus +} +#endif + +#endif + +#endif /* __CLANG_UNWIND_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vadefs.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vadefs.h new file mode 100644 index 0000000..b617568 --- /dev/null +++ 
b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vadefs.h
@@ -0,0 +1,51 @@
+/* ===-------- vadefs.h ---------------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we are aiming for MSVC compatibility. */
+#ifndef _MSC_VER
+#include_next <vadefs.h>
+#else
+
+#ifndef __clang_vadefs_h
+#define __clang_vadefs_h
+
+#include_next <vadefs.h>
+
+/* Override macros from vadefs.h with definitions that work with Clang. */
+#ifdef _crt_va_start
+#undef _crt_va_start
+#define _crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef _crt_va_end
+#undef _crt_va_end
+#define _crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef _crt_va_arg
+#undef _crt_va_arg
+#define _crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+/* VS 2015 switched to double underscore names, which is an improvement, but now
+ * we have to intercept those names too.
+ */
+#ifdef __crt_va_start
+#undef __crt_va_start
+#define __crt_va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#ifdef __crt_va_end
+#undef __crt_va_end
+#define __crt_va_end(ap) __builtin_va_end(ap)
+#endif
+#ifdef __crt_va_arg
+#undef __crt_va_arg
+#define __crt_va_arg(ap, type) __builtin_va_arg(ap, type)
+#endif
+
+#endif
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vaesintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vaesintrin.h
new file mode 100644
index 0000000..f3c0807
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vaesintrin.h
@@ -0,0 +1,85 @@
+/*===------------------ vaesintrin.h - VAES intrinsics ---------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <vaesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __VAESINTRIN_H
+#define __VAESINTRIN_H
+
+/* Default attributes for YMM forms. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes"), __min_vector_width__(256)))
+
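+/* Editorial usage sketch, not part of the upstream header. Assuming a
+ * build with -mvaes (and AVX2 for the YMM type), a single call performs
+ * one AES encryption round on two independent 128-bit blocks packed in
+ * one 256-bit register:
+ *
+ *   #include <immintrin.h>
+ *   static inline __m256i aes_round_x2(__m256i blocks, __m256i round_keys) {
+ *     // Each 128-bit lane: ShiftRows, SubBytes, MixColumns, AddRoundKey.
+ *     return _mm256_aesenc_epi128(blocks, round_keys);
+ *   }
+ */
+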
+/* Default attributes for ZMM forms. */
+#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512f,vaes"), __min_vector_width__(512)))
+
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+ _mm256_aesenc_epi128(__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_aesenc256((__v4di) __A,
+              (__v4di) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+ _mm256_aesdec_epi128(__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_aesdec256((__v4di) __A,
+              (__v4di) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+ _mm256_aesenclast_epi128(__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A,
+              (__v4di) __B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+ _mm256_aesdeclast_epi128(__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A,
+              (__v4di) __B);
+}
+
+#ifdef __AVX512FINTRIN_H
+static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+ _mm512_aesenc_epi128(__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_aesenc512((__v8di) __A,
+              (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+ _mm512_aesdec_epi128(__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_aesdec512((__v8di) __A,
+              (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+ _mm512_aesenclast_epi128(__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A,
+              (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+ _mm512_aesdeclast_epi128(__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_aesdeclast512((__v8di) __A,
+              (__v8di) __B);
+}
+#endif // __AVX512FINTRIN_H
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_F
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/varargs.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/varargs.h
new file mode 100644
index 0000000..d241b7d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/varargs.h
@@ -0,0 +1,12 @@
+/*===---- varargs.h - Variable argument handling -------------------------------------===
+*
+* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+* See https://llvm.org/LICENSE.txt for license information.
+* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+*
+*===-----------------------------------------------------------------------===
+*/
+#ifndef __VARARGS_H
+#define __VARARGS_H
+  #error "Please use <stdarg.h> instead of <varargs.h>"
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vecintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vecintrin.h
new file mode 100644
index 0000000..ec1dbfd
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vecintrin.h
@@ -0,0 +1,11127 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if defined(__s390x__) && defined(__VEC__) + +#define __ATTRS_ai __attribute__((__always_inline__)) +#define __ATTRS_o __attribute__((__overloadable__)) +#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__)) + +#define __constant(PARM) \ + __attribute__((__enable_if__ ((PARM) == (PARM), \ + "argument must be a constant integer"))) +#define __constant_range(PARM, LOW, HIGH) \ + __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \ + "argument must be a constant integer from " #LOW " to " #HIGH))) +#define __constant_pow2_range(PARM, LOW, HIGH) \ + __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \ + ((PARM) & ((PARM) - 1)) == 0, \ + "argument must be a constant power of 2 from " #LOW " to " #HIGH))) + +/*-- __lcbb -----------------------------------------------------------------*/ + +extern __ATTRS_o unsigned int +__lcbb(const void *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \ + __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \ + ((Y) == 64 ? 0 : \ + (Y) == 128 ? 1 : \ + (Y) == 256 ? 2 : \ + (Y) == 512 ? 3 : \ + (Y) == 1024 ? 4 : \ + (Y) == 2048 ? 5 : \ + (Y) == 4096 ? 6 : 0) : 0)) + +/*-- vec_extract ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai signed char +vec_extract(__vector signed char __vec, int __index) { + return __vec[__index & 15]; +} + +static inline __ATTRS_o_ai unsigned char +vec_extract(__vector __bool char __vec, int __index) { + return __vec[__index & 15]; +} + +static inline __ATTRS_o_ai unsigned char +vec_extract(__vector unsigned char __vec, int __index) { + return __vec[__index & 15]; +} + +static inline __ATTRS_o_ai signed short +vec_extract(__vector signed short __vec, int __index) { + return __vec[__index & 7]; +} + +static inline __ATTRS_o_ai unsigned short +vec_extract(__vector __bool short __vec, int __index) { + return __vec[__index & 7]; +} + +static inline __ATTRS_o_ai unsigned short +vec_extract(__vector unsigned short __vec, int __index) { + return __vec[__index & 7]; +} + +static inline __ATTRS_o_ai signed int +vec_extract(__vector signed int __vec, int __index) { + return __vec[__index & 3]; +} + +static inline __ATTRS_o_ai unsigned int +vec_extract(__vector __bool int __vec, int __index) { + return __vec[__index & 3]; +} + +static inline __ATTRS_o_ai unsigned int +vec_extract(__vector unsigned int __vec, int __index) { + return __vec[__index & 3]; +} + +static inline __ATTRS_o_ai signed long long +vec_extract(__vector signed long long __vec, int __index) { + return __vec[__index & 1]; +} + +static inline __ATTRS_o_ai unsigned long long +vec_extract(__vector __bool long long __vec, int __index) { + return __vec[__index & 1]; +} + +static inline __ATTRS_o_ai unsigned long long +vec_extract(__vector unsigned long long __vec, int __index) { + return __vec[__index & 1]; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai float +vec_extract(__vector float __vec, int __index) { + return __vec[__index & 3]; +} +#endif + +static inline __ATTRS_o_ai double +vec_extract(__vector double __vec, int __index) { + return __vec[__index & 1]; +} + +/*-- vec_insert -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_insert(signed char __scalar, __vector signed char 
__vec, int __index) { + __vec[__index & 15] = __scalar; + return __vec; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_insert(unsigned char __scalar, __vector __bool char __vec, int __index) { + __vector unsigned char __newvec = (__vector unsigned char)__vec; + __newvec[__index & 15] = (unsigned char)__scalar; + return __newvec; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_insert(unsigned char __scalar, __vector unsigned char __vec, int __index) { + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed short +vec_insert(signed short __scalar, __vector signed short __vec, int __index) { + __vec[__index & 7] = __scalar; + return __vec; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_insert(unsigned short __scalar, __vector __bool short __vec, + int __index) { + __vector unsigned short __newvec = (__vector unsigned short)__vec; + __newvec[__index & 7] = (unsigned short)__scalar; + return __newvec; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_insert(unsigned short __scalar, __vector unsigned short __vec, + int __index) { + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed int +vec_insert(signed int __scalar, __vector signed int __vec, int __index) { + __vec[__index & 3] = __scalar; + return __vec; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_insert(unsigned int __scalar, __vector __bool int __vec, int __index) { + __vector unsigned int __newvec = (__vector unsigned int)__vec; + __newvec[__index & 3] = __scalar; + return __newvec; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_insert(unsigned int __scalar, __vector unsigned int __vec, int __index) { + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_insert(signed long long __scalar, __vector signed long long __vec, + int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} + +// This prototype is deprecated. 
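+// (Editorial note: the deprecated overloads are the ones taking a
+// __vector __bool operand, like the one below; they return a plain
+// unsigned vector rather than a __bool vector.)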
+static inline __ATTRS_o_ai __vector unsigned long long +vec_insert(unsigned long long __scalar, __vector __bool long long __vec, + int __index) { + __vector unsigned long long __newvec = (__vector unsigned long long)__vec; + __newvec[__index & 1] = __scalar; + return __newvec; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_insert(unsigned long long __scalar, __vector unsigned long long __vec, + int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_insert(float __scalar, __vector float __vec, int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_insert(double __scalar, __vector double __vec, int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} + +/*-- vec_promote ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_promote(signed char __scalar, int __index) { + const __vector signed char __zero = (__vector signed char)0; + __vector signed char __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_promote(unsigned char __scalar, int __index) { + const __vector unsigned char __zero = (__vector unsigned char)0; + __vector unsigned char __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed short +vec_promote(signed short __scalar, int __index) { + const __vector signed short __zero = (__vector signed short)0; + __vector signed short __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_promote(unsigned short __scalar, int __index) { + const __vector unsigned short __zero = (__vector unsigned short)0; + __vector unsigned short __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed int +vec_promote(signed int __scalar, int __index) { + const __vector signed int __zero = (__vector signed int)0; + __vector signed int __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1); + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_promote(unsigned int __scalar, int __index) { + const __vector unsigned int __zero = (__vector unsigned int)0; + __vector unsigned int __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1); + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_promote(signed long long __scalar, int __index) { + const __vector signed long long __zero = (__vector signed long long)0; + __vector signed long long __vec = __builtin_shufflevector(__zero, __zero, + -1, -1); + __vec[__index & 1] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_promote(unsigned long long __scalar, int __index) { + const __vector unsigned long long __zero = (__vector unsigned long long)0; + __vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero, + -1, -1); + __vec[__index & 1] = 
__scalar; + return __vec; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_promote(float __scalar, int __index) { + const __vector float __zero = (__vector float)0.0f; + __vector float __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1); + __vec[__index & 3] = __scalar; + return __vec; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_promote(double __scalar, int __index) { + const __vector double __zero = (__vector double)0.0; + __vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1); + __vec[__index & 1] = __scalar; + return __vec; +} + +/*-- vec_insert_and_zero ----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_insert_and_zero(const signed char *__ptr) { + __vector signed char __vec = (__vector signed char)0; + __vec[7] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_insert_and_zero(const unsigned char *__ptr) { + __vector unsigned char __vec = (__vector unsigned char)0; + __vec[7] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed short +vec_insert_and_zero(const signed short *__ptr) { + __vector signed short __vec = (__vector signed short)0; + __vec[3] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_insert_and_zero(const unsigned short *__ptr) { + __vector unsigned short __vec = (__vector unsigned short)0; + __vec[3] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed int +vec_insert_and_zero(const signed int *__ptr) { + __vector signed int __vec = (__vector signed int)0; + __vec[1] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_insert_and_zero(const unsigned int *__ptr) { + __vector unsigned int __vec = (__vector unsigned int)0; + __vec[1] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_insert_and_zero(const signed long long *__ptr) { + __vector signed long long __vec = (__vector signed long long)0; + __vec[0] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_insert_and_zero(const unsigned long long *__ptr) { + __vector unsigned long long __vec = (__vector unsigned long long)0; + __vec[0] = *__ptr; + return __vec; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_insert_and_zero(const float *__ptr) { + __vector float __vec = (__vector float)0.0f; + __vec[1] = *__ptr; + return __vec; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_insert_and_zero(const double *__ptr) { + __vector double __vec = (__vector double)0.0; + __vec[0] = *__ptr; + return __vec; +} + +/*-- vec_perm ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_perm(__vector signed char __a, __vector signed char __b, + __vector unsigned char __c) { + return (__vector signed char)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_perm(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return (__vector unsigned char)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_perm(__vector __bool char __a, __vector __bool char __b, + __vector unsigned char __c) { + return (__vector __bool char)__builtin_s390_vperm( + (__vector 
unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector signed short +vec_perm(__vector signed short __a, __vector signed short __b, + __vector unsigned char __c) { + return (__vector signed short)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_perm(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned char __c) { + return (__vector unsigned short)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_perm(__vector __bool short __a, __vector __bool short __b, + __vector unsigned char __c) { + return (__vector __bool short)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector signed int +vec_perm(__vector signed int __a, __vector signed int __b, + __vector unsigned char __c) { + return (__vector signed int)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_perm(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned char __c) { + return (__vector unsigned int)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_perm(__vector __bool int __a, __vector __bool int __b, + __vector unsigned char __c) { + return (__vector __bool int)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_perm(__vector signed long long __a, __vector signed long long __b, + __vector unsigned char __c) { + return (__vector signed long long)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_perm(__vector unsigned long long __a, __vector unsigned long long __b, + __vector unsigned char __c) { + return (__vector unsigned long long)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_perm(__vector __bool long long __a, __vector __bool long long __b, + __vector unsigned char __c) { + return (__vector __bool long long)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_perm(__vector float __a, __vector float __b, + __vector unsigned char __c) { + return (__vector float)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_perm(__vector double __a, __vector double __b, + __vector unsigned char __c) { + return (__vector double)__builtin_s390_vperm( + (__vector unsigned char)__a, (__vector unsigned char)__b, __c); +} + +/*-- vec_permi --------------------------------------------------------------*/ + +// This prototype is deprecated. +extern __ATTRS_o __vector signed long long +vec_permi(__vector signed long long __a, __vector signed long long __b, + int __c) + __constant_range(__c, 0, 3); + +// This prototype is deprecated. 
+extern __ATTRS_o __vector unsigned long long +vec_permi(__vector unsigned long long __a, __vector unsigned long long __b, + int __c) + __constant_range(__c, 0, 3); + +// This prototype is deprecated. +extern __ATTRS_o __vector __bool long long +vec_permi(__vector __bool long long __a, __vector __bool long long __b, + int __c) + __constant_range(__c, 0, 3); + +// This prototype is deprecated. +extern __ATTRS_o __vector double +vec_permi(__vector double __a, __vector double __b, int __c) + __constant_range(__c, 0, 3); + +#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \ + __builtin_s390_vpdi((__vector unsigned long long)(X), \ + (__vector unsigned long long)(Y), \ + (((Z) & 2) << 1) | ((Z) & 1))) + +/*-- vec_bperm_u128 ---------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_ai __vector unsigned long long +vec_bperm_u128(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vbperm(__a, __b); +} +#endif + +/*-- vec_revb ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_revb(__vector signed short __vec) { + return (__vector signed short) + __builtin_s390_vlbrh((__vector unsigned short)__vec); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_revb(__vector unsigned short __vec) { + return __builtin_s390_vlbrh(__vec); +} + +static inline __ATTRS_o_ai __vector signed int +vec_revb(__vector signed int __vec) { + return (__vector signed int) + __builtin_s390_vlbrf((__vector unsigned int)__vec); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_revb(__vector unsigned int __vec) { + return __builtin_s390_vlbrf(__vec); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_revb(__vector signed long long __vec) { + return (__vector signed long long) + __builtin_s390_vlbrg((__vector unsigned long long)__vec); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_revb(__vector unsigned long long __vec) { + return __builtin_s390_vlbrg(__vec); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_revb(__vector float __vec) { + return (__vector float) + __builtin_s390_vlbrf((__vector unsigned int)__vec); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_revb(__vector double __vec) { + return (__vector double) + __builtin_s390_vlbrg((__vector unsigned long long)__vec); +} + +/*-- vec_reve ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_reve(__vector signed char __vec) { + return (__vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12], + __vec[11], __vec[10], __vec[9], __vec[8], + __vec[7], __vec[6], __vec[5], __vec[4], + __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_reve(__vector unsigned char __vec) { + return (__vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12], + __vec[11], __vec[10], __vec[9], __vec[8], + __vec[7], __vec[6], __vec[5], __vec[4], + __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector __bool char +vec_reve(__vector __bool char __vec) { + return (__vector __bool char) { __vec[15], __vec[14], __vec[13], __vec[12], + __vec[11], __vec[10], __vec[9], __vec[8], + __vec[7], __vec[6], __vec[5], __vec[4], + __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector signed short +vec_reve(__vector signed short __vec) { + return (__vector signed 
short) { __vec[7], __vec[6], __vec[5], __vec[4], + __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_reve(__vector unsigned short __vec) { + return (__vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4], + __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector __bool short +vec_reve(__vector __bool short __vec) { + return (__vector __bool short) { __vec[7], __vec[6], __vec[5], __vec[4], + __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector signed int +vec_reve(__vector signed int __vec) { + return (__vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_reve(__vector unsigned int __vec) { + return (__vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector __bool int +vec_reve(__vector __bool int __vec) { + return (__vector __bool int) { __vec[3], __vec[2], __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_reve(__vector signed long long __vec) { + return (__vector signed long long) { __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_reve(__vector unsigned long long __vec) { + return (__vector unsigned long long) { __vec[1], __vec[0] }; +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_reve(__vector __bool long long __vec) { + return (__vector __bool long long) { __vec[1], __vec[0] }; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_reve(__vector float __vec) { + return (__vector float) { __vec[3], __vec[2], __vec[1], __vec[0] }; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_reve(__vector double __vec) { + return (__vector double) { __vec[1], __vec[0] }; +} + +/*-- vec_sel ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_sel(__vector signed char __a, __vector signed char __b, + __vector unsigned char __c) { + return (((__vector signed char)__c & __b) | + (~(__vector signed char)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed char +vec_sel(__vector signed char __a, __vector signed char __b, + __vector __bool char __c) { + return (((__vector signed char)__c & __b) | + (~(__vector signed char)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_sel(__vector __bool char __a, __vector __bool char __b, + __vector unsigned char __c) { + return (((__vector __bool char)__c & __b) | + (~(__vector __bool char)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_sel(__vector __bool char __a, __vector __bool char __b, + __vector __bool char __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_sel(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_sel(__vector unsigned char __a, __vector unsigned char __b, + __vector __bool char __c) { + return (((__vector unsigned char)__c & __b) | + (~(__vector unsigned char)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed short +vec_sel(__vector signed short __a, __vector signed short __b, + __vector unsigned short __c) { + return (((__vector signed short)__c & __b) | + (~(__vector signed short)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed short +vec_sel(__vector 
signed short __a, __vector signed short __b, + __vector __bool short __c) { + return (((__vector signed short)__c & __b) | + (~(__vector signed short)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_sel(__vector __bool short __a, __vector __bool short __b, + __vector unsigned short __c) { + return (((__vector __bool short)__c & __b) | + (~(__vector __bool short)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_sel(__vector __bool short __a, __vector __bool short __b, + __vector __bool short __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_sel(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_sel(__vector unsigned short __a, __vector unsigned short __b, + __vector __bool short __c) { + return (((__vector unsigned short)__c & __b) | + (~(__vector unsigned short)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed int +vec_sel(__vector signed int __a, __vector signed int __b, + __vector unsigned int __c) { + return (((__vector signed int)__c & __b) | + (~(__vector signed int)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed int +vec_sel(__vector signed int __a, __vector signed int __b, + __vector __bool int __c) { + return (((__vector signed int)__c & __b) | + (~(__vector signed int)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_sel(__vector __bool int __a, __vector __bool int __b, + __vector unsigned int __c) { + return (((__vector __bool int)__c & __b) | + (~(__vector __bool int)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_sel(__vector __bool int __a, __vector __bool int __b, + __vector __bool int __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_sel(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_sel(__vector unsigned int __a, __vector unsigned int __b, + __vector __bool int __c) { + return (((__vector unsigned int)__c & __b) | + (~(__vector unsigned int)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_sel(__vector signed long long __a, __vector signed long long __b, + __vector unsigned long long __c) { + return (((__vector signed long long)__c & __b) | + (~(__vector signed long long)__c & __a)); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_sel(__vector signed long long __a, __vector signed long long __b, + __vector __bool long long __c) { + return (((__vector signed long long)__c & __b) | + (~(__vector signed long long)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_sel(__vector __bool long long __a, __vector __bool long long __b, + __vector unsigned long long __c) { + return (((__vector __bool long long)__c & __b) | + (~(__vector __bool long long)__c & __a)); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_sel(__vector __bool long long __a, __vector __bool long long __b, + __vector __bool long long __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_sel(__vector unsigned long long __a, __vector unsigned long long __b, + __vector unsigned long long __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai __vector unsigned long 
long +vec_sel(__vector unsigned long long __a, __vector unsigned long long __b, + __vector __bool long long __c) { + return (((__vector unsigned long long)__c & __b) | + (~(__vector unsigned long long)__c & __a)); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_sel(__vector float __a, __vector float __b, __vector unsigned int __c) { + return (__vector float)((__c & (__vector unsigned int)__b) | + (~__c & (__vector unsigned int)__a)); +} + +static inline __ATTRS_o_ai __vector float +vec_sel(__vector float __a, __vector float __b, __vector __bool int __c) { + __vector unsigned int __ac = (__vector unsigned int)__a; + __vector unsigned int __bc = (__vector unsigned int)__b; + __vector unsigned int __cc = (__vector unsigned int)__c; + return (__vector float)((__cc & __bc) | (~__cc & __ac)); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_sel(__vector double __a, __vector double __b, + __vector unsigned long long __c) { + return (__vector double)((__c & (__vector unsigned long long)__b) | + (~__c & (__vector unsigned long long)__a)); +} + +static inline __ATTRS_o_ai __vector double +vec_sel(__vector double __a, __vector double __b, + __vector __bool long long __c) { + __vector unsigned long long __ac = (__vector unsigned long long)__a; + __vector unsigned long long __bc = (__vector unsigned long long)__b; + __vector unsigned long long __cc = (__vector unsigned long long)__c; + return (__vector double)((__cc & __bc) | (~__cc & __ac)); +} + +/*-- vec_gather_element -----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed int +vec_gather_element(__vector signed int __vec, + __vector unsigned int __offset, + const signed int *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const signed int *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai __vector __bool int +vec_gather_element(__vector __bool int __vec, + __vector unsigned int __offset, + const unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const unsigned int *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_gather_element(__vector unsigned int __vec, + __vector unsigned int __offset, + const unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const unsigned int *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_gather_element(__vector signed long long __vec, + __vector unsigned long long __offset, + const signed long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const signed long long *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_gather_element(__vector __bool long long __vec, + __vector unsigned long long __offset, + const unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const unsigned long long *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_gather_element(__vector unsigned long long __vec, + __vector unsigned long long __offset, + const unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const unsigned long long *)( + (const char *)__ptr + __offset[__index]); + return 
__vec; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_gather_element(__vector float __vec, + __vector unsigned int __offset, + const float *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const float *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_gather_element(__vector double __vec, + __vector unsigned long long __offset, + const double *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const double *)( + (const char *)__ptr + __offset[__index]); + return __vec; +} + +/*-- vec_scatter_element ----------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector signed int __vec, + __vector unsigned int __offset, + signed int *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(signed int *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector __bool int __vec, + __vector unsigned int __offset, + unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(unsigned int *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector unsigned int __vec, + __vector unsigned int __offset, + unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(unsigned int *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector signed long long __vec, + __vector unsigned long long __offset, + signed long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(signed long long *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector __bool long long __vec, + __vector unsigned long long __offset, + unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(unsigned long long *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector unsigned long long __vec, + __vector unsigned long long __offset, + unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(unsigned long long *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai void +vec_scatter_element(__vector float __vec, + __vector unsigned int __offset, + float *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(float *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} +#endif + +static inline __ATTRS_o_ai void +vec_scatter_element(__vector double __vec, + __vector unsigned long long __offset, + double *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(double *)((char *)__ptr + __offset[__index]) = + __vec[__index]; +} + +/*-- vec_xl -----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_xl(long __offset, const signed char *__ptr) { + __vector signed char V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed char)); + return V; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_xl(long __offset, const unsigned char *__ptr) { + __vector unsigned char V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned char)); + return V; +} + +static inline __ATTRS_o_ai __vector signed short +vec_xl(long __offset, 
const signed short *__ptr) { + __vector signed short V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed short)); + return V; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_xl(long __offset, const unsigned short *__ptr) { + __vector unsigned short V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned short)); + return V; +} + +static inline __ATTRS_o_ai __vector signed int +vec_xl(long __offset, const signed int *__ptr) { + __vector signed int V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed int)); + return V; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_xl(long __offset, const unsigned int *__ptr) { + __vector unsigned int V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned int)); + return V; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_xl(long __offset, const signed long long *__ptr) { + __vector signed long long V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed long long)); + return V; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_xl(long __offset, const unsigned long long *__ptr) { + __vector unsigned long long V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned long long)); + return V; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_xl(long __offset, const float *__ptr) { + __vector float V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector float)); + return V; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_xl(long __offset, const double *__ptr) { + __vector double V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector double)); + return V; +} + +/*-- vec_xld2 ---------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_xld2(long __offset, const signed char *__ptr) { + __vector signed char V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed char)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_xld2(long __offset, const unsigned char *__ptr) { + __vector unsigned char V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned char)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_xld2(long __offset, const signed short *__ptr) { + __vector signed short V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed short)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_xld2(long __offset, const unsigned short *__ptr) { + __vector unsigned short V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned short)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_xld2(long __offset, const signed int *__ptr) { + __vector signed int V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed int)); + return V; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector unsigned int +vec_xld2(long __offset, const unsigned int *__ptr) { + __vector unsigned int V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned int)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_xld2(long __offset, const signed long long *__ptr) { + __vector signed long long V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed long long)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_xld2(long __offset, const unsigned long long *__ptr) { + __vector unsigned long long V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned long long)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector double +vec_xld2(long __offset, const double *__ptr) { + __vector double V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector double)); + return V; +} + +/*-- vec_xlw4 ---------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_xlw4(long __offset, const signed char *__ptr) { + __vector signed char V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed char)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_xlw4(long __offset, const unsigned char *__ptr) { + __vector unsigned char V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned char)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_xlw4(long __offset, const signed short *__ptr) { + __vector signed short V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed short)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_xlw4(long __offset, const unsigned short *__ptr) { + __vector unsigned short V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned short)); + return V; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_xlw4(long __offset, const signed int *__ptr) { + __vector signed int V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector signed int)); + return V; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector unsigned int +vec_xlw4(long __offset, const unsigned int *__ptr) { + __vector unsigned int V; + __builtin_memcpy(&V, ((const char *)__ptr + __offset), + sizeof(__vector unsigned int)); + return V; +} + +/*-- vec_xst ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_xst(__vector signed char __vec, long __offset, signed char *__ptr) { + __vector signed char V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed char)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector unsigned char __vec, long __offset, unsigned char *__ptr) { + __vector unsigned char V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned char)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector signed short __vec, long __offset, signed short *__ptr) { + __vector signed short V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed short)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector unsigned short __vec, long __offset, unsigned short *__ptr) { + __vector unsigned short V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned short)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector signed int __vec, long __offset, signed int *__ptr) { + __vector signed int V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector signed int)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector unsigned int __vec, long __offset, unsigned int *__ptr) { + __vector unsigned int V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned int)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector signed long long __vec, long __offset, + signed long long *__ptr) { + __vector signed long long V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed long long)); +} + +static inline __ATTRS_o_ai void +vec_xst(__vector unsigned long long __vec, long __offset, + unsigned long long *__ptr) { + __vector unsigned long long V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned long long)); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai void +vec_xst(__vector float __vec, long __offset, float *__ptr) { + __vector float V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector float)); +} +#endif + +static inline __ATTRS_o_ai void +vec_xst(__vector double __vec, long __offset, double *__ptr) { + __vector double V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector double)); +} + +/*-- vec_xstd2 --------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector signed char __vec, long __offset, signed char *__ptr) { + __vector signed char V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed char)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector unsigned char __vec, long __offset, unsigned char *__ptr) { + __vector unsigned char V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned char)); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai void +vec_xstd2(__vector signed short __vec, long __offset, signed short *__ptr) { + __vector signed short V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed short)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector unsigned short __vec, long __offset, unsigned short *__ptr) { + __vector unsigned short V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned short)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector signed int __vec, long __offset, signed int *__ptr) { + __vector signed int V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector signed int)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector unsigned int __vec, long __offset, unsigned int *__ptr) { + __vector unsigned int V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned int)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector signed long long __vec, long __offset, + signed long long *__ptr) { + __vector signed long long V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed long long)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector unsigned long long __vec, long __offset, + unsigned long long *__ptr) { + __vector unsigned long long V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned long long)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstd2(__vector double __vec, long __offset, double *__ptr) { + __vector double V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector double)); +} + +/*-- vec_xstw4 --------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstw4(__vector signed char __vec, long __offset, signed char *__ptr) { + __vector signed char V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed char)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstw4(__vector unsigned char __vec, long __offset, unsigned char *__ptr) { + __vector unsigned char V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned char)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstw4(__vector signed short __vec, long __offset, signed short *__ptr) { + __vector signed short V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector signed short)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstw4(__vector unsigned short __vec, long __offset, unsigned short *__ptr) { + __vector unsigned short V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned short)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai void +vec_xstw4(__vector signed int __vec, long __offset, signed int *__ptr) { + __vector signed int V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector signed int)); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai void +vec_xstw4(__vector unsigned int __vec, long __offset, unsigned int *__ptr) { + __vector unsigned int V = __vec; + __builtin_memcpy(((char *)__ptr + __offset), &V, + sizeof(__vector unsigned int)); +} + +/*-- vec_load_bndry ---------------------------------------------------------*/ + +extern __ATTRS_o __vector signed char +vec_load_bndry(const signed char *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector unsigned char +vec_load_bndry(const unsigned char *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector signed short +vec_load_bndry(const signed short *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector unsigned short +vec_load_bndry(const unsigned short *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector signed int +vec_load_bndry(const signed int *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector unsigned int +vec_load_bndry(const unsigned int *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector signed long long +vec_load_bndry(const signed long long *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o __vector unsigned long long +vec_load_bndry(const unsigned long long *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +#if __ARCH__ >= 12 +extern __ATTRS_o __vector float +vec_load_bndry(const float *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); +#endif + +extern __ATTRS_o __vector double +vec_load_bndry(const double *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \ + __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \ + (Y) == 128 ? 1 : \ + (Y) == 256 ? 2 : \ + (Y) == 512 ? 3 : \ + (Y) == 1024 ? 4 : \ + (Y) == 2048 ? 5 : \ + (Y) == 4096 ? 
6 : -1))) + +/*-- vec_load_len -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_load_len(const signed char *__ptr, unsigned int __len) { + return (__vector signed char)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_load_len(const unsigned char *__ptr, unsigned int __len) { + return (__vector unsigned char)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector signed short +vec_load_len(const signed short *__ptr, unsigned int __len) { + return (__vector signed short)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_load_len(const unsigned short *__ptr, unsigned int __len) { + return (__vector unsigned short)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector signed int +vec_load_len(const signed int *__ptr, unsigned int __len) { + return (__vector signed int)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_load_len(const unsigned int *__ptr, unsigned int __len) { + return (__vector unsigned int)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_load_len(const signed long long *__ptr, unsigned int __len) { + return (__vector signed long long)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_load_len(const unsigned long long *__ptr, unsigned int __len) { + return (__vector unsigned long long)__builtin_s390_vll(__len, __ptr); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_load_len(const float *__ptr, unsigned int __len) { + return (__vector float)__builtin_s390_vll(__len, __ptr); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_load_len(const double *__ptr, unsigned int __len) { + return (__vector double)__builtin_s390_vll(__len, __ptr); +} + +/*-- vec_load_len_r ---------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_ai __vector unsigned char +vec_load_len_r(const unsigned char *__ptr, unsigned int __len) { + return (__vector unsigned char)__builtin_s390_vlrl(__len, __ptr); +} +#endif + +/*-- vec_store_len ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_store_len(__vector signed char __vec, signed char *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector unsigned char __vec, unsigned char *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector signed short __vec, signed short *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector unsigned short __vec, unsigned short *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector signed int __vec, signed int *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector unsigned int __vec, unsigned int *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector 
signed long long __vec, signed long long *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(__vector unsigned long long __vec, unsigned long long *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai void +vec_store_len(__vector float __vec, float *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} +#endif + +static inline __ATTRS_o_ai void +vec_store_len(__vector double __vec, double *__ptr, + unsigned int __len) { + __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr); +} + +/*-- vec_store_len_r --------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_ai void +vec_store_len_r(__vector unsigned char __vec, unsigned char *__ptr, + unsigned int __len) { + __builtin_s390_vstrl((__vector signed char)__vec, __len, __ptr); +} +#endif + +/*-- vec_load_pair ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed long long +vec_load_pair(signed long long __a, signed long long __b) { + return (__vector signed long long)(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_load_pair(unsigned long long __a, unsigned long long __b) { + return (__vector unsigned long long)(__a, __b); +} + +/*-- vec_genmask ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_genmask(unsigned short __mask) + __constant(__mask) { + return (__vector unsigned char)( + __mask & 0x8000 ? 0xff : 0, + __mask & 0x4000 ? 0xff : 0, + __mask & 0x2000 ? 0xff : 0, + __mask & 0x1000 ? 0xff : 0, + __mask & 0x0800 ? 0xff : 0, + __mask & 0x0400 ? 0xff : 0, + __mask & 0x0200 ? 0xff : 0, + __mask & 0x0100 ? 0xff : 0, + __mask & 0x0080 ? 0xff : 0, + __mask & 0x0040 ? 0xff : 0, + __mask & 0x0020 ? 0xff : 0, + __mask & 0x0010 ? 0xff : 0, + __mask & 0x0008 ? 0xff : 0, + __mask & 0x0004 ? 0xff : 0, + __mask & 0x0002 ? 0xff : 0, + __mask & 0x0001 ? 0xff : 0); +} + +/*-- vec_genmasks_* ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_genmasks_8(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 7; + unsigned char __bit2 = __last & 7; + unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1; + unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1; + unsigned char __value = (__bit1 <= __bit2 ? + __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (__vector unsigned char)__value; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_genmasks_16(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 15; + unsigned char __bit2 = __last & 15; + unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1; + unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1; + unsigned short __value = (__bit1 <= __bit2 ? 
+ __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (__vector unsigned short)__value; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_genmasks_32(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 31; + unsigned char __bit2 = __last & 31; + unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1; + unsigned int __mask2 = (1U << (31 - __bit2)) - 1; + unsigned int __value = (__bit1 <= __bit2 ? + __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (__vector unsigned int)__value; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_genmasks_64(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 63; + unsigned char __bit2 = __last & 63; + unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1; + unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1; + unsigned long long __value = (__bit1 <= __bit2 ? + __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (__vector unsigned long long)__value; +} + +/*-- vec_splat --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_splat(__vector signed char __vec, int __index) + __constant_range(__index, 0, 15) { + return (__vector signed char)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector __bool char +vec_splat(__vector __bool char __vec, int __index) + __constant_range(__index, 0, 15) { + return (__vector __bool char)(__vector unsigned char)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_splat(__vector unsigned char __vec, int __index) + __constant_range(__index, 0, 15) { + return (__vector unsigned char)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector signed short +vec_splat(__vector signed short __vec, int __index) + __constant_range(__index, 0, 7) { + return (__vector signed short)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector __bool short +vec_splat(__vector __bool short __vec, int __index) + __constant_range(__index, 0, 7) { + return (__vector __bool short)(__vector unsigned short)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_splat(__vector unsigned short __vec, int __index) + __constant_range(__index, 0, 7) { + return (__vector unsigned short)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector signed int +vec_splat(__vector signed int __vec, int __index) + __constant_range(__index, 0, 3) { + return (__vector signed int)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector __bool int +vec_splat(__vector __bool int __vec, int __index) + __constant_range(__index, 0, 3) { + return (__vector __bool int)(__vector unsigned int)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_splat(__vector unsigned int __vec, int __index) + __constant_range(__index, 0, 3) { + return (__vector unsigned int)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_splat(__vector signed long long __vec, int __index) + __constant_range(__index, 0, 1) { + return (__vector signed long long)__vec[__index]; +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_splat(__vector __bool long long __vec, int __index) + __constant_range(__index, 0, 1) { + return ((__vector __bool long long) + (__vector unsigned long long)__vec[__index]); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_splat(__vector unsigned long long __vec, int __index) + __constant_range(__index, 0, 1) { 
+ return (__vector unsigned long long)__vec[__index]; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_splat(__vector float __vec, int __index) + __constant_range(__index, 0, 3) { + return (__vector float)__vec[__index]; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_splat(__vector double __vec, int __index) + __constant_range(__index, 0, 1) { + return (__vector double)__vec[__index]; +} + +/*-- vec_splat_s* -----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector signed char +vec_splat_s8(signed char __scalar) + __constant(__scalar) { + return (__vector signed char)__scalar; +} + +static inline __ATTRS_ai __vector signed short +vec_splat_s16(signed short __scalar) + __constant(__scalar) { + return (__vector signed short)__scalar; +} + +static inline __ATTRS_ai __vector signed int +vec_splat_s32(signed short __scalar) + __constant(__scalar) { + return (__vector signed int)(signed int)__scalar; +} + +static inline __ATTRS_ai __vector signed long long +vec_splat_s64(signed short __scalar) + __constant(__scalar) { + return (__vector signed long long)(signed long)__scalar; +} + +/*-- vec_splat_u* -----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_splat_u8(unsigned char __scalar) + __constant(__scalar) { + return (__vector unsigned char)__scalar; +} + +static inline __ATTRS_ai __vector unsigned short +vec_splat_u16(unsigned short __scalar) + __constant(__scalar) { + return (__vector unsigned short)__scalar; +} + +static inline __ATTRS_ai __vector unsigned int +vec_splat_u32(signed short __scalar) + __constant(__scalar) { + return (__vector unsigned int)(signed int)__scalar; +} + +static inline __ATTRS_ai __vector unsigned long long +vec_splat_u64(signed short __scalar) + __constant(__scalar) { + return (__vector unsigned long long)(signed long long)__scalar; +} + +/*-- vec_splats -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_splats(signed char __scalar) { + return (__vector signed char)__scalar; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_splats(unsigned char __scalar) { + return (__vector unsigned char)__scalar; +} + +static inline __ATTRS_o_ai __vector signed short +vec_splats(signed short __scalar) { + return (__vector signed short)__scalar; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_splats(unsigned short __scalar) { + return (__vector unsigned short)__scalar; +} + +static inline __ATTRS_o_ai __vector signed int +vec_splats(signed int __scalar) { + return (__vector signed int)__scalar; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_splats(unsigned int __scalar) { + return (__vector unsigned int)__scalar; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_splats(signed long long __scalar) { + return (__vector signed long long)__scalar; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_splats(unsigned long long __scalar) { + return (__vector unsigned long long)__scalar; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_splats(float __scalar) { + return (__vector float)__scalar; +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_splats(double __scalar) { + return (__vector double)__scalar; +} + +/*-- vec_extend_s64 ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed long long +vec_extend_s64(__vector signed char 
__a) { + return (__vector signed long long)(__a[7], __a[15]); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_extend_s64(__vector signed short __a) { + return (__vector signed long long)(__a[3], __a[7]); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_extend_s64(__vector signed int __a) { + return (__vector signed long long)(__a[1], __a[3]); +} + +/*-- vec_mergeh -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_mergeh(__vector signed char __a, __vector signed char __b) { + return (__vector signed char)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3], + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_mergeh(__vector __bool char __a, __vector __bool char __b) { + return (__vector __bool char)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3], + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_mergeh(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector unsigned char)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3], + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai __vector signed short +vec_mergeh(__vector signed short __a, __vector signed short __b) { + return (__vector signed short)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_mergeh(__vector __bool short __a, __vector __bool short __b) { + return (__vector __bool short)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mergeh(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai __vector signed int +vec_mergeh(__vector signed int __a, __vector signed int __b) { + return (__vector signed int)(__a[0], __b[0], __a[1], __b[1]); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_mergeh(__vector __bool int __a, __vector __bool int __b) { + return (__vector __bool int)(__a[0], __b[0], __a[1], __b[1]); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_mergeh(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)(__a[0], __b[0], __a[1], __b[1]); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_mergeh(__vector signed long long __a, __vector signed long long __b) { + return (__vector signed long long)(__a[0], __b[0]); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_mergeh(__vector __bool long long __a, __vector __bool long long __b) { + return (__vector __bool long long)(__a[0], __b[0]); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_mergeh(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector unsigned long long)(__a[0], __b[0]); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_mergeh(__vector float __a, __vector float __b) { + return (__vector float)(__a[0], __b[0], __a[1], __b[1]); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_mergeh(__vector double __a, __vector double __b) { + return (__vector double)(__a[0], __b[0]); +} + +/*-- vec_mergel -------------------------------------------------------------*/ 
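+
+// Illustrative sketch, not part of the upstream header (__x and __y are
+// hypothetical operands): given __vector signed int __x = {0, 1, 2, 3} and
+// __y = {4, 5, 6, 7}, vec_mergel(__x, __y) yields {2, 6, 3, 7}, interleaving
+// the low-order element halves of the two operands, while vec_mergeh above
+// yields {0, 4, 1, 5} from the high-order halves.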
+ +static inline __ATTRS_o_ai __vector signed char +vec_mergel(__vector signed char __a, __vector signed char __b) { + return (__vector signed char)( + __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11], + __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_mergel(__vector __bool char __a, __vector __bool char __b) { + return (__vector __bool char)( + __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11], + __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_mergel(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector unsigned char)( + __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11], + __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]); +} + +static inline __ATTRS_o_ai __vector signed short +vec_mergel(__vector signed short __a, __vector signed short __b) { + return (__vector signed short)( + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_mergel(__vector __bool short __a, __vector __bool short __b) { + return (__vector __bool short)( + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mergel(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)( + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai __vector signed int +vec_mergel(__vector signed int __a, __vector signed int __b) { + return (__vector signed int)(__a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_mergel(__vector __bool int __a, __vector __bool int __b) { + return (__vector __bool int)(__a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_mergel(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)(__a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_mergel(__vector signed long long __a, __vector signed long long __b) { + return (__vector signed long long)(__a[1], __b[1]); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_mergel(__vector __bool long long __a, __vector __bool long long __b) { + return (__vector __bool long long)(__a[1], __b[1]); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_mergel(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector unsigned long long)(__a[1], __b[1]); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_mergel(__vector float __a, __vector float __b) { + return (__vector float)(__a[2], __b[2], __a[3], __b[3]); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_mergel(__vector double __a, __vector double __b) { + return (__vector double)(__a[1], __b[1]); +} + +/*-- vec_pack ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_pack(__vector signed short __a, __vector signed short __b) { + __vector signed char __ac = (__vector signed char)__a; + __vector signed char __bc = (__vector signed char)__b; + return (__vector signed char)( + __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15], + __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]); +} + 
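+// A sketch of the modular packing above (hypothetical operand values): if
+// every element of __a and __b is 0x1234, vec_pack returns a vector whose
+// bytes are all 0x34; each big-endian halfword keeps only its low-order
+// byte, with no saturation, in contrast to the saturating vec_packs and
+// vec_packsu variants further below.
+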
+static inline __ATTRS_o_ai __vector __bool char +vec_pack(__vector __bool short __a, __vector __bool short __b) { + __vector __bool char __ac = (__vector __bool char)__a; + __vector __bool char __bc = (__vector __bool char)__b; + return (__vector __bool char)( + __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15], + __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_pack(__vector unsigned short __a, __vector unsigned short __b) { + __vector unsigned char __ac = (__vector unsigned char)__a; + __vector unsigned char __bc = (__vector unsigned char)__b; + return (__vector unsigned char)( + __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15], + __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]); +} + +static inline __ATTRS_o_ai __vector signed short +vec_pack(__vector signed int __a, __vector signed int __b) { + __vector signed short __ac = (__vector signed short)__a; + __vector signed short __bc = (__vector signed short)__b; + return (__vector signed short)( + __ac[1], __ac[3], __ac[5], __ac[7], + __bc[1], __bc[3], __bc[5], __bc[7]); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_pack(__vector __bool int __a, __vector __bool int __b) { + __vector __bool short __ac = (__vector __bool short)__a; + __vector __bool short __bc = (__vector __bool short)__b; + return (__vector __bool short)( + __ac[1], __ac[3], __ac[5], __ac[7], + __bc[1], __bc[3], __bc[5], __bc[7]); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_pack(__vector unsigned int __a, __vector unsigned int __b) { + __vector unsigned short __ac = (__vector unsigned short)__a; + __vector unsigned short __bc = (__vector unsigned short)__b; + return (__vector unsigned short)( + __ac[1], __ac[3], __ac[5], __ac[7], + __bc[1], __bc[3], __bc[5], __bc[7]); +} + +static inline __ATTRS_o_ai __vector signed int +vec_pack(__vector signed long long __a, __vector signed long long __b) { + __vector signed int __ac = (__vector signed int)__a; + __vector signed int __bc = (__vector signed int)__b; + return (__vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_pack(__vector __bool long long __a, __vector __bool long long __b) { + __vector __bool int __ac = (__vector __bool int)__a; + __vector __bool int __bc = (__vector __bool int)__b; + return (__vector __bool int)(__ac[1], __ac[3], __bc[1], __bc[3]); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_pack(__vector unsigned long long __a, __vector unsigned long long __b) { + __vector unsigned int __ac = (__vector unsigned int)__a; + __vector unsigned int __bc = (__vector unsigned int)__b; + return (__vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]); +} + +/*-- vec_packs --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_packs(__vector signed short __a, __vector signed short __b) { + return __builtin_s390_vpksh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_packs(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vpklsh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_packs(__vector signed int __a, __vector signed int __b) { + return __builtin_s390_vpksf(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_packs(__vector unsigned int __a, __vector unsigned int __b) { + return 
__builtin_s390_vpklsf(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_packs(__vector signed long long __a, __vector signed long long __b) { + return __builtin_s390_vpksg(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_packs(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_vpklsg(__a, __b); +} + +/*-- vec_packs_cc -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_packs_cc(__vector signed short __a, __vector signed short __b, int *__cc) { + return __builtin_s390_vpkshs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_packs_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { + return __builtin_s390_vpklshs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_packs_cc(__vector signed int __a, __vector signed int __b, int *__cc) { + return __builtin_s390_vpksfs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_packs_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) { + return __builtin_s390_vpklsfs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_packs_cc(__vector signed long long __a, __vector signed long long __b, + int *__cc) { + return __builtin_s390_vpksgs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_packs_cc(__vector unsigned long long __a, __vector unsigned long long __b, + int *__cc) { + return __builtin_s390_vpklsgs(__a, __b, __cc); +} + +/*-- vec_packsu -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_packsu(__vector signed short __a, __vector signed short __b) { + const __vector signed short __zero = (__vector signed short)0; + return __builtin_s390_vpklsh( + (__vector unsigned short)(__a >= __zero) & (__vector unsigned short)__a, + (__vector unsigned short)(__b >= __zero) & (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_packsu(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vpklsh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_packsu(__vector signed int __a, __vector signed int __b) { + const __vector signed int __zero = (__vector signed int)0; + return __builtin_s390_vpklsf( + (__vector unsigned int)(__a >= __zero) & (__vector unsigned int)__a, + (__vector unsigned int)(__b >= __zero) & (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_packsu(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vpklsf(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_packsu(__vector signed long long __a, __vector signed long long __b) { + const __vector signed long long __zero = (__vector signed long long)0; + return __builtin_s390_vpklsg( + (__vector unsigned long long)(__a >= __zero) & + (__vector unsigned long long)__a, + (__vector unsigned long long)(__b >= __zero) & + (__vector unsigned long long)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_packsu(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_vpklsg(__a, __b); +} + +/*-- vec_packsu_cc ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_packsu_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { 
+ return __builtin_s390_vpklshs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_packsu_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) { + return __builtin_s390_vpklsfs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_packsu_cc(__vector unsigned long long __a, __vector unsigned long long __b, + int *__cc) { + return __builtin_s390_vpklsgs(__a, __b, __cc); +} + +/*-- vec_unpackh ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_unpackh(__vector signed char __a) { + return __builtin_s390_vuphb(__a); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_unpackh(__vector __bool char __a) { + return ((__vector __bool short) + __builtin_s390_vuphb((__vector signed char)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_unpackh(__vector unsigned char __a) { + return __builtin_s390_vuplhb(__a); +} + +static inline __ATTRS_o_ai __vector signed int +vec_unpackh(__vector signed short __a) { + return __builtin_s390_vuphh(__a); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_unpackh(__vector __bool short __a) { + return (__vector __bool int)__builtin_s390_vuphh((__vector signed short)__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_unpackh(__vector unsigned short __a) { + return __builtin_s390_vuplhh(__a); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_unpackh(__vector signed int __a) { + return __builtin_s390_vuphf(__a); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_unpackh(__vector __bool int __a) { + return ((__vector __bool long long) + __builtin_s390_vuphf((__vector signed int)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_unpackh(__vector unsigned int __a) { + return __builtin_s390_vuplhf(__a); +} + +/*-- vec_unpackl ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_unpackl(__vector signed char __a) { + return __builtin_s390_vuplb(__a); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_unpackl(__vector __bool char __a) { + return ((__vector __bool short) + __builtin_s390_vuplb((__vector signed char)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_unpackl(__vector unsigned char __a) { + return __builtin_s390_vupllb(__a); +} + +static inline __ATTRS_o_ai __vector signed int +vec_unpackl(__vector signed short __a) { + return __builtin_s390_vuplhw(__a); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_unpackl(__vector __bool short __a) { + return ((__vector __bool int) + __builtin_s390_vuplhw((__vector signed short)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_unpackl(__vector unsigned short __a) { + return __builtin_s390_vupllh(__a); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_unpackl(__vector signed int __a) { + return __builtin_s390_vuplf(__a); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_unpackl(__vector __bool int __a) { + return ((__vector __bool long long) + __builtin_s390_vuplf((__vector signed int)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_unpackl(__vector unsigned int __a) { + return __builtin_s390_vupllf(__a); +} + +/*-- vec_cmpeq --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpeq(__vector __bool char __a, __vector __bool char __b) { + return (__vector __bool 
char)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpeq(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpeq(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpeq(__vector __bool short __a, __vector __bool short __b) { + return (__vector __bool short)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpeq(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpeq(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpeq(__vector __bool int __a, __vector __bool int __b) { + return (__vector __bool int)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpeq(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpeq(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpeq(__vector __bool long long __a, __vector __bool long long __b) { + return (__vector __bool long long)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpeq(__vector signed long long __a, __vector signed long long __b) { + return (__vector __bool long long)(__a == __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpeq(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector __bool long long)(__a == __b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool int +vec_cmpeq(__vector float __a, __vector float __b) { + return (__vector __bool int)(__a == __b); +} +#endif + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpeq(__vector double __a, __vector double __b) { + return (__vector __bool long long)(__a == __b); +} + +/*-- vec_cmpge --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpge(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpge(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpge(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpge(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpge(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpge(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpge(__vector signed long long __a, __vector signed long long __b) { + return (__vector __bool long long)(__a >= __b); +} + +static inline __ATTRS_o_ai __vector __bool 
long long +vec_cmpge(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector __bool long long)(__a >= __b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool int +vec_cmpge(__vector float __a, __vector float __b) { + return (__vector __bool int)(__a >= __b); +} +#endif + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpge(__vector double __a, __vector double __b) { + return (__vector __bool long long)(__a >= __b); +} + +/*-- vec_cmpgt --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpgt(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpgt(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpgt(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpgt(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpgt(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpgt(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpgt(__vector signed long long __a, __vector signed long long __b) { + return (__vector __bool long long)(__a > __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpgt(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector __bool long long)(__a > __b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool int +vec_cmpgt(__vector float __a, __vector float __b) { + return (__vector __bool int)(__a > __b); +} +#endif + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmpgt(__vector double __a, __vector double __b) { + return (__vector __bool long long)(__a > __b); +} + +/*-- vec_cmple --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmple(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char)(__a <= __b); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cmple(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)(__a <= __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmple(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short)(__a <= __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmple(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)(__a <= __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmple(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int)(__a <= __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmple(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)(__a <= __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmple(__vector signed long long __a, __vector signed long long __b) { + return (__vector __bool long long)(__a <= __b); +} + +static inline 
__ATTRS_o_ai __vector __bool long long +vec_cmple(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector __bool long long)(__a <= __b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool int +vec_cmple(__vector float __a, __vector float __b) { + return (__vector __bool int)(__a <= __b); +} +#endif + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmple(__vector double __a, __vector double __b) { + return (__vector __bool long long)(__a <= __b); +} + +/*-- vec_cmplt --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmplt(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cmplt(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmplt(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmplt(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmplt(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmplt(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmplt(__vector signed long long __a, __vector signed long long __b) { + return (__vector __bool long long)(__a < __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmplt(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector __bool long long)(__a < __b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool int +vec_cmplt(__vector float __a, __vector float __b) { + return (__vector __bool int)(__a < __b); +} +#endif + +static inline __ATTRS_o_ai __vector __bool long long +vec_cmplt(__vector double __a, __vector double __b) { + return (__vector __bool long long)(__a < __b); +} + +/*-- vec_all_eq -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 0; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_eq(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfcesbs(__a, __b, &__cc); + return __cc == 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_eq(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfcedbs(__a, __b, &__cc); + return __cc == 0; +} + +/*-- vec_all_ne -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc == 3; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_ne(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfcesbs(__a, __b, &__cc); + return __cc == 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_ne(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfcedbs(__a, __b, &__cc); + return __cc == 3; +} + +/*-- vec_all_ge -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, + (__vector unsigned char)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, + (__vector unsigned short)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, + (__vector unsigned int)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_ge(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, + (__vector unsigned long long)__a, &__cc); + return __cc == 3; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_ge(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__a, __b, &__cc); + return __cc == 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_ge(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__a, __b, &__cc); + return __cc == 0; +} + +/*-- vec_all_gt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, + (__vector unsigned char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, + (__vector unsigned short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, + (__vector unsigned int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_gt(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, + (__vector unsigned long long)__b, &__cc); + return __cc == 0; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_gt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__a, __b, &__cc); + return __cc == 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_gt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__a, __b, &__cc); + return __cc == 0; +} + +/*-- vec_all_le -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_le(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, + (__vector unsigned char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, + (__vector unsigned short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_le(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, + (__vector unsigned int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc); + return __cc == 3; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_le(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, + (__vector unsigned long long)__b, &__cc); + return __cc == 3; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_le(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__b, __a, &__cc); + return __cc == 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_le(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__b, __a, &__cc); + return __cc == 0; +} + +/*-- vec_all_lt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, + (__vector unsigned char)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, + (__vector unsigned short)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, + (__vector unsigned int)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc); + return __cc == 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_all_lt(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, + (__vector unsigned long long)__a, &__cc); + return __cc == 0; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_lt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__b, __a, &__cc); + return __cc == 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_lt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__b, __a, &__cc); + return __cc == 0; +} + +/*-- vec_all_nge ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_nge(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__a, __b, &__cc); + return __cc == 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_nge(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__a, __b, &__cc); + return __cc == 3; +} + +/*-- vec_all_ngt ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_ngt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__a, __b, &__cc); + return __cc == 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_ngt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__a, __b, &__cc); + return __cc == 3; +} + +/*-- vec_all_nle ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_nle(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__b, __a, &__cc); + return __cc == 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_nle(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__b, __a, &__cc); + return __cc == 3; +} + +/*-- vec_all_nlt ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_nlt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__b, __a, &__cc); + return __cc == 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_nlt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__b, __a, &__cc); + return __cc == 3; +} + +/*-- vec_all_nan ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_nan(__vector float __a) { + int __cc; + __builtin_s390_vftcisb(__a, 15, &__cc); + return __cc == 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_nan(__vector double __a) { + int __cc; + __builtin_s390_vftcidb(__a, 15, &__cc); + return __cc == 0; +} + +/*-- vec_all_numeric --------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_all_numeric(__vector float __a) { + int __cc; + __builtin_s390_vftcisb(__a, 15, &__cc); + return __cc == 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_all_numeric(__vector double __a) { + int __cc; + __builtin_s390_vftcidb(__a, 15, &__cc); + return __cc == 3; +} + +/*-- vec_any_eq -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
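A note on the pattern running through these predicates: each one funnels into a signaling compare builtin (__builtin_s390_vceqbs, vchlbs, and friends) that reports a condition code through __cc, and, by the convention the wrapper bodies themselves encode, 0 means the relation held in every element, 1 means it held in some, and 3 means it held in none. That is why the vec_all_* forms test __cc == 0, the vec_any_* forms test __cc <= 1, and the negated forms such as vec_all_nge test __cc == 3. A minimal usage sketch, not part of the patch, assuming a SystemZ target built with -mzvector; all_ascii is a hypothetical helper:

#include <vecintrin.h>

// Returns 1 only when every byte in the block is below 0x80, i.e. the
// underlying vchlbs compare yielded cc == 0 ("true in all elements").
static int all_ascii(__vector unsigned char block) {
  return vec_all_lt(block, vec_splats((unsigned char)0x80));
}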
+static inline __ATTRS_o_ai int +vec_any_eq(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc <= 1; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_eq(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfcesbs(__a, __b, &__cc); + return __cc <= 1; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_eq(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfcedbs(__a, __b, &__cc); + return __cc <= 1; +} + +/*-- vec_any_ne -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. 
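The vec_any_eq family is the natural building block for byte searches. The overloads flagged as deprecated exist only so that old code mixing __bool vectors with element vectors keeps compiling; new code is expected to cast explicitly. A minimal sketch under the same -mzvector assumption, with contains_byte as a hypothetical helper:

#include <vecintrin.h>

// True when at least one of the 16 lanes equals the needle, i.e. the
// vceqbs condition code was 0 ("all equal") or 1 ("some equal").
static int contains_byte(__vector unsigned char block, unsigned char needle) {
  return vec_any_eq(block, vec_splats(needle));
}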
+static inline __ATTRS_o_ai int +vec_any_ne(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vceqbs((__vector signed char)__a, + (__vector signed char)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vceqhs((__vector signed short)__a, + (__vector signed short)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vceqfs((__vector signed int)__a, + (__vector signed int)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ne(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vceqgs((__vector signed long long)__a, + (__vector signed long long)__b, &__cc); + return __cc != 0; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_ne(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfcesbs(__a, __b, &__cc); + return __cc != 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_ne(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfcedbs(__a, __b, &__cc); + return __cc != 0; +} + +/*-- vec_any_ge -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. 
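vec_any_ne gives a cheap early exit when scanning for the first differing vector-sized block, since it is true as soon as a single lane differs. A sketch, not part of the patch, assuming 16-byte-aligned buffers whose length is a multiple of 16; first_mismatch is a hypothetical helper:

#include <stddef.h>
#include <vecintrin.h>

// Index of the first 16-byte block where the buffers differ, or n if none.
static size_t first_mismatch(const unsigned char *a, const unsigned char *b,
                             size_t n) {
  for (size_t i = 0; i < n; i += 16) {
    __vector unsigned char va = *(const __vector unsigned char *)(a + i);
    __vector unsigned char vb = *(const __vector unsigned char *)(b + i);
    if (vec_any_ne(va, vb))
      return i;
  }
  return n;
}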
+static inline __ATTRS_o_ai int +vec_any_ge(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, + (__vector unsigned char)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, + (__vector unsigned short)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, + (__vector unsigned int)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_ge(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, + (__vector unsigned long long)__a, &__cc); + return __cc != 0; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_ge(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__a, __b, &__cc); + return __cc <= 1; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_ge(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__a, __b, &__cc); + return __cc <= 1; +} + +/*-- vec_any_gt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
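It is worth spelling out the trick the vec_any_ge bodies rely on: there is no "greater-or-equal" builtin, only "compare high", so vec_any_ge(a, b) is computed as vchXs(b, a) with a __cc != 0 test, that is, "not every lane has b > a", and the lt and le forms fall out of the same operand swap. One consequence, shown as a sketch (any_ge_equals_not_all_lt is a hypothetical check, not part of the API):

#include <vecintrin.h>

// vec_any_ge(a, b) holds exactly when it is not the case that every lane
// satisfies a < b; both sides reduce to the same vchfs compare, with
// complementary condition-code tests (cc != 0 versus cc == 0).
static int any_ge_equals_not_all_lt(__vector signed int a,
                                    __vector signed int b) {
  return vec_any_ge(a, b) == !vec_all_lt(a, b);
}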
+static inline __ATTRS_o_ai int +vec_any_gt(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, + (__vector unsigned char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, + (__vector unsigned short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, + (__vector unsigned int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_gt(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, + (__vector unsigned long long)__b, &__cc); + return __cc <= 1; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_gt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__a, __b, &__cc); + return __cc <= 1; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_gt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__a, __b, &__cc); + return __cc <= 1; +} + +/*-- vec_any_le -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_le(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. 
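A typical use of vec_any_gt is validating a whole vector of values with one compare, for example rejecting a batch of lengths when any of them exceeds a limit. A sketch with any_too_long as a hypothetical helper:

#include <vecintrin.h>

// True when at least one of the four lane values exceeds max_len.
static int any_too_long(__vector unsigned int lengths, unsigned int max_len) {
  return vec_any_gt(lengths, vec_splats(max_len));
}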
+static inline __ATTRS_o_ai int +vec_any_le(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__a, + (__vector unsigned char)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__a, + (__vector unsigned short)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_le(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__a, + (__vector unsigned int)__b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc); + return __cc != 0; +} + +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc); + return __cc != 0; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_le(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__a, + (__vector unsigned long long)__b, &__cc); + return __cc != 0; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_le(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__b, __a, &__cc); + return __cc <= 1; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_le(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__b, __a, &__cc); + return __cc <= 1; +} + +/*-- vec_any_lt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_lt(__vector signed char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool char __a, __vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool char __a, __vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool char __a, __vector __bool char __b) { + int __cc; + __builtin_s390_vchlbs((__vector unsigned char)__b, + (__vector unsigned char)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool short __a, __vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool short __a, __vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool short __a, __vector __bool short __b) { + int __cc; + __builtin_s390_vchlhs((__vector unsigned short)__b, + (__vector unsigned short)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool int __a, __vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool int __a, __vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool int __a, __vector __bool int __b) { + int __cc; + __builtin_s390_vchlfs((__vector unsigned int)__b, + (__vector unsigned int)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector signed long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool long long __a, __vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector unsigned long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool long long __a, __vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc); + return __cc <= 1; +} + +// This prototype is deprecated. 
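Throughout this header the float overloads sit behind #if __ARCH__ >= 12 because single-precision vector support arrived with the z14 vector enhancements facility, while the double forms have been available since the z13 vector facility. Code using the float variants therefore has to be built for arch12 or later (for example with -march=z14). A sketch of a guarded use, mirroring the header's own gating; any_negative is a hypothetical helper:

#include <vecintrin.h>

// Only compiled when the target provides the z14 (arch12) single-precision
// vector compares that vec_any_lt(float, float) is gated on.
#if __ARCH__ >= 12
static int any_negative(__vector float v) {
  return vec_any_lt(v, vec_splats(0.0f));
}
#endif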
+static inline __ATTRS_o_ai int +vec_any_lt(__vector __bool long long __a, __vector __bool long long __b) { + int __cc; + __builtin_s390_vchlgs((__vector unsigned long long)__b, + (__vector unsigned long long)__a, &__cc); + return __cc <= 1; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_lt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__b, __a, &__cc); + return __cc <= 1; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_lt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__b, __a, &__cc); + return __cc <= 1; +} + +/*-- vec_any_nge ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_nge(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__a, __b, &__cc); + return __cc != 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_nge(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__a, __b, &__cc); + return __cc != 0; +} + +/*-- vec_any_ngt ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_ngt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__a, __b, &__cc); + return __cc != 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_ngt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__a, __b, &__cc); + return __cc != 0; +} + +/*-- vec_any_nle ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_nle(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchesbs(__b, __a, &__cc); + return __cc != 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_nle(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__b, __a, &__cc); + return __cc != 0; +} + +/*-- vec_any_nlt ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_nlt(__vector float __a, __vector float __b) { + int __cc; + __builtin_s390_vfchsbs(__b, __a, &__cc); + return __cc != 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_nlt(__vector double __a, __vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__b, __a, &__cc); + return __cc != 0; +} + +/*-- vec_any_nan ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_nan(__vector float __a) { + int __cc; + __builtin_s390_vftcisb(__a, 15, &__cc); + return __cc != 3; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_nan(__vector double __a) { + int __cc; + __builtin_s390_vftcidb(__a, 15, &__cc); + return __cc != 3; +} + +/*-- vec_any_numeric --------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_any_numeric(__vector float __a) { + int __cc; + __builtin_s390_vftcisb(__a, 15, &__cc); + return __cc != 0; +} +#endif + +static inline __ATTRS_o_ai int +vec_any_numeric(__vector double __a) { + int __cc; + __builtin_s390_vftcidb(__a, 15, &__cc); + return __cc != 0; +} + +/*-- vec_andc ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_andc(__vector __bool char __a, __vector __bool char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai __vector signed char +vec_andc(__vector signed char __a, __vector signed char __b) { + 
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector __bool char __a, __vector signed char __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector __bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector __bool char __a, __vector unsigned char __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector __bool char __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_andc(__vector __bool short __a, __vector __bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector signed short __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector __bool short __a, __vector signed short __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector __bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector __bool short __a, __vector unsigned short __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector __bool short __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_andc(__vector __bool int __a, __vector __bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector signed int __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector __bool int __a, __vector signed int __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector __bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector __bool int __a, __vector unsigned int __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector __bool int __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_andc(__vector __bool long long __a, __vector __bool long long __b) {
+ return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector signed long long __b) {
+ return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long +vec_andc(__vector __bool long long __a, __vector signed long long __b) { + return __a & ~__b; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_andc(__vector signed long long __a, __vector __bool long long __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_andc(__vector unsigned long long __a, __vector unsigned long long __b) { + return __a & ~__b; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_andc(__vector __bool long long __a, __vector unsigned long long __b) { + return __a & ~__b; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_andc(__vector unsigned long long __a, __vector __bool long long __b) { + return __a & ~__b; +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_andc(__vector float __a, __vector float __b) { + return (__vector float)((__vector unsigned int)__a & + ~(__vector unsigned int)__b); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_andc(__vector double __a, __vector double __b) { + return (__vector double)((__vector unsigned long long)__a & + ~(__vector unsigned long long)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector double +vec_andc(__vector __bool long long __a, __vector double __b) { + return (__vector double)((__vector unsigned long long)__a & + ~(__vector unsigned long long)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector double +vec_andc(__vector double __a, __vector __bool long long __b) { + return (__vector double)((__vector unsigned long long)__a & + ~(__vector unsigned long long)__b); +} + +/*-- vec_nor ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_nor(__vector __bool char __a, __vector __bool char __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector signed char +vec_nor(__vector signed char __a, __vector signed char __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_nor(__vector __bool char __a, __vector signed char __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_nor(__vector signed char __a, __vector __bool char __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_nor(__vector unsigned char __a, __vector unsigned char __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_nor(__vector __bool char __a, __vector unsigned char __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_nor(__vector unsigned char __a, __vector __bool char __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_nor(__vector __bool short __a, __vector __bool short __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_nor(__vector signed short __a, __vector signed short __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_nor(__vector __bool short __a, __vector signed short __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. 
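vec_andc computes __a & ~__b, so it reads as "keep the bits of the first operand that are clear in the second"; the float and double variants have to bounce through the unsigned integer types because C's bitwise operators are not defined on floating-point vectors. A sketch with clear_flags as a hypothetical helper:

#include <vecintrin.h>

// Clears, in every lane of v, exactly the bits that are set in mask.
static __vector unsigned int clear_flags(__vector unsigned int v,
                                         __vector unsigned int mask) {
  return vec_andc(v, mask);  /* v & ~mask */
}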
+static inline __ATTRS_o_ai __vector signed short +vec_nor(__vector signed short __a, __vector __bool short __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_nor(__vector unsigned short __a, __vector unsigned short __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_nor(__vector __bool short __a, __vector unsigned short __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_nor(__vector unsigned short __a, __vector __bool short __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_nor(__vector __bool int __a, __vector __bool int __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_nor(__vector signed int __a, __vector signed int __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_nor(__vector __bool int __a, __vector signed int __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_nor(__vector signed int __a, __vector __bool int __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_nor(__vector unsigned int __a, __vector unsigned int __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_nor(__vector __bool int __a, __vector unsigned int __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_nor(__vector unsigned int __a, __vector __bool int __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_nor(__vector __bool long long __a, __vector __bool long long __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_nor(__vector signed long long __a, __vector signed long long __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_nor(__vector __bool long long __a, __vector signed long long __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_nor(__vector signed long long __a, __vector __bool long long __b) { + return ~(__a | __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_nor(__vector unsigned long long __a, __vector unsigned long long __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_nor(__vector __bool long long __a, __vector unsigned long long __b) { + return ~(__a | __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_nor(__vector unsigned long long __a, __vector __bool long long __b) { + return ~(__a | __b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_nor(__vector float __a, __vector float __b) { + return (__vector float)~((__vector unsigned int)__a | + (__vector unsigned int)__b); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_nor(__vector double __a, __vector double __b) { + return (__vector double)~((__vector unsigned long long)__a | + (__vector unsigned long long)__b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector double +vec_nor(__vector __bool long long __a, __vector double __b) { + return (__vector double)~((__vector unsigned long long)__a | + (__vector unsigned long long)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector double +vec_nor(__vector double __a, __vector __bool long long __b) { + return (__vector double)~((__vector unsigned long long)__a | + (__vector unsigned long long)__b); +} + +/*-- vec_orc ----------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool char +vec_orc(__vector __bool char __a, __vector __bool char __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector signed char +vec_orc(__vector signed char __a, __vector signed char __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_orc(__vector unsigned char __a, __vector unsigned char __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector __bool short +vec_orc(__vector __bool short __a, __vector __bool short __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector signed short +vec_orc(__vector signed short __a, __vector signed short __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_orc(__vector unsigned short __a, __vector unsigned short __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector __bool int +vec_orc(__vector __bool int __a, __vector __bool int __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector signed int +vec_orc(__vector signed int __a, __vector signed int __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_orc(__vector unsigned int __a, __vector unsigned int __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_orc(__vector __bool long long __a, __vector __bool long long __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector signed long long +vec_orc(__vector signed long long __a, __vector signed long long __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_orc(__vector unsigned long long __a, __vector unsigned long long __b) { + return __a | ~__b; +} + +static inline __ATTRS_o_ai __vector float +vec_orc(__vector float __a, __vector float __b) { + return (__vector float)((__vector unsigned int)__a | + ~(__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector double +vec_orc(__vector double __a, __vector double __b) { + return (__vector double)((__vector unsigned long long)__a | + ~(__vector unsigned long long)__b); +} +#endif + +/*-- vec_nand ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool char +vec_nand(__vector __bool char __a, __vector __bool char __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector signed char +vec_nand(__vector signed char __a, __vector signed char __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_nand(__vector unsigned char __a, __vector unsigned char __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_nand(__vector __bool short __a, __vector __bool short __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_nand(__vector signed short __a, __vector signed short __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector unsigned short 
+vec_nand(__vector unsigned short __a, __vector unsigned short __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_nand(__vector __bool int __a, __vector __bool int __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_nand(__vector signed int __a, __vector signed int __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_nand(__vector unsigned int __a, __vector unsigned int __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_nand(__vector __bool long long __a, __vector __bool long long __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_nand(__vector signed long long __a, __vector signed long long __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_nand(__vector unsigned long long __a, __vector unsigned long long __b) { + return ~(__a & __b); +} + +static inline __ATTRS_o_ai __vector float +vec_nand(__vector float __a, __vector float __b) { + return (__vector float)~((__vector unsigned int)__a & + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector double +vec_nand(__vector double __a, __vector double __b) { + return (__vector double)~((__vector unsigned long long)__a & + (__vector unsigned long long)__b); +} +#endif + +/*-- vec_eqv ----------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector __bool char +vec_eqv(__vector __bool char __a, __vector __bool char __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector signed char +vec_eqv(__vector signed char __a, __vector signed char __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_eqv(__vector unsigned char __a, __vector unsigned char __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_eqv(__vector __bool short __a, __vector __bool short __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_eqv(__vector signed short __a, __vector signed short __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_eqv(__vector unsigned short __a, __vector unsigned short __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_eqv(__vector __bool int __a, __vector __bool int __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_eqv(__vector signed int __a, __vector signed int __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_eqv(__vector unsigned int __a, __vector unsigned int __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector __bool long long +vec_eqv(__vector __bool long long __a, __vector __bool long long __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_eqv(__vector signed long long __a, __vector signed long long __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_eqv(__vector unsigned long long __a, __vector unsigned long long __b) { + return ~(__a ^ __b); +} + +static inline __ATTRS_o_ai __vector float +vec_eqv(__vector float __a, __vector float __b) { + return (__vector float)~((__vector unsigned int)__a ^ + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector double +vec_eqv(__vector double __a, __vector double __b) { + return (__vector 
double)~((__vector unsigned long long)__a ^ + (__vector unsigned long long)__b); +} +#endif + +/*-- vec_cntlz --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cntlz(__vector signed char __a) { + return __builtin_s390_vclzb((__vector unsigned char)__a); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cntlz(__vector unsigned char __a) { + return __builtin_s390_vclzb(__a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cntlz(__vector signed short __a) { + return __builtin_s390_vclzh((__vector unsigned short)__a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cntlz(__vector unsigned short __a) { + return __builtin_s390_vclzh(__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cntlz(__vector signed int __a) { + return __builtin_s390_vclzf((__vector unsigned int)__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cntlz(__vector unsigned int __a) { + return __builtin_s390_vclzf(__a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_cntlz(__vector signed long long __a) { + return __builtin_s390_vclzg((__vector unsigned long long)__a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_cntlz(__vector unsigned long long __a) { + return __builtin_s390_vclzg(__a); +} + +/*-- vec_cnttz --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cnttz(__vector signed char __a) { + return __builtin_s390_vctzb((__vector unsigned char)__a); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cnttz(__vector unsigned char __a) { + return __builtin_s390_vctzb(__a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cnttz(__vector signed short __a) { + return __builtin_s390_vctzh((__vector unsigned short)__a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cnttz(__vector unsigned short __a) { + return __builtin_s390_vctzh(__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cnttz(__vector signed int __a) { + return __builtin_s390_vctzf((__vector unsigned int)__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cnttz(__vector unsigned int __a) { + return __builtin_s390_vctzf(__a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_cnttz(__vector signed long long __a) { + return __builtin_s390_vctzg((__vector unsigned long long)__a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_cnttz(__vector unsigned long long __a) { + return __builtin_s390_vctzg(__a); +} + +/*-- vec_popcnt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_popcnt(__vector signed char __a) { + return __builtin_s390_vpopctb((__vector unsigned char)__a); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_popcnt(__vector unsigned char __a) { + return __builtin_s390_vpopctb(__a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_popcnt(__vector signed short __a) { + return __builtin_s390_vpopcth((__vector unsigned short)__a); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_popcnt(__vector unsigned short __a) { + return __builtin_s390_vpopcth(__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_popcnt(__vector signed int __a) { + return __builtin_s390_vpopctf((__vector unsigned int)__a); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_popcnt(__vector unsigned int __a) { + return 
__builtin_s390_vpopctf(__a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_popcnt(__vector signed long long __a) { + return __builtin_s390_vpopctg((__vector unsigned long long)__a); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_popcnt(__vector unsigned long long __a) { + return __builtin_s390_vpopctg(__a); +} + +/*-- vec_rl -----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_rl(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_verllvb( + (__vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_rl(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_verllvb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_rl(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_verllvh( + (__vector unsigned short)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_rl(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_verllvh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_rl(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_verllvf( + (__vector unsigned int)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_rl(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_verllvf(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_rl(__vector signed long long __a, __vector unsigned long long __b) { + return (__vector signed long long)__builtin_s390_verllvg( + (__vector unsigned long long)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_rl(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_verllvg(__a, __b); +} + +/*-- vec_rli ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_rli(__vector signed char __a, unsigned long __b) { + return (__vector signed char)__builtin_s390_verllb( + (__vector unsigned char)__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_rli(__vector unsigned char __a, unsigned long __b) { + return __builtin_s390_verllb(__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_rli(__vector signed short __a, unsigned long __b) { + return (__vector signed short)__builtin_s390_verllh( + (__vector unsigned short)__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_rli(__vector unsigned short __a, unsigned long __b) { + return __builtin_s390_verllh(__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_rli(__vector signed int __a, unsigned long __b) { + return (__vector signed int)__builtin_s390_verllf( + (__vector unsigned int)__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_rli(__vector unsigned int __a, unsigned long __b) { + return __builtin_s390_verllf(__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_rli(__vector signed long long __a, unsigned long __b) { + return (__vector signed long long)__builtin_s390_verllg( + (__vector unsigned long long)__a, (int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_rli(__vector unsigned long long __a, unsigned long __b) { + return __builtin_s390_verllg(__a, (int)__b); +} 
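+
+/* Editorial illustration, not part of the upstream header: a minimal usage
+ * sketch for the logical, bit-count, and rotate intrinsics above.  Assumed
+ * setup (the function name `demo` is hypothetical): a C translation unit
+ * built for z/Architecture with the vector facility, e.g.
+ *   clang --target=s390x-linux-gnu -march=z13 -mzvector demo.c
+ *
+ *   #include <vecintrin.h>
+ *
+ *   __vector unsigned int demo(__vector unsigned int a,
+ *                              __vector unsigned int b) {
+ *     __vector unsigned int n = vec_nor(a, b);   // ~(a | b), lane-wise
+ *     __vector unsigned int l = vec_cntlz(n);    // leading zeros per lane
+ *     __vector unsigned int p = vec_popcnt(n);   // set bits per lane
+ *     return vec_rli(l + p, 8);                  // rotate each lane left by 8
+ *   }
+ *
+ * vec_rli takes an immediate rotate count applied to every lane, while the
+ * vec_rl overloads above accept a per-lane count vector.
+ */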
+ +/*-- vec_rl_mask ------------------------------------------------------------*/ + +extern __ATTRS_o __vector signed char +vec_rl_mask(__vector signed char __a, __vector unsigned char __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector unsigned char +vec_rl_mask(__vector unsigned char __a, __vector unsigned char __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector signed short +vec_rl_mask(__vector signed short __a, __vector unsigned short __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector unsigned short +vec_rl_mask(__vector unsigned short __a, __vector unsigned short __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector signed int +vec_rl_mask(__vector signed int __a, __vector unsigned int __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector unsigned int +vec_rl_mask(__vector unsigned int __a, __vector unsigned int __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector signed long long +vec_rl_mask(__vector signed long long __a, __vector unsigned long long __b, + unsigned char __c) __constant(__c); + +extern __ATTRS_o __vector unsigned long long +vec_rl_mask(__vector unsigned long long __a, __vector unsigned long long __b, + unsigned char __c) __constant(__c); + +#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \ + __extension__ ({ \ + __vector unsigned char __res; \ + __vector unsigned char __x = (__vector unsigned char)(X); \ + __vector unsigned char __y = (__vector unsigned char)(Y); \ + switch (sizeof ((X)[0])) { \ + case 1: __res = (__vector unsigned char) __builtin_s390_verimb( \ + (__vector unsigned char)__x, (__vector unsigned char)__x, \ + (__vector unsigned char)__y, (Z)); break; \ + case 2: __res = (__vector unsigned char) __builtin_s390_verimh( \ + (__vector unsigned short)__x, (__vector unsigned short)__x, \ + (__vector unsigned short)__y, (Z)); break; \ + case 4: __res = (__vector unsigned char) __builtin_s390_verimf( \ + (__vector unsigned int)__x, (__vector unsigned int)__x, \ + (__vector unsigned int)__y, (Z)); break; \ + default: __res = (__vector unsigned char) __builtin_s390_verimg( \ + (__vector unsigned long long)__x, (__vector unsigned long long)__x, \ + (__vector unsigned long long)__y, (Z)); break; \ + } __res; })) + +/*-- vec_sll ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_sll(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_sll(__vector signed char __a, __vector unsigned short __b) { + return (__vector signed char)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_sll(__vector signed char __a, __vector unsigned int __b) { + return (__vector signed char)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool char +vec_sll(__vector __bool char __a, __vector unsigned char __b) { + return (__vector __bool char)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector __bool char +vec_sll(__vector __bool char __a, __vector unsigned short __b) { + return (__vector __bool char)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool char +vec_sll(__vector __bool char __a, __vector unsigned int __b) { + return (__vector __bool char)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_sll(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsl(__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_sll(__vector unsigned char __a, __vector unsigned short __b) { + return __builtin_s390_vsl(__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_sll(__vector unsigned char __a, __vector unsigned int __b) { + return __builtin_s390_vsl(__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_sll(__vector signed short __a, __vector unsigned char __b) { + return (__vector signed short)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_sll(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_sll(__vector signed short __a, __vector unsigned int __b) { + return (__vector signed short)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_sll(__vector __bool short __a, __vector unsigned char __b) { + return (__vector __bool short)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_sll(__vector __bool short __a, __vector unsigned short __b) { + return (__vector __bool short)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_sll(__vector __bool short __a, __vector unsigned int __b) { + return (__vector __bool short)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_sll(__vector unsigned short __a, __vector unsigned char __b) { + return (__vector unsigned short)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_sll(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector unsigned short +vec_sll(__vector unsigned short __a, __vector unsigned int __b) { + return (__vector unsigned short)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_sll(__vector signed int __a, __vector unsigned char __b) { + return (__vector signed int)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_sll(__vector signed int __a, __vector unsigned short __b) { + return (__vector signed int)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_sll(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_sll(__vector __bool int __a, __vector unsigned char __b) { + return (__vector __bool int)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_sll(__vector __bool int __a, __vector unsigned short __b) { + return (__vector __bool int)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_sll(__vector __bool int __a, __vector unsigned int __b) { + return (__vector __bool int)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_sll(__vector unsigned int __a, __vector unsigned char __b) { + return (__vector unsigned int)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_sll(__vector unsigned int __a, __vector unsigned short __b) { + return (__vector unsigned int)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_sll(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_sll(__vector signed long long __a, __vector unsigned char __b) { + return (__vector signed long long)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_sll(__vector signed long long __a, __vector unsigned short __b) { + return (__vector signed long long)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_sll(__vector signed long long __a, __vector unsigned int __b) { + return (__vector signed long long)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool long long +vec_sll(__vector __bool long long __a, __vector unsigned char __b) { + return (__vector __bool long long)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector __bool long long +vec_sll(__vector __bool long long __a, __vector unsigned short __b) { + return (__vector __bool long long)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool long long +vec_sll(__vector __bool long long __a, __vector unsigned int __b) { + return (__vector __bool long long)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_sll(__vector unsigned long long __a, __vector unsigned char __b) { + return (__vector unsigned long long)__builtin_s390_vsl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_sll(__vector unsigned long long __a, __vector unsigned short __b) { + return (__vector unsigned long long)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_sll(__vector unsigned long long __a, __vector unsigned int __b) { + return (__vector unsigned long long)__builtin_s390_vsl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +/*-- vec_slb ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_slb(__vector signed char __a, __vector signed char __b) { + return (__vector signed char)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed char +vec_slb(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_vslb( + (__vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_slb(__vector unsigned char __a, __vector signed char __b) { + return __builtin_s390_vslb(__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_slb(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vslb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_slb(__vector signed short __a, __vector signed short __b) { + return (__vector signed short)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_slb(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_slb(__vector unsigned short __a, __vector signed short __b) { + return (__vector unsigned short)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_slb(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_slb(__vector signed int __a, __vector signed int __b) { + return (__vector signed int)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_slb(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_vslb( + (__vector 
unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_slb(__vector unsigned int __a, __vector signed int __b) { + return (__vector unsigned int)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_slb(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_slb(__vector signed long long __a, __vector signed long long __b) { + return (__vector signed long long)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_slb(__vector signed long long __a, __vector unsigned long long __b) { + return (__vector signed long long)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_slb(__vector unsigned long long __a, __vector signed long long __b) { + return (__vector unsigned long long)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_slb(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector unsigned long long)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_slb(__vector float __a, __vector signed int __b) { + return (__vector float)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector float +vec_slb(__vector float __a, __vector unsigned int __b) { + return (__vector float)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_slb(__vector double __a, __vector signed long long __b) { + return (__vector double)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector double +vec_slb(__vector double __a, __vector unsigned long long __b) { + return (__vector double)__builtin_s390_vslb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +/*-- vec_sld ----------------------------------------------------------------*/ + +extern __ATTRS_o __vector signed char +vec_sld(__vector signed char __a, __vector signed char __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector __bool char +vec_sld(__vector __bool char __a, __vector __bool char __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector unsigned char +vec_sld(__vector unsigned char __a, __vector unsigned char __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector signed short +vec_sld(__vector signed short __a, __vector signed short __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector __bool short +vec_sld(__vector __bool short __a, __vector __bool short __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector unsigned short +vec_sld(__vector unsigned short __a, __vector unsigned short __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector signed int +vec_sld(__vector signed int __a, __vector signed int __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o 
__vector __bool int +vec_sld(__vector __bool int __a, __vector __bool int __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector unsigned int +vec_sld(__vector unsigned int __a, __vector unsigned int __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector signed long long +vec_sld(__vector signed long long __a, __vector signed long long __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector __bool long long +vec_sld(__vector __bool long long __a, __vector __bool long long __b, int __c) + __constant_range(__c, 0, 15); + +extern __ATTRS_o __vector unsigned long long +vec_sld(__vector unsigned long long __a, __vector unsigned long long __b, + int __c) + __constant_range(__c, 0, 15); + +#if __ARCH__ >= 12 +extern __ATTRS_o __vector float +vec_sld(__vector float __a, __vector float __b, int __c) + __constant_range(__c, 0, 15); +#endif + +extern __ATTRS_o __vector double +vec_sld(__vector double __a, __vector double __b, int __c) + __constant_range(__c, 0, 15); + +#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \ + __builtin_s390_vsldb((__vector unsigned char)(X), \ + (__vector unsigned char)(Y), (Z))) + +/*-- vec_sldw ---------------------------------------------------------------*/ + +extern __ATTRS_o __vector signed char +vec_sldw(__vector signed char __a, __vector signed char __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector unsigned char +vec_sldw(__vector unsigned char __a, __vector unsigned char __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector signed short +vec_sldw(__vector signed short __a, __vector signed short __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector unsigned short +vec_sldw(__vector unsigned short __a, __vector unsigned short __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector signed int +vec_sldw(__vector signed int __a, __vector signed int __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector unsigned int +vec_sldw(__vector unsigned int __a, __vector unsigned int __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector signed long long +vec_sldw(__vector signed long long __a, __vector signed long long __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o __vector unsigned long long +vec_sldw(__vector unsigned long long __a, __vector unsigned long long __b, + int __c) + __constant_range(__c, 0, 3); + +// This prototype is deprecated. 
+extern __ATTRS_o __vector double +vec_sldw(__vector double __a, __vector double __b, int __c) + __constant_range(__c, 0, 3); + +#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \ + __builtin_s390_vsldb((__vector unsigned char)(X), \ + (__vector unsigned char)(Y), (Z) * 4)) + +/*-- vec_sldb ---------------------------------------------------------------*/ + +#if __ARCH__ >= 13 + +extern __ATTRS_o __vector signed char +vec_sldb(__vector signed char __a, __vector signed char __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned char +vec_sldb(__vector unsigned char __a, __vector unsigned char __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector signed short +vec_sldb(__vector signed short __a, __vector signed short __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned short +vec_sldb(__vector unsigned short __a, __vector unsigned short __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector signed int +vec_sldb(__vector signed int __a, __vector signed int __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned int +vec_sldb(__vector unsigned int __a, __vector unsigned int __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector signed long long +vec_sldb(__vector signed long long __a, __vector signed long long __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned long long +vec_sldb(__vector unsigned long long __a, __vector unsigned long long __b, + int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector float +vec_sldb(__vector float __a, __vector float __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector double +vec_sldb(__vector double __a, __vector double __b, int __c) + __constant_range(__c, 0, 7); + +#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \ + __builtin_s390_vsld((__vector unsigned char)(X), \ + (__vector unsigned char)(Y), (Z))) + +#endif + +/*-- vec_sral ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_sral(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_sral(__vector signed char __a, __vector unsigned short __b) { + return (__vector signed char)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_sral(__vector signed char __a, __vector unsigned int __b) { + return (__vector signed char)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool char +vec_sral(__vector __bool char __a, __vector unsigned char __b) { + return (__vector __bool char)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool char +vec_sral(__vector __bool char __a, __vector unsigned short __b) { + return (__vector __bool char)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector __bool char +vec_sral(__vector __bool char __a, __vector unsigned int __b) { + return (__vector __bool char)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_sral(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsra(__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_sral(__vector unsigned char __a, __vector unsigned short __b) { + return __builtin_s390_vsra(__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_sral(__vector unsigned char __a, __vector unsigned int __b) { + return __builtin_s390_vsra(__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_sral(__vector signed short __a, __vector unsigned char __b) { + return (__vector signed short)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_sral(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_sral(__vector signed short __a, __vector unsigned int __b) { + return (__vector signed short)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_sral(__vector __bool short __a, __vector unsigned char __b) { + return (__vector __bool short)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_sral(__vector __bool short __a, __vector unsigned short __b) { + return (__vector __bool short)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_sral(__vector __bool short __a, __vector unsigned int __b) { + return (__vector __bool short)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_sral(__vector unsigned short __a, __vector unsigned char __b) { + return (__vector unsigned short)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_sral(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_sral(__vector unsigned short __a, __vector unsigned int __b) { + return (__vector unsigned short)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_sral(__vector signed int __a, __vector unsigned char __b) { + return (__vector signed int)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector signed int +vec_sral(__vector signed int __a, __vector unsigned short __b) { + return (__vector signed int)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_sral(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_sral(__vector __bool int __a, __vector unsigned char __b) { + return (__vector __bool int)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_sral(__vector __bool int __a, __vector unsigned short __b) { + return (__vector __bool int)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_sral(__vector __bool int __a, __vector unsigned int __b) { + return (__vector __bool int)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_sral(__vector unsigned int __a, __vector unsigned char __b) { + return (__vector unsigned int)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_sral(__vector unsigned int __a, __vector unsigned short __b) { + return (__vector unsigned int)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_sral(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_sral(__vector signed long long __a, __vector unsigned char __b) { + return (__vector signed long long)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_sral(__vector signed long long __a, __vector unsigned short __b) { + return (__vector signed long long)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_sral(__vector signed long long __a, __vector unsigned int __b) { + return (__vector signed long long)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool long long +vec_sral(__vector __bool long long __a, __vector unsigned char __b) { + return (__vector __bool long long)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool long long +vec_sral(__vector __bool long long __a, __vector unsigned short __b) { + return (__vector __bool long long)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector __bool long long +vec_sral(__vector __bool long long __a, __vector unsigned int __b) { + return (__vector __bool long long)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_sral(__vector unsigned long long __a, __vector unsigned char __b) { + return (__vector unsigned long long)__builtin_s390_vsra( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_sral(__vector unsigned long long __a, __vector unsigned short __b) { + return (__vector unsigned long long)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_sral(__vector unsigned long long __a, __vector unsigned int __b) { + return (__vector unsigned long long)__builtin_s390_vsra( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +/*-- vec_srab ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_srab(__vector signed char __a, __vector signed char __b) { + return (__vector signed char)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed char +vec_srab(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_vsrab( + (__vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_srab(__vector unsigned char __a, __vector signed char __b) { + return __builtin_s390_vsrab(__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_srab(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsrab(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_srab(__vector signed short __a, __vector signed short __b) { + return (__vector signed short)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_srab(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_srab(__vector unsigned short __a, __vector signed short __b) { + return (__vector unsigned short)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_srab(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_srab(__vector signed int __a, __vector signed int __b) { + return (__vector signed int)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_srab(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_srab(__vector unsigned int __a, __vector signed int __b) { + return (__vector unsigned int)__builtin_s390_vsrab( + (__vector unsigned char)__a, 
(__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_srab(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_srab(__vector signed long long __a, __vector signed long long __b) { + return (__vector signed long long)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_srab(__vector signed long long __a, __vector unsigned long long __b) { + return (__vector signed long long)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_srab(__vector unsigned long long __a, __vector signed long long __b) { + return (__vector unsigned long long)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_srab(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector unsigned long long)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_srab(__vector float __a, __vector signed int __b) { + return (__vector float)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector float +vec_srab(__vector float __a, __vector unsigned int __b) { + return (__vector float)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_srab(__vector double __a, __vector signed long long __b) { + return (__vector double)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector double +vec_srab(__vector double __a, __vector unsigned long long __b) { + return (__vector double)__builtin_s390_vsrab( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +/*-- vec_srl ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_srl(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_srl(__vector signed char __a, __vector unsigned short __b) { + return (__vector signed char)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_srl(__vector signed char __a, __vector unsigned int __b) { + return (__vector signed char)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool char +vec_srl(__vector __bool char __a, __vector unsigned char __b) { + return (__vector __bool char)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector __bool char +vec_srl(__vector __bool char __a, __vector unsigned short __b) { + return (__vector __bool char)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool char +vec_srl(__vector __bool char __a, __vector unsigned int __b) { + return (__vector __bool char)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_srl(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsrl(__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_srl(__vector unsigned char __a, __vector unsigned short __b) { + return __builtin_s390_vsrl(__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_srl(__vector unsigned char __a, __vector unsigned int __b) { + return __builtin_s390_vsrl(__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_srl(__vector signed short __a, __vector unsigned char __b) { + return (__vector signed short)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_srl(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_srl(__vector signed short __a, __vector unsigned int __b) { + return (__vector signed short)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_srl(__vector __bool short __a, __vector unsigned char __b) { + return (__vector __bool short)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_srl(__vector __bool short __a, __vector unsigned short __b) { + return (__vector __bool short)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool short +vec_srl(__vector __bool short __a, __vector unsigned int __b) { + return (__vector __bool short)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_srl(__vector unsigned short __a, __vector unsigned char __b) { + return (__vector unsigned short)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_srl(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector unsigned short +vec_srl(__vector unsigned short __a, __vector unsigned int __b) { + return (__vector unsigned short)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_srl(__vector signed int __a, __vector unsigned char __b) { + return (__vector signed int)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_srl(__vector signed int __a, __vector unsigned short __b) { + return (__vector signed int)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_srl(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_srl(__vector __bool int __a, __vector unsigned char __b) { + return (__vector __bool int)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_srl(__vector __bool int __a, __vector unsigned short __b) { + return (__vector __bool int)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool int +vec_srl(__vector __bool int __a, __vector unsigned int __b) { + return (__vector __bool int)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_srl(__vector unsigned int __a, __vector unsigned char __b) { + return (__vector unsigned int)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_srl(__vector unsigned int __a, __vector unsigned short __b) { + return (__vector unsigned int)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_srl(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_srl(__vector signed long long __a, __vector unsigned char __b) { + return (__vector signed long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_srl(__vector signed long long __a, __vector unsigned short __b) { + return (__vector signed long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_srl(__vector signed long long __a, __vector unsigned int __b) { + return (__vector signed long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector __bool long long +vec_srl(__vector __bool long long __a, __vector unsigned char __b) { + return (__vector __bool long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool long long +vec_srl(__vector __bool long long __a, __vector unsigned short __b) { + return (__vector __bool long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector __bool long long +vec_srl(__vector __bool long long __a, __vector unsigned int __b) { + return (__vector __bool long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_srl(__vector unsigned long long __a, __vector unsigned char __b) { + return (__vector unsigned long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, __b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_srl(__vector unsigned long long __a, __vector unsigned short __b) { + return (__vector unsigned long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_srl(__vector unsigned long long __a, __vector unsigned int __b) { + return (__vector unsigned long long)__builtin_s390_vsrl( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +/*-- vec_srb ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_srb(__vector signed char __a, __vector signed char __b) { + return (__vector signed char)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed char +vec_srb(__vector signed char __a, __vector unsigned char __b) { + return (__vector signed char)__builtin_s390_vsrlb( + (__vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_srb(__vector unsigned char __a, __vector signed char __b) { + return __builtin_s390_vsrlb(__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_srb(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsrlb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_srb(__vector signed short __a, __vector signed short __b) { + return (__vector signed short)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_srb(__vector signed short __a, __vector unsigned short __b) { + return (__vector signed short)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_srb(__vector unsigned short __a, __vector signed short __b) { + return (__vector unsigned short)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_srb(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector unsigned short)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_srb(__vector signed int __a, __vector signed int __b) { + return (__vector signed 
int)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_srb(__vector signed int __a, __vector unsigned int __b) { + return (__vector signed int)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_srb(__vector unsigned int __a, __vector signed int __b) { + return (__vector unsigned int)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_srb(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector unsigned int)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_srb(__vector signed long long __a, __vector signed long long __b) { + return (__vector signed long long)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_srb(__vector signed long long __a, __vector unsigned long long __b) { + return (__vector signed long long)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_srb(__vector unsigned long long __a, __vector signed long long __b) { + return (__vector unsigned long long)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_srb(__vector unsigned long long __a, __vector unsigned long long __b) { + return (__vector unsigned long long)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_srb(__vector float __a, __vector signed int __b) { + return (__vector float)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector float +vec_srb(__vector float __a, __vector unsigned int __b) { + return (__vector float)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_srb(__vector double __a, __vector signed long long __b) { + return (__vector double)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector double +vec_srb(__vector double __a, __vector unsigned long long __b) { + return (__vector double)__builtin_s390_vsrlb( + (__vector unsigned char)__a, (__vector unsigned char)__b); +} + +/*-- vec_srdb ---------------------------------------------------------------*/ + +#if __ARCH__ >= 13 + +extern __ATTRS_o __vector signed char +vec_srdb(__vector signed char __a, __vector signed char __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned char +vec_srdb(__vector unsigned char __a, __vector unsigned char __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector signed short +vec_srdb(__vector signed short __a, __vector signed short __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned short +vec_srdb(__vector unsigned short __a, __vector unsigned short __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector signed int +vec_srdb(__vector signed int __a, __vector signed int __b, int __c) + __constant_range(__c, 0, 7); + +extern 
__ATTRS_o __vector unsigned int +vec_srdb(__vector unsigned int __a, __vector unsigned int __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector signed long long +vec_srdb(__vector signed long long __a, __vector signed long long __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector unsigned long long +vec_srdb(__vector unsigned long long __a, __vector unsigned long long __b, + int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector float +vec_srdb(__vector float __a, __vector float __b, int __c) + __constant_range(__c, 0, 7); + +extern __ATTRS_o __vector double +vec_srdb(__vector double __a, __vector double __b, int __c) + __constant_range(__c, 0, 7); + +#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \ + __builtin_s390_vsrd((__vector unsigned char)(X), \ + (__vector unsigned char)(Y), (Z))) + +#endif + +/*-- vec_abs ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_abs(__vector signed char __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed char)0)); +} + +static inline __ATTRS_o_ai __vector signed short +vec_abs(__vector signed short __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed short)0)); +} + +static inline __ATTRS_o_ai __vector signed int +vec_abs(__vector signed int __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed int)0)); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_abs(__vector signed long long __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed long long)0)); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_abs(__vector float __a) { + return __builtin_s390_vflpsb(__a); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_abs(__vector double __a) { + return __builtin_s390_vflpdb(__a); +} + +/*-- vec_nabs ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_nabs(__vector float __a) { + return __builtin_s390_vflnsb(__a); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_nabs(__vector double __a) { + return __builtin_s390_vflndb(__a); +} + +/*-- vec_max ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_max(__vector signed char __a, __vector signed char __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_max(__vector signed char __a, __vector __bool char __b) { + __vector signed char __bc = (__vector signed char)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_max(__vector __bool char __a, __vector signed char __b) { + __vector signed char __ac = (__vector signed char)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_max(__vector unsigned char __a, __vector unsigned char __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_max(__vector unsigned char __a, __vector __bool char __b) { + __vector unsigned char __bc = (__vector unsigned char)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. 
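The integer vec_abs overloads above carry their own implementation rather than a builtin: vec_cmplt yields an all-ones mask for the negative lanes and vec_sel substitutes the negated value there. The same branch-free conditional negation, written out for one scalar lane (an illustrative model, not code from this header):

// Scalar model of one lane of vec_abs(__vector signed int).
static int abs_lane(int a) {
  int mask = -(a < 0);       // all-ones when a is negative, else zero
  return (a ^ mask) - mask;  // yields -a for negative lanes, a otherwise
  // Caveat mirrored from the vector code: in two's complement,
  // the most negative value wraps to itself.
}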
+static inline __ATTRS_o_ai __vector unsigned char +vec_max(__vector __bool char __a, __vector unsigned char __b) { + __vector unsigned char __ac = (__vector unsigned char)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector signed short +vec_max(__vector signed short __a, __vector signed short __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_max(__vector signed short __a, __vector __bool short __b) { + __vector signed short __bc = (__vector signed short)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_max(__vector __bool short __a, __vector signed short __b) { + __vector signed short __ac = (__vector signed short)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_max(__vector unsigned short __a, __vector unsigned short __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_max(__vector unsigned short __a, __vector __bool short __b) { + __vector unsigned short __bc = (__vector unsigned short)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_max(__vector __bool short __a, __vector unsigned short __b) { + __vector unsigned short __ac = (__vector unsigned short)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector signed int +vec_max(__vector signed int __a, __vector signed int __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_max(__vector signed int __a, __vector __bool int __b) { + __vector signed int __bc = (__vector signed int)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_max(__vector __bool int __a, __vector signed int __b) { + __vector signed int __ac = (__vector signed int)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_max(__vector unsigned int __a, __vector unsigned int __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_max(__vector unsigned int __a, __vector __bool int __b) { + __vector unsigned int __bc = (__vector unsigned int)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_max(__vector __bool int __a, __vector unsigned int __b) { + __vector unsigned int __ac = (__vector unsigned int)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_max(__vector signed long long __a, __vector signed long long __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_max(__vector signed long long __a, __vector __bool long long __b) { + __vector signed long long __bc = (__vector signed long long)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector signed long long +vec_max(__vector __bool long long __a, __vector signed long long __b) { + __vector signed long long __ac = (__vector signed long long)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_max(__vector unsigned long long __a, __vector unsigned long long __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_max(__vector unsigned long long __a, __vector __bool long long __b) { + __vector unsigned long long __bc = (__vector unsigned long long)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_max(__vector __bool long long __a, __vector unsigned long long __b) { + __vector unsigned long long __ac = (__vector unsigned long long)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_max(__vector float __a, __vector float __b) { + return __builtin_s390_vfmaxsb(__a, __b, 0); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_max(__vector double __a, __vector double __b) { +#if __ARCH__ >= 12 + return __builtin_s390_vfmaxdb(__a, __b, 0); +#else + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +#endif +} + +/*-- vec_min ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_min(__vector signed char __a, __vector signed char __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_min(__vector signed char __a, __vector __bool char __b) { + __vector signed char __bc = (__vector signed char)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed char +vec_min(__vector __bool char __a, __vector signed char __b) { + __vector signed char __ac = (__vector signed char)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_min(__vector unsigned char __a, __vector unsigned char __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_min(__vector unsigned char __a, __vector __bool char __b) { + __vector unsigned char __bc = (__vector unsigned char)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned char +vec_min(__vector __bool char __a, __vector unsigned char __b) { + __vector unsigned char __ac = (__vector unsigned char)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector signed short +vec_min(__vector signed short __a, __vector signed short __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed short +vec_min(__vector signed short __a, __vector __bool short __b) { + __vector signed short __bc = (__vector signed short)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. 
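Note the pattern running through vec_max and vec_min: each integer overload is a single vec_cmpgt plus a vec_sel, and only the float/double versions (on arch 12 and up) map to the dedicated VFMAXSB/VFMAXDB/VFMINSB/VFMINDB instructions, invoked here with function code 0. A small usage sketch built on the two intrinsics; clamp_s32 is an illustrative helper name, not part of the header:

// Per-element clamp of a signed int vector to [__lo, __hi].
static inline __vector signed int
clamp_s32(__vector signed int __v,
          __vector signed int __lo, __vector signed int __hi) {
  return vec_min(vec_max(__v, __lo), __hi);
}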
+static inline __ATTRS_o_ai __vector signed short +vec_min(__vector __bool short __a, __vector signed short __b) { + __vector signed short __ac = (__vector signed short)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_min(__vector unsigned short __a, __vector unsigned short __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_min(__vector unsigned short __a, __vector __bool short __b) { + __vector unsigned short __bc = (__vector unsigned short)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned short +vec_min(__vector __bool short __a, __vector unsigned short __b) { + __vector unsigned short __ac = (__vector unsigned short)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector signed int +vec_min(__vector signed int __a, __vector signed int __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_min(__vector signed int __a, __vector __bool int __b) { + __vector signed int __bc = (__vector signed int)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed int +vec_min(__vector __bool int __a, __vector signed int __b) { + __vector signed int __ac = (__vector signed int)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_min(__vector unsigned int __a, __vector unsigned int __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_min(__vector unsigned int __a, __vector __bool int __b) { + __vector unsigned int __bc = (__vector unsigned int)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned int +vec_min(__vector __bool int __a, __vector unsigned int __b) { + __vector unsigned int __ac = (__vector unsigned int)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_min(__vector signed long long __a, __vector signed long long __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_min(__vector signed long long __a, __vector __bool long long __b) { + __vector signed long long __bc = (__vector signed long long)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector signed long long +vec_min(__vector __bool long long __a, __vector signed long long __b) { + __vector signed long long __ac = (__vector signed long long)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_min(__vector unsigned long long __a, __vector unsigned long long __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +// This prototype is deprecated. 
+static inline __ATTRS_o_ai __vector unsigned long long +vec_min(__vector unsigned long long __a, __vector __bool long long __b) { + __vector unsigned long long __bc = (__vector unsigned long long)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_min(__vector __bool long long __a, __vector unsigned long long __b) { + __vector unsigned long long __ac = (__vector unsigned long long)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_min(__vector float __a, __vector float __b) { + return __builtin_s390_vfminsb(__a, __b, 0); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_min(__vector double __a, __vector double __b) { +#if __ARCH__ >= 12 + return __builtin_s390_vfmindb(__a, __b, 0); +#else + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +#endif +} + +/*-- vec_add_u128 -----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vaq(__a, __b); +} + +/*-- vec_addc ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_addc(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vaccb(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_addc(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vacch(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_addc(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vaccf(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_vaccg(__a, __b); +} + +/*-- vec_addc_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vaccq(__a, __b); +} + +/*-- vec_adde_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vacq(__a, __b, __c); +} + +/*-- vec_addec_u128 ---------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vacccq(__a, __b, __c); +} + +/*-- vec_avg ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_avg(__vector signed char __a, __vector signed char __b) { + return __builtin_s390_vavgb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_avg(__vector signed short __a, __vector signed short __b) { + return __builtin_s390_vavgh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_avg(__vector signed int __a, __vector signed int __b) { + return __builtin_s390_vavgf(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_avg(__vector signed long long __a, __vector signed long long __b) { + return __builtin_s390_vavgg(__a, __b); +} + +static inline __ATTRS_o_ai __vector 
unsigned char +vec_avg(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vavglb(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_avg(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vavglh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_avg(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vavglf(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_avg(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_vavglg(__a, __b); +} + +/*-- vec_checksum -----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned int +vec_checksum(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vcksm(__a, __b); +} + +/*-- vec_gfmsum -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned short +vec_gfmsum(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vgfmb(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_gfmsum(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vgfmh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vgfmf(__a, __b); +} + +/*-- vec_gfmsum_128 ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_gfmsum_128(__vector unsigned long long __a, + __vector unsigned long long __b) { + return __builtin_s390_vgfmg(__a, __b); +} + +/*-- vec_gfmsum_accum -------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned short +vec_gfmsum_accum(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned short __c) { + return __builtin_s390_vgfmab(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_gfmsum_accum(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned int __c) { + return __builtin_s390_vgfmah(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_gfmsum_accum(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned long long __c) { + return __builtin_s390_vgfmaf(__a, __b, __c); +} + +/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_gfmsum_accum_128(__vector unsigned long long __a, + __vector unsigned long long __b, + __vector unsigned char __c) { + return __builtin_s390_vgfmag(__a, __b, __c); +} + +/*-- vec_mladd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_mladd(__vector signed char __a, __vector signed char __b, + __vector signed char __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed char +vec_mladd(__vector unsigned char __a, __vector signed char __b, + __vector signed char __c) { + return (__vector signed char)__a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed char +vec_mladd(__vector signed char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __a * (__vector signed char)__b + (__vector signed char)__c; +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_mladd(__vector unsigned char __a, __vector unsigned char 
__b, + __vector unsigned char __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed short +vec_mladd(__vector signed short __a, __vector signed short __b, + __vector signed short __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed short +vec_mladd(__vector unsigned short __a, __vector signed short __b, + __vector signed short __c) { + return (__vector signed short)__a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed short +vec_mladd(__vector signed short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __a * (__vector signed short)__b + (__vector signed short)__c; +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mladd(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed int +vec_mladd(__vector signed int __a, __vector signed int __b, + __vector signed int __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed int +vec_mladd(__vector unsigned int __a, __vector signed int __b, + __vector signed int __c) { + return (__vector signed int)__a * __b + __c; +} + +static inline __ATTRS_o_ai __vector signed int +vec_mladd(__vector signed int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __a * (__vector signed int)__b + (__vector signed int)__c; +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_mladd(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __a * __b + __c; +} + +/*-- vec_mhadd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_mhadd(__vector signed char __a, __vector signed char __b, + __vector signed char __c) { + return __builtin_s390_vmahb(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_mhadd(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vmalhb(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector signed short +vec_mhadd(__vector signed short __a, __vector signed short __b, + __vector signed short __c) { + return __builtin_s390_vmahh(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mhadd(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __builtin_s390_vmalhh(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector signed int +vec_mhadd(__vector signed int __a, __vector signed int __b, + __vector signed int __c) { + return __builtin_s390_vmahf(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_mhadd(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __builtin_s390_vmalhf(__a, __b, __c); +} + +/*-- vec_meadd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_meadd(__vector signed char __a, __vector signed char __b, + __vector signed short __c) { + return __builtin_s390_vmaeb(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_meadd(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned short __c) { + return __builtin_s390_vmaleb(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector signed int +vec_meadd(__vector signed short __a, __vector signed short __b, + __vector signed int __c) { + return __builtin_s390_vmaeh(__a, __b, __c); +} + 
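Three related multiply-add families meet here: vec_mladd is the plain same-width __a * __b + __c (keeping the low half of each product), vec_mhadd keeps the high half, and vec_meadd/vec_moadd widen, multiplying the even-/odd-indexed elements into accumulators whose elements are twice as wide. A scalar model of vec_meadd on signed short inputs, written as I read the VMAEH semantics (illustrative only; element indices follow the z big-endian numbering, so "even" means elements 0, 2, 4, 6):

// Scalar model: multiply even-indexed shorts at full int precision,
// then add the matching int accumulator element.
static void meadd_model(const short a[8], const short b[8],
                        const int c[4], int out[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = (int)a[2 * i] * (int)b[2 * i] + c[i];
}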
+static inline __ATTRS_o_ai __vector unsigned int +vec_meadd(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned int __c) { + return __builtin_s390_vmaleh(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_meadd(__vector signed int __a, __vector signed int __b, + __vector signed long long __c) { + return __builtin_s390_vmaef(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_meadd(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned long long __c) { + return __builtin_s390_vmalef(__a, __b, __c); +} + +/*-- vec_moadd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_moadd(__vector signed char __a, __vector signed char __b, + __vector signed short __c) { + return __builtin_s390_vmaob(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_moadd(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned short __c) { + return __builtin_s390_vmalob(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector signed int +vec_moadd(__vector signed short __a, __vector signed short __b, + __vector signed int __c) { + return __builtin_s390_vmaoh(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_moadd(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned int __c) { + return __builtin_s390_vmaloh(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_moadd(__vector signed int __a, __vector signed int __b, + __vector signed long long __c) { + return __builtin_s390_vmaof(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_moadd(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned long long __c) { + return __builtin_s390_vmalof(__a, __b, __c); +} + +/*-- vec_mulh ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_mulh(__vector signed char __a, __vector signed char __b) { + return __builtin_s390_vmhb(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_mulh(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vmlhb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_mulh(__vector signed short __a, __vector signed short __b) { + return __builtin_s390_vmhh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mulh(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vmlhh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_mulh(__vector signed int __a, __vector signed int __b) { + return __builtin_s390_vmhf(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_mulh(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vmlhf(__a, __b); +} + +/*-- vec_mule ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_mule(__vector signed char __a, __vector signed char __b) { + return __builtin_s390_vmeb(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mule(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vmleb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_mule(__vector signed short __a, __vector signed short __b) { + return __builtin_s390_vmeh(__a, __b); +} + +static inline __ATTRS_o_ai 
__vector unsigned int +vec_mule(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vmleh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_mule(__vector signed int __a, __vector signed int __b) { + return __builtin_s390_vmef(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_mule(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vmlef(__a, __b); +} + +/*-- vec_mulo ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed short +vec_mulo(__vector signed char __a, __vector signed char __b) { + return __builtin_s390_vmob(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_mulo(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vmlob(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_mulo(__vector signed short __a, __vector signed short __b) { + return __builtin_s390_vmoh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_mulo(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vmloh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed long long +vec_mulo(__vector signed int __a, __vector signed int __b) { + return __builtin_s390_vmof(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_mulo(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vmlof(__a, __b); +} + +/*-- vec_msum_u128 ----------------------------------------------------------*/ + +#if __ARCH__ >= 12 +#define vec_msum_u128(X, Y, Z, W) \ + ((__vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W))); +#endif + +/*-- vec_sub_u128 -----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsq(__a, __b); +} + +/*-- vec_subc ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_subc(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vscbib(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_subc(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vscbih(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_subc(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vscbif(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_vscbig(__a, __b); +} + +/*-- vec_subc_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vscbiq(__a, __b); +} + +/*-- vec_sube_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vsbiq(__a, __b, __c); +} + +/*-- vec_subec_u128 ---------------------------------------------------------*/ + +static inline __ATTRS_ai __vector unsigned char +vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return 
__builtin_s390_vsbcbiq(__a, __b, __c); +} + +/*-- vec_sum2 ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned long long +vec_sum2(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vsumgh(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned long long +vec_sum2(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vsumgf(__a, __b); +} + +/*-- vec_sum_u128 -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vsumqf(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) { + return __builtin_s390_vsumqg(__a, __b); +} + +/*-- vec_sum4 ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned int +vec_sum4(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vsumb(__a, __b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_sum4(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vsumh(__a, __b); +} + +/*-- vec_test_mask ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_test_mask(__vector signed char __a, __vector unsigned char __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vtm(__a, __b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector signed short __a, __vector unsigned short __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector signed int __a, __vector unsigned int __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector signed long long __a, __vector unsigned long long __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(__vector unsigned long long __a, + __vector unsigned long long __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai int +vec_test_mask(__vector float __a, __vector unsigned int __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} +#endif + +static inline __ATTRS_o_ai int +vec_test_mask(__vector double __a, __vector unsigned long long __b) { + return __builtin_s390_vtm((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +/*-- vec_madd ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_madd(__vector float __a, __vector 
float __b, __vector float __c) { + return __builtin_s390_vfmasb(__a, __b, __c); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_madd(__vector double __a, __vector double __b, __vector double __c) { + return __builtin_s390_vfmadb(__a, __b, __c); +} + +/*-- vec_msub ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_msub(__vector float __a, __vector float __b, __vector float __c) { + return __builtin_s390_vfmssb(__a, __b, __c); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_msub(__vector double __a, __vector double __b, __vector double __c) { + return __builtin_s390_vfmsdb(__a, __b, __c); +} + +/*-- vec_nmadd ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_nmadd(__vector float __a, __vector float __b, __vector float __c) { + return __builtin_s390_vfnmasb(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector double +vec_nmadd(__vector double __a, __vector double __b, __vector double __c) { + return __builtin_s390_vfnmadb(__a, __b, __c); +} +#endif + +/*-- vec_nmsub ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_nmsub(__vector float __a, __vector float __b, __vector float __c) { + return __builtin_s390_vfnmssb(__a, __b, __c); +} + +static inline __ATTRS_o_ai __vector double +vec_nmsub(__vector double __a, __vector double __b, __vector double __c) { + return __builtin_s390_vfnmsdb(__a, __b, __c); +} +#endif + +/*-- vec_sqrt ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_sqrt(__vector float __a) { + return __builtin_s390_vfsqsb(__a); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_sqrt(__vector double __a) { + return __builtin_s390_vfsqdb(__a); +} + +/*-- vec_ld2f ---------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_ai __vector double +vec_ld2f(const float *__ptr) { + typedef float __v2f32 __attribute__((__vector_size__(8))); + return __builtin_convertvector(*(const __v2f32 *)__ptr, __vector double); +} + +/*-- vec_st2f ---------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_ai void +vec_st2f(__vector double __a, float *__ptr) { + typedef float __v2f32 __attribute__((__vector_size__(8))); + *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32); +} + +/*-- vec_ctd ----------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector double +vec_ctd(__vector signed long long __a, int __b) + __constant_range(__b, 0, 31) { + __vector double __conv = __builtin_convertvector(__a, __vector double); + __conv *= ((__vector double)(__vector unsigned long long) + ((0x3ffULL - __b) << 52)); + return __conv; +} + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector double +vec_ctd(__vector unsigned long long __a, int __b) + __constant_range(__b, 0, 31) { + __vector double __conv = __builtin_convertvector(__a, __vector double); + __conv *= ((__vector double)(__vector unsigned long long) + ((0x3ffULL - __b) << 52)); + return __conv; +} + +/*-- vec_ctsl ---------------------------------------------------------------*/ + +// This prototype is deprecated. 
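The deprecated vec_ctd conversion above is worth unpacking: after converting the integer vector to double it scales by 2^-__b, and it builds that constant directly from the IEEE-754 bit layout, since (0x3ffULL - __b) << 52 is a binary64 value with biased exponent 1023 - __b and a zero fraction, i.e. exactly 2^-__b (vec_ctsl/vec_ctul below use the mirror-image 0x3ffULL + __b to scale by 2^+__b before converting back). A standalone check of that identity, assuming IEEE-754 binary64 doubles; pow2_minus is a hypothetical name:

#include <stdint.h>
#include <string.h>

// Reinterpreting ((0x3ffULL - b) << 52) as a double yields 2^-b.
static double pow2_minus(unsigned b) {   // b in [0, 31], as in vec_ctd
  uint64_t bits = (0x3ffULL - b) << 52;  // exponent field 1023-b, fraction 0
  double d;
  memcpy(&d, &bits, sizeof d);           // bit-for-bit reinterpretation
  return d;                              // b == 1 -> 0.5, b == 4 -> 0.0625
}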
+static inline __ATTRS_o_ai __vector signed long long +vec_ctsl(__vector double __a, int __b) + __constant_range(__b, 0, 31) { + __a *= ((__vector double)(__vector unsigned long long) + ((0x3ffULL + __b) << 52)); + return __builtin_convertvector(__a, __vector signed long long); +} + +/*-- vec_ctul ---------------------------------------------------------------*/ + +// This prototype is deprecated. +static inline __ATTRS_o_ai __vector unsigned long long +vec_ctul(__vector double __a, int __b) + __constant_range(__b, 0, 31) { + __a *= ((__vector double)(__vector unsigned long long) + ((0x3ffULL + __b) << 52)); + return __builtin_convertvector(__a, __vector unsigned long long); +} + +/*-- vec_doublee ------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_ai __vector double +vec_doublee(__vector float __a) { + typedef float __v2f32 __attribute__((__vector_size__(8))); + __v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2); + return __builtin_convertvector(__pack, __vector double); +} +#endif + +/*-- vec_floate -------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_ai __vector float +vec_floate(__vector double __a) { + typedef float __v2f32 __attribute__((__vector_size__(8))); + __v2f32 __pack = __builtin_convertvector(__a, __v2f32); + return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1); +} +#endif + +/*-- vec_double -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector double +vec_double(__vector signed long long __a) { + return __builtin_convertvector(__a, __vector double); +} + +static inline __ATTRS_o_ai __vector double +vec_double(__vector unsigned long long __a) { + return __builtin_convertvector(__a, __vector double); +} + +/*-- vec_float --------------------------------------------------------------*/ + +#if __ARCH__ >= 13 + +static inline __ATTRS_o_ai __vector float +vec_float(__vector signed int __a) { + return __builtin_convertvector(__a, __vector float); +} + +static inline __ATTRS_o_ai __vector float +vec_float(__vector unsigned int __a) { + return __builtin_convertvector(__a, __vector float); +} + +#endif + +/*-- vec_signed -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed long long +vec_signed(__vector double __a) { + return __builtin_convertvector(__a, __vector signed long long); +} + +#if __ARCH__ >= 13 +static inline __ATTRS_o_ai __vector signed int +vec_signed(__vector float __a) { + return __builtin_convertvector(__a, __vector signed int); +} +#endif + +/*-- vec_unsigned -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned long long +vec_unsigned(__vector double __a) { + return __builtin_convertvector(__a, __vector unsigned long long); +} + +#if __ARCH__ >= 13 +static inline __ATTRS_o_ai __vector unsigned int +vec_unsigned(__vector float __a) { + return __builtin_convertvector(__a, __vector unsigned int); +} +#endif + +/*-- vec_roundp -------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_roundp(__vector float __a) { + return __builtin_s390_vfisb(__a, 4, 6); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_roundp(__vector double __a) { + return __builtin_s390_vfidb(__a, 4, 6); +} + +/*-- vec_ceil ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline 
__ATTRS_o_ai __vector float +vec_ceil(__vector float __a) { + // On this platform, vec_ceil never triggers the IEEE-inexact exception. + return __builtin_s390_vfisb(__a, 4, 6); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_ceil(__vector double __a) { + // On this platform, vec_ceil never triggers the IEEE-inexact exception. + return __builtin_s390_vfidb(__a, 4, 6); +} + +/*-- vec_roundm -------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_roundm(__vector float __a) { + return __builtin_s390_vfisb(__a, 4, 7); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_roundm(__vector double __a) { + return __builtin_s390_vfidb(__a, 4, 7); +} + +/*-- vec_floor --------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_floor(__vector float __a) { + // On this platform, vec_floor never triggers the IEEE-inexact exception. + return __builtin_s390_vfisb(__a, 4, 7); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_floor(__vector double __a) { + // On this platform, vec_floor never triggers the IEEE-inexact exception. + return __builtin_s390_vfidb(__a, 4, 7); +} + +/*-- vec_roundz -------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_roundz(__vector float __a) { + return __builtin_s390_vfisb(__a, 4, 5); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_roundz(__vector double __a) { + return __builtin_s390_vfidb(__a, 4, 5); +} + +/*-- vec_trunc --------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_trunc(__vector float __a) { + // On this platform, vec_trunc never triggers the IEEE-inexact exception. + return __builtin_s390_vfisb(__a, 4, 5); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_trunc(__vector double __a) { + // On this platform, vec_trunc never triggers the IEEE-inexact exception. + return __builtin_s390_vfidb(__a, 4, 5); +} + +/*-- vec_roundc -------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_roundc(__vector float __a) { + return __builtin_s390_vfisb(__a, 4, 0); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_roundc(__vector double __a) { + return __builtin_s390_vfidb(__a, 4, 0); +} + +/*-- vec_rint ---------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_rint(__vector float __a) { + // vec_rint may trigger the IEEE-inexact exception. + return __builtin_s390_vfisb(__a, 0, 0); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_rint(__vector double __a) { + // vec_rint may trigger the IEEE-inexact exception. 
+ return __builtin_s390_vfidb(__a, 0, 0); +} + +/*-- vec_round --------------------------------------------------------------*/ + +#if __ARCH__ >= 12 +static inline __ATTRS_o_ai __vector float +vec_round(__vector float __a) { + return __builtin_s390_vfisb(__a, 4, 4); +} +#endif + +static inline __ATTRS_o_ai __vector double +vec_round(__vector double __a) { + return __builtin_s390_vfidb(__a, 4, 4); +} + +/*-- vec_fp_test_data_class -------------------------------------------------*/ + +#if __ARCH__ >= 12 +extern __ATTRS_o __vector __bool int +vec_fp_test_data_class(__vector float __a, int __b, int *__c) + __constant_range(__b, 0, 4095); + +extern __ATTRS_o __vector __bool long long +vec_fp_test_data_class(__vector double __a, int __b, int *__c) + __constant_range(__b, 0, 4095); + +#define vec_fp_test_data_class(X, Y, Z) \ + ((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \ + __extension__ ({ \ + __vector unsigned char __res; \ + __vector unsigned char __x = (__vector unsigned char)(X); \ + int *__z = (Z); \ + switch (sizeof ((X)[0])) { \ + case 4: __res = (__vector unsigned char) \ + __builtin_s390_vftcisb((__vector float)__x, (Y), __z); \ + break; \ + default: __res = (__vector unsigned char) \ + __builtin_s390_vftcidb((__vector double)__x, (Y), __z); \ + break; \ + } __res; })) +#else +#define vec_fp_test_data_class(X, Y, Z) \ + ((__vector __bool long long)__builtin_s390_vftcidb((X), (Y), (Z))) +#endif + +#define __VEC_CLASS_FP_ZERO_P (1 << 11) +#define __VEC_CLASS_FP_ZERO_N (1 << 10) +#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | __VEC_CLASS_FP_ZERO_N) +#define __VEC_CLASS_FP_NORMAL_P (1 << 9) +#define __VEC_CLASS_FP_NORMAL_N (1 << 8) +#define __VEC_CLASS_FP_NORMAL (__VEC_CLASS_FP_NORMAL_P | \ + __VEC_CLASS_FP_NORMAL_N) +#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 7) +#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 6) +#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \ + __VEC_CLASS_FP_SUBNORMAL_N) +#define __VEC_CLASS_FP_INFINITY_P (1 << 5) +#define __VEC_CLASS_FP_INFINITY_N (1 << 4) +#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \ + __VEC_CLASS_FP_INFINITY_N) +#define __VEC_CLASS_FP_QNAN_P (1 << 3) +#define __VEC_CLASS_FP_QNAN_N (1 << 2) +#define __VEC_CLASS_FP_QNAN (__VEC_CLASS_FP_QNAN_P | __VEC_CLASS_FP_QNAN_N) +#define __VEC_CLASS_FP_SNAN_P (1 << 1) +#define __VEC_CLASS_FP_SNAN_N (1 << 0) +#define __VEC_CLASS_FP_SNAN (__VEC_CLASS_FP_SNAN_P | __VEC_CLASS_FP_SNAN_N) +#define __VEC_CLASS_FP_NAN (__VEC_CLASS_FP_QNAN | __VEC_CLASS_FP_SNAN) +#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \ + __VEC_CLASS_FP_SUBNORMAL | \ + __VEC_CLASS_FP_ZERO | \ + __VEC_CLASS_FP_INFINITY) + +/*-- vec_extend_to_fp32_hi --------------------------------------------------*/ + +#if __ARCH__ >= 14 +#define vec_extend_to_fp32_hi(X, W) \ + ((__vector float)__builtin_s390_vclfnhs((X), (W))); +#endif + +/*-- vec_extend_to_fp32_lo --------------------------------------------------*/ + +#if __ARCH__ >= 14 +#define vec_extend_to_fp32_lo(X, W) \ + ((__vector float)__builtin_s390_vclfnls((X), (W))); +#endif + +/*-- vec_round_from_fp32 ----------------------------------------------------*/ + +#if __ARCH__ >= 14 +#define vec_round_from_fp32(X, Y, W) \ + ((__vector unsigned short)__builtin_s390_vcrnfs((X), (Y), (W))); +#endif + +/*-- vec_convert_to_fp16 ----------------------------------------------------*/ + +#if __ARCH__ >= 14 +#define vec_convert_to_fp16(X, W) \ + ((__vector unsigned short)__builtin_s390_vcfn((X), (W))); +#endif + +/*-- vec_convert_from_fp16 
--------------------------------------------------*/ + +#if __ARCH__ >= 14 +#define vec_convert_from_fp16(X, W) \ + ((__vector unsigned short)__builtin_s390_vcnf((X), (W))); +#endif + +/*-- vec_cp_until_zero ------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cp_until_zero(__vector signed char __a) { + return ((__vector signed char) + __builtin_s390_vistrb((__vector unsigned char)__a)); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cp_until_zero(__vector __bool char __a) { + return ((__vector __bool char) + __builtin_s390_vistrb((__vector unsigned char)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cp_until_zero(__vector unsigned char __a) { + return __builtin_s390_vistrb(__a); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cp_until_zero(__vector signed short __a) { + return ((__vector signed short) + __builtin_s390_vistrh((__vector unsigned short)__a)); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cp_until_zero(__vector __bool short __a) { + return ((__vector __bool short) + __builtin_s390_vistrh((__vector unsigned short)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cp_until_zero(__vector unsigned short __a) { + return __builtin_s390_vistrh(__a); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cp_until_zero(__vector signed int __a) { + return ((__vector signed int) + __builtin_s390_vistrf((__vector unsigned int)__a)); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cp_until_zero(__vector __bool int __a) { + return ((__vector __bool int) + __builtin_s390_vistrf((__vector unsigned int)__a)); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cp_until_zero(__vector unsigned int __a) { + return __builtin_s390_vistrf(__a); +} + +/*-- vec_cp_until_zero_cc ---------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cp_until_zero_cc(__vector signed char __a, int *__cc) { + return (__vector signed char) + __builtin_s390_vistrbs((__vector unsigned char)__a, __cc); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_cp_until_zero_cc(__vector __bool char __a, int *__cc) { + return (__vector __bool char) + __builtin_s390_vistrbs((__vector unsigned char)__a, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cp_until_zero_cc(__vector unsigned char __a, int *__cc) { + return __builtin_s390_vistrbs(__a, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cp_until_zero_cc(__vector signed short __a, int *__cc) { + return (__vector signed short) + __builtin_s390_vistrhs((__vector unsigned short)__a, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cp_until_zero_cc(__vector __bool short __a, int *__cc) { + return (__vector __bool short) + __builtin_s390_vistrhs((__vector unsigned short)__a, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cp_until_zero_cc(__vector unsigned short __a, int *__cc) { + return __builtin_s390_vistrhs(__a, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cp_until_zero_cc(__vector signed int __a, int *__cc) { + return (__vector signed int) + __builtin_s390_vistrfs((__vector unsigned int)__a, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cp_until_zero_cc(__vector __bool int __a, int *__cc) { + return (__vector __bool int) + __builtin_s390_vistrfs((__vector unsigned int)__a, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int 
+vec_cp_until_zero_cc(__vector unsigned int __a, int *__cc) { + return __builtin_s390_vistrfs(__a, __cc); +} + +/*-- vec_cmpeq_idx ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpeq_idx(__vector signed char __a, __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfeeb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_idx(__vector __bool char __a, __vector __bool char __b) { + return __builtin_s390_vfeeb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_idx(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vfeeb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpeq_idx(__vector signed short __a, __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfeeh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_idx(__vector __bool short __a, __vector __bool short __b) { + return __builtin_s390_vfeeh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_idx(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vfeeh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpeq_idx(__vector signed int __a, __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfeef((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_idx(__vector __bool int __a, __vector __bool int __b) { + return __builtin_s390_vfeef((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_idx(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vfeef(__a, __b); +} + +/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpeq_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) { + return (__vector signed char) + __builtin_s390_vfeebs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) { + return __builtin_s390_vfeebs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfeebs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpeq_idx_cc(__vector signed short __a, __vector signed short __b, + int *__cc) { + return (__vector signed short) + __builtin_s390_vfeehs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_idx_cc(__vector __bool short __a, __vector __bool short __b, int *__cc) { + return __builtin_s390_vfeehs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfeehs(__a, __b, __cc); +} + 
+static inline __ATTRS_o_ai __vector signed int +vec_cmpeq_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) { + return (__vector signed int) + __builtin_s390_vfeefs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) { + return __builtin_s390_vfeefs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_idx_cc(__vector unsigned int __a, __vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfeefs(__a, __b, __cc); +} + +/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpeq_or_0_idx(__vector signed char __a, __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfeezb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_or_0_idx(__vector __bool char __a, __vector __bool char __b) { + return __builtin_s390_vfeezb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vfeezb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpeq_or_0_idx(__vector signed short __a, __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfeezh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_or_0_idx(__vector __bool short __a, __vector __bool short __b) { + return __builtin_s390_vfeezh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vfeezh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpeq_or_0_idx(__vector signed int __a, __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfeezf((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_or_0_idx(__vector __bool int __a, __vector __bool int __b) { + return __builtin_s390_vfeezf((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vfeezf(__a, __b); +} + +/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpeq_or_0_idx_cc(__vector signed char __a, __vector signed char __b, + int *__cc) { + return (__vector signed char) + __builtin_s390_vfeezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b, + int *__cc) { + return __builtin_s390_vfeezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpeq_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfeezbs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed short 
+vec_cmpeq_or_0_idx_cc(__vector signed short __a, __vector signed short __b, + int *__cc) { + return (__vector signed short) + __builtin_s390_vfeezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b, + int *__cc) { + return __builtin_s390_vfeezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpeq_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfeezhs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpeq_or_0_idx_cc(__vector signed int __a, __vector signed int __b, + int *__cc) { + return (__vector signed int) + __builtin_s390_vfeezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b, + int *__cc) { + return __builtin_s390_vfeezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpeq_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfeezfs(__a, __b, __cc); +} + +/*-- vec_cmpne_idx ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpne_idx(__vector signed char __a, __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfeneb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_idx(__vector __bool char __a, __vector __bool char __b) { + return __builtin_s390_vfeneb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_idx(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vfeneb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpne_idx(__vector signed short __a, __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfeneh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_idx(__vector __bool short __a, __vector __bool short __b) { + return __builtin_s390_vfeneh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_idx(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vfeneh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpne_idx(__vector signed int __a, __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfenef((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_idx(__vector __bool int __a, __vector __bool int __b) { + return __builtin_s390_vfenef((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_idx(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vfenef(__a, __b); +} + +/*-- vec_cmpne_idx_cc -------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpne_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) 
{ + return (__vector signed char) + __builtin_s390_vfenebs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) { + return __builtin_s390_vfenebs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfenebs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpne_idx_cc(__vector signed short __a, __vector signed short __b, + int *__cc) { + return (__vector signed short) + __builtin_s390_vfenehs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_idx_cc(__vector __bool short __a, __vector __bool short __b, + int *__cc) { + return __builtin_s390_vfenehs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfenehs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpne_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) { + return (__vector signed int) + __builtin_s390_vfenefs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) { + return __builtin_s390_vfenefs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_idx_cc(__vector unsigned int __a, __vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfenefs(__a, __b, __cc); +} + +/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpne_or_0_idx(__vector signed char __a, __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfenezb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_or_0_idx(__vector __bool char __a, __vector __bool char __b) { + return __builtin_s390_vfenezb((__vector unsigned char)__a, + (__vector unsigned char)__b); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vfenezb(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpne_or_0_idx(__vector signed short __a, __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfenezh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_or_0_idx(__vector __bool short __a, __vector __bool short __b) { + return __builtin_s390_vfenezh((__vector unsigned short)__a, + (__vector unsigned short)__b); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vfenezh(__a, __b); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpne_or_0_idx(__vector signed int __a, __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfenezf((__vector unsigned int)__a, + 
(__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_or_0_idx(__vector __bool int __a, __vector __bool int __b) { + return __builtin_s390_vfenezf((__vector unsigned int)__a, + (__vector unsigned int)__b); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vfenezf(__a, __b); +} + +/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_cmpne_or_0_idx_cc(__vector signed char __a, __vector signed char __b, + int *__cc) { + return (__vector signed char) + __builtin_s390_vfenezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b, + int *__cc) { + return __builtin_s390_vfenezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpne_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfenezbs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_cmpne_or_0_idx_cc(__vector signed short __a, __vector signed short __b, + int *__cc) { + return (__vector signed short) + __builtin_s390_vfenezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b, + int *__cc) { + return __builtin_s390_vfenezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpne_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfenezhs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_cmpne_or_0_idx_cc(__vector signed int __a, __vector signed int __b, + int *__cc) { + return (__vector signed int) + __builtin_s390_vfenezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b, + int *__cc) { + return __builtin_s390_vfenezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpne_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfenezfs(__a, __b, __cc); +} + +/*-- vec_cmprg --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmprg(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 4); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmprg(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 4); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmprg(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 4); +} + +/*-- vec_cmprg_cc -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char 
+vec_cmprg_cc(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return (__vector __bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmprg_cc(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c, int *__cc) { + return (__vector __bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmprg_cc(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c, int *__cc) { + return (__vector __bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc); +} + +/*-- vec_cmprg_idx ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmprg_idx(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vstrcb(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmprg_idx(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __builtin_s390_vstrch(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmprg_idx(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __builtin_s390_vstrcf(__a, __b, __c, 0); +} + +/*-- vec_cmprg_idx_cc -------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmprg_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmprg_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmprg_idx_cc(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc); +} + +/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmprg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vstrczb(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmprg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __builtin_s390_vstrczh(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmprg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __builtin_s390_vstrczf(__a, __b, __c, 0); +} + +/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmprg_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmprg_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmprg_or_0_idx_cc(__vector unsigned int 
__a, __vector unsigned int __b, + __vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc); +} + +/*-- vec_cmpnrg -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpnrg(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 12); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpnrg(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 12); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpnrg(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 12); +} + +/*-- vec_cmpnrg_cc ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_cmpnrg_cc(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return (__vector __bool char) + __builtin_s390_vstrcbs(__a, __b, __c, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_cmpnrg_cc(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c, int *__cc) { + return (__vector __bool short) + __builtin_s390_vstrchs(__a, __b, __c, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_cmpnrg_cc(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c, int *__cc) { + return (__vector __bool int) + __builtin_s390_vstrcfs(__a, __b, __c, 12, __cc); +} + +/*-- vec_cmpnrg_idx ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpnrg_idx(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vstrcb(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpnrg_idx(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __builtin_s390_vstrch(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpnrg_idx(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __builtin_s390_vstrcf(__a, __b, __c, 8); +} + +/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpnrg_idx_cc(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpnrg_idx_cc(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpnrg_idx_cc(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc); +} + +/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpnrg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c) { + return __builtin_s390_vstrczb(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai 
__vector unsigned short +vec_cmpnrg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned short __c) { + return __builtin_s390_vstrczh(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpnrg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned int __c) { + return __builtin_s390_vstrczf(__a, __b, __c, 8); +} + +/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector unsigned char +vec_cmpnrg_or_0_idx_cc(__vector unsigned char __a, + __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_cmpnrg_or_0_idx_cc(__vector unsigned short __a, + __vector unsigned short __b, + __vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_cmpnrg_or_0_idx_cc(__vector unsigned int __a, + __vector unsigned int __b, + __vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc); +} + +/*-- vec_find_any_eq --------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_eq(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char) + __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 4); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_eq(__vector __bool char __a, __vector __bool char __b) { + return (__vector __bool char) + __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 4); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_eq(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 4); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_eq(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short) + __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 4); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_eq(__vector __bool short __a, __vector __bool short __b) { + return (__vector __bool short) + __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 4); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_eq(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 4); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_eq(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int) + __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 4); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_eq(__vector __bool int __a, __vector __bool int __b) { + return (__vector __bool int) + __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 4); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_eq(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 4); +} + +/*-- vec_find_any_eq_cc -----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_eq_cc(__vector signed char __a, __vector signed char __b, + int *__cc) { 
+ return (__vector __bool char) + __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_eq_cc(__vector __bool char __a, __vector __bool char __b, + int *__cc) { + return (__vector __bool char) + __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_eq_cc(__vector unsigned char __a, __vector unsigned char __b, + int *__cc) { + return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_eq_cc(__vector signed short __a, __vector signed short __b, + int *__cc) { + return (__vector __bool short) + __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_eq_cc(__vector __bool short __a, __vector __bool short __b, + int *__cc) { + return (__vector __bool short) + __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_eq_cc(__vector unsigned short __a, __vector unsigned short __b, + int *__cc) { + return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_eq_cc(__vector signed int __a, __vector signed int __b, + int *__cc) { + return (__vector __bool int) + __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_eq_cc(__vector __bool int __a, __vector __bool int __b, + int *__cc) { + return (__vector __bool int) + __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 4, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_eq_cc(__vector unsigned int __a, __vector unsigned int __b, + int *__cc) { + return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc); +} + +/*-- vec_find_any_eq_idx ----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_eq_idx(__vector signed char __a, __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_idx(__vector __bool char __a, __vector __bool char __b) { + return __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_idx(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vfaeb(__a, __b, 0); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_eq_idx(__vector signed short __a, __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_idx(__vector __bool short __a, __vector __bool short __b) { + return __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_idx(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vfaeh(__a, __b, 0); +} + +static inline __ATTRS_o_ai 
__vector signed int +vec_find_any_eq_idx(__vector signed int __a, __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_idx(__vector __bool int __a, __vector __bool int __b) { + return __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_idx(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vfaef(__a, __b, 0); +} + +/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_eq_idx_cc(__vector signed char __a, + __vector signed char __b, int *__cc) { + return (__vector signed char) + __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_idx_cc(__vector __bool char __a, + __vector __bool char __b, int *__cc) { + return __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_idx_cc(__vector unsigned char __a, + __vector unsigned char __b, int *__cc) { + return __builtin_s390_vfaebs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_eq_idx_cc(__vector signed short __a, + __vector signed short __b, int *__cc) { + return (__vector signed short) + __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_idx_cc(__vector __bool short __a, + __vector __bool short __b, int *__cc) { + return __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_idx_cc(__vector unsigned short __a, + __vector unsigned short __b, int *__cc) { + return __builtin_s390_vfaehs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_eq_idx_cc(__vector signed int __a, + __vector signed int __b, int *__cc) { + return (__vector signed int) + __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_idx_cc(__vector __bool int __a, + __vector __bool int __b, int *__cc) { + return __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_idx_cc(__vector unsigned int __a, + __vector unsigned int __b, int *__cc) { + return __builtin_s390_vfaefs(__a, __b, 0, __cc); +} + +/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_eq_or_0_idx(__vector signed char __a, + __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfaezb((__vector unsigned char)__a, + (__vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_or_0_idx(__vector __bool char __a, + __vector __bool char __b) { + return __builtin_s390_vfaezb((__vector unsigned char)__a, + (__vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_or_0_idx(__vector unsigned char __a, + __vector unsigned 
char __b) { + return __builtin_s390_vfaezb(__a, __b, 0); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_eq_or_0_idx(__vector signed short __a, + __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfaezh((__vector unsigned short)__a, + (__vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_or_0_idx(__vector __bool short __a, + __vector __bool short __b) { + return __builtin_s390_vfaezh((__vector unsigned short)__a, + (__vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_or_0_idx(__vector unsigned short __a, + __vector unsigned short __b) { + return __builtin_s390_vfaezh(__a, __b, 0); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_eq_or_0_idx(__vector signed int __a, + __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfaezf((__vector unsigned int)__a, + (__vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_or_0_idx(__vector __bool int __a, + __vector __bool int __b) { + return __builtin_s390_vfaezf((__vector unsigned int)__a, + (__vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_or_0_idx(__vector unsigned int __a, + __vector unsigned int __b) { + return __builtin_s390_vfaezf(__a, __b, 0); +} + +/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_eq_or_0_idx_cc(__vector signed char __a, + __vector signed char __b, int *__cc) { + return (__vector signed char) + __builtin_s390_vfaezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_or_0_idx_cc(__vector __bool char __a, + __vector __bool char __b, int *__cc) { + return __builtin_s390_vfaezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_eq_or_0_idx_cc(__vector unsigned char __a, + __vector unsigned char __b, int *__cc) { + return __builtin_s390_vfaezbs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_eq_or_0_idx_cc(__vector signed short __a, + __vector signed short __b, int *__cc) { + return (__vector signed short) + __builtin_s390_vfaezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_or_0_idx_cc(__vector __bool short __a, + __vector __bool short __b, int *__cc) { + return __builtin_s390_vfaezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_eq_or_0_idx_cc(__vector unsigned short __a, + __vector unsigned short __b, int *__cc) { + return __builtin_s390_vfaezhs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_eq_or_0_idx_cc(__vector signed int __a, + __vector signed int __b, int *__cc) { + return (__vector signed int) + __builtin_s390_vfaezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_eq_or_0_idx_cc(__vector __bool int __a, + __vector __bool int __b, int *__cc) { + return __builtin_s390_vfaezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned 
int +vec_find_any_eq_or_0_idx_cc(__vector unsigned int __a, + __vector unsigned int __b, int *__cc) { + return __builtin_s390_vfaezfs(__a, __b, 0, __cc); +} + +/*-- vec_find_any_ne --------------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_ne(__vector signed char __a, __vector signed char __b) { + return (__vector __bool char) + __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 12); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_ne(__vector __bool char __a, __vector __bool char __b) { + return (__vector __bool char) + __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 12); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_ne(__vector unsigned char __a, __vector unsigned char __b) { + return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 12); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_ne(__vector signed short __a, __vector signed short __b) { + return (__vector __bool short) + __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 12); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_ne(__vector __bool short __a, __vector __bool short __b) { + return (__vector __bool short) + __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 12); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_ne(__vector unsigned short __a, __vector unsigned short __b) { + return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 12); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_ne(__vector signed int __a, __vector signed int __b) { + return (__vector __bool int) + __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 12); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_ne(__vector __bool int __a, __vector __bool int __b) { + return (__vector __bool int) + __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 12); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_ne(__vector unsigned int __a, __vector unsigned int __b) { + return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 12); +} + +/*-- vec_find_any_ne_cc -----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_ne_cc(__vector signed char __a, + __vector signed char __b, int *__cc) { + return (__vector __bool char) + __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_ne_cc(__vector __bool char __a, + __vector __bool char __b, int *__cc) { + return (__vector __bool char) + __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool char +vec_find_any_ne_cc(__vector unsigned char __a, + __vector unsigned char __b, int *__cc) { + return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_ne_cc(__vector signed short __a, + __vector signed short __b, int *__cc) { + return (__vector __bool short) + __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_ne_cc(__vector __bool short __a, + __vector __bool short __b, int *__cc) { + 
return (__vector __bool short) + __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool short +vec_find_any_ne_cc(__vector unsigned short __a, + __vector unsigned short __b, int *__cc) { + return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_ne_cc(__vector signed int __a, + __vector signed int __b, int *__cc) { + return (__vector __bool int) + __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_ne_cc(__vector __bool int __a, + __vector __bool int __b, int *__cc) { + return (__vector __bool int) + __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 12, __cc); +} + +static inline __ATTRS_o_ai __vector __bool int +vec_find_any_ne_cc(__vector unsigned int __a, + __vector unsigned int __b, int *__cc) { + return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc); +} + +/*-- vec_find_any_ne_idx ----------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_ne_idx(__vector signed char __a, __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_idx(__vector __bool char __a, __vector __bool char __b) { + return __builtin_s390_vfaeb((__vector unsigned char)__a, + (__vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_idx(__vector unsigned char __a, __vector unsigned char __b) { + return __builtin_s390_vfaeb(__a, __b, 8); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_ne_idx(__vector signed short __a, __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_idx(__vector __bool short __a, __vector __bool short __b) { + return __builtin_s390_vfaeh((__vector unsigned short)__a, + (__vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_idx(__vector unsigned short __a, __vector unsigned short __b) { + return __builtin_s390_vfaeh(__a, __b, 8); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_ne_idx(__vector signed int __a, __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_idx(__vector __bool int __a, __vector __bool int __b) { + return __builtin_s390_vfaef((__vector unsigned int)__a, + (__vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_idx(__vector unsigned int __a, __vector unsigned int __b) { + return __builtin_s390_vfaef(__a, __b, 8); +} + +/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_ne_idx_cc(__vector signed char __a, + __vector signed char __b, int *__cc) { + return (__vector signed char) + __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char 
+vec_find_any_ne_idx_cc(__vector __bool char __a, + __vector __bool char __b, int *__cc) { + return __builtin_s390_vfaebs((__vector unsigned char)__a, + (__vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_idx_cc(__vector unsigned char __a, + __vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfaebs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_ne_idx_cc(__vector signed short __a, + __vector signed short __b, int *__cc) { + return (__vector signed short) + __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_idx_cc(__vector __bool short __a, + __vector __bool short __b, int *__cc) { + return __builtin_s390_vfaehs((__vector unsigned short)__a, + (__vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_idx_cc(__vector unsigned short __a, + __vector unsigned short __b, int *__cc) { + return __builtin_s390_vfaehs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_ne_idx_cc(__vector signed int __a, + __vector signed int __b, int *__cc) { + return (__vector signed int) + __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_idx_cc(__vector __bool int __a, + __vector __bool int __b, int *__cc) { + return __builtin_s390_vfaefs((__vector unsigned int)__a, + (__vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_idx_cc(__vector unsigned int __a, + __vector unsigned int __b, int *__cc) { + return __builtin_s390_vfaefs(__a, __b, 8, __cc); +} + +/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_ne_or_0_idx(__vector signed char __a, + __vector signed char __b) { + return (__vector signed char) + __builtin_s390_vfaezb((__vector unsigned char)__a, + (__vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_or_0_idx(__vector __bool char __a, + __vector __bool char __b) { + return __builtin_s390_vfaezb((__vector unsigned char)__a, + (__vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_or_0_idx(__vector unsigned char __a, + __vector unsigned char __b) { + return __builtin_s390_vfaezb(__a, __b, 8); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_ne_or_0_idx(__vector signed short __a, + __vector signed short __b) { + return (__vector signed short) + __builtin_s390_vfaezh((__vector unsigned short)__a, + (__vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_or_0_idx(__vector __bool short __a, + __vector __bool short __b) { + return __builtin_s390_vfaezh((__vector unsigned short)__a, + (__vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_or_0_idx(__vector unsigned short __a, + __vector unsigned short __b) { + return __builtin_s390_vfaezh(__a, __b, 8); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_ne_or_0_idx(__vector signed int __a, + __vector signed int __b) { + return (__vector signed int) + __builtin_s390_vfaezf((__vector unsigned int)__a, + (__vector unsigned int)__b, 8); +} + +static 
inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_or_0_idx(__vector __bool int __a, + __vector __bool int __b) { + return __builtin_s390_vfaezf((__vector unsigned int)__a, + (__vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_or_0_idx(__vector unsigned int __a, + __vector unsigned int __b) { + return __builtin_s390_vfaezf(__a, __b, 8); +} + +/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/ + +static inline __ATTRS_o_ai __vector signed char +vec_find_any_ne_or_0_idx_cc(__vector signed char __a, + __vector signed char __b, int *__cc) { + return (__vector signed char) + __builtin_s390_vfaezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_or_0_idx_cc(__vector __bool char __a, + __vector __bool char __b, int *__cc) { + return __builtin_s390_vfaezbs((__vector unsigned char)__a, + (__vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_find_any_ne_or_0_idx_cc(__vector unsigned char __a, + __vector unsigned char __b, int *__cc) { + return __builtin_s390_vfaezbs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector signed short +vec_find_any_ne_or_0_idx_cc(__vector signed short __a, + __vector signed short __b, int *__cc) { + return (__vector signed short) + __builtin_s390_vfaezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_or_0_idx_cc(__vector __bool short __a, + __vector __bool short __b, int *__cc) { + return __builtin_s390_vfaezhs((__vector unsigned short)__a, + (__vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned short +vec_find_any_ne_or_0_idx_cc(__vector unsigned short __a, + __vector unsigned short __b, int *__cc) { + return __builtin_s390_vfaezhs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector signed int +vec_find_any_ne_or_0_idx_cc(__vector signed int __a, + __vector signed int __b, int *__cc) { + return (__vector signed int) + __builtin_s390_vfaezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_or_0_idx_cc(__vector __bool int __a, + __vector __bool int __b, int *__cc) { + return __builtin_s390_vfaezfs((__vector unsigned int)__a, + (__vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned int +vec_find_any_ne_or_0_idx_cc(__vector unsigned int __a, + __vector unsigned int __b, int *__cc) { + return __builtin_s390_vfaezfs(__a, __b, 8, __cc); +} + +/*-- vec_search_string_cc ---------------------------------------------------*/ + +#if __ARCH__ >= 13 + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector signed char __a, __vector signed char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsb((__vector unsigned char)__a, + (__vector unsigned char)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector __bool char __a, __vector __bool char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsb((__vector unsigned char)__a, + (__vector unsigned char)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector unsigned char __a, __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return 
__builtin_s390_vstrsb(__a, __b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector signed short __a, __vector signed short __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsh((__vector unsigned short)__a, + (__vector unsigned short)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector __bool short __a, __vector __bool short __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsh((__vector unsigned short)__a, + (__vector unsigned short)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector unsigned short __a, __vector unsigned short __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsh(__a, __b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector signed int __a, __vector signed int __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsf((__vector unsigned int)__a, + (__vector unsigned int)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector __bool int __a, __vector __bool int __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsf((__vector unsigned int)__a, + (__vector unsigned int)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_cc(__vector unsigned int __a, __vector unsigned int __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrsf(__a, __b, __c, __cc); +} + +#endif + +/*-- vec_search_string_until_zero_cc ----------------------------------------*/ + +#if __ARCH__ >= 13 + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector signed char __a, + __vector signed char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrszb((__vector unsigned char)__a, + (__vector unsigned char)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector __bool char __a, + __vector __bool char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrszb((__vector unsigned char)__a, + (__vector unsigned char)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector unsigned char __a, + __vector unsigned char __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrszb(__a, __b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector signed short __a, + __vector signed short __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrszh((__vector unsigned short)__a, + (__vector unsigned short)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector __bool short __a, + __vector __bool short __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrszh((__vector unsigned short)__a, + (__vector unsigned short)__b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector unsigned short __a, + __vector unsigned short __b, + __vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrszh(__a, __b, __c, __cc); +} + +static inline __ATTRS_o_ai __vector unsigned char +vec_search_string_until_zero_cc(__vector signed int __a, + __vector signed int __b, + __vector unsigned char __c, 
int *__cc) {
+  return __builtin_s390_vstrszf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool int __a,
+                                __vector __bool int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned int __a,
+                                __vector unsigned int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf(__a, __b, __c, __cc);
+}
+
+#endif
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vpclmulqdqintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vpclmulqdqintrin.h
new file mode 100644
index 0000000..44daadb0
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/vpclmulqdqintrin.h
@@ -0,0 +1,30 @@
+/*===------------ vpclmulqdqintrin.h - VPCLMULQDQ intrinsics ---------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <vpclmulqdqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __VPCLMULQDQINTRIN_H
+#define __VPCLMULQDQINTRIN_H
+
+#define _mm256_clmulepi64_epi128(A, B, I) \
+  (__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A), \
+                                       (__v4di)(__m256i)(B), \
+                                       (char)(I))
+
+#ifdef __AVX512FINTRIN_H
+#define _mm512_clmulepi64_epi128(A, B, I) \
+  (__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \
+                                       (__v8di)(__m512i)(B), \
+                                       (char)(I))
+#endif // __AVX512FINTRIN_H
+
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/waitpkgintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/waitpkgintrin.h
new file mode 100644
index 0000000..7ecada4
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/waitpkgintrin.h
@@ -0,0 +1,42 @@
+/*===----------------------- waitpkgintrin.h - WAITPKG --------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <waitpkgintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __WAITPKGINTRIN_H
+#define __WAITPKGINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("waitpkg")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_umonitor (void * __address)
+{
+  __builtin_ia32_umonitor (__address);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_umwait (unsigned int __control, unsigned long long __counter)
+{
+  return __builtin_ia32_umwait (__control,
+    (unsigned int)(__counter >> 32), (unsigned int)__counter);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_tpause (unsigned int __control, unsigned long long __counter)
+{
+  return __builtin_ia32_tpause (__control,
+    (unsigned int)(__counter >> 32), (unsigned int)__counter);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __WAITPKGINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wasm_simd128.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wasm_simd128.h
new file mode 100644
index 0000000..712fa03
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wasm_simd128.h
@@ -0,0 +1,1734 @@
+/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __WASM_SIMD128_H
+#define __WASM_SIMD128_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+// User-facing type
+typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
+
+// Internal types determined by clang builtin definitions
+typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef signed char __i8x16
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned char __u8x16
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned short __u16x8
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned int __u32x4
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned long long __u64x2
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+
+typedef signed char __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned char __u8x8
+    __attribute__((__vector_size__(8), __aligned__(8)));
+typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned short __u16x4
+    __attribute__((__vector_size__(8), __aligned__(8)));
+typedef int __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned int __u32x2
+    __attribute__((__vector_size__(8), __aligned__(8)));
+typedef float __f32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("simd128"), \
+                 __min_vector_width__(128)))
+
+#define __REQUIRE_CONSTANT(c) \
+  __attribute__((__diagnose_if__(!__builtin_constant_p(c), \
+                                 #c " must be constant", "error")))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
+  // UB-free unaligned
access copied from xmmintrin.h + struct __wasm_v128_load_struct { + __v128_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __wasm_v128_load_struct *)__mem)->__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_v128_load8_splat(const void *__mem) { + struct __wasm_v128_load8_splat_struct { + uint8_t __v; + } __attribute__((__packed__, __may_alias__)); + uint8_t __v = ((const struct __wasm_v128_load8_splat_struct *)__mem)->__v; + return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v, + __v, __v, __v, __v, __v, __v, __v, __v}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_v128_load16_splat(const void *__mem) { + struct __wasm_v128_load16_splat_struct { + uint16_t __v; + } __attribute__((__packed__, __may_alias__)); + uint16_t __v = ((const struct __wasm_v128_load16_splat_struct *)__mem)->__v; + return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_v128_load32_splat(const void *__mem) { + struct __wasm_v128_load32_splat_struct { + uint32_t __v; + } __attribute__((__packed__, __may_alias__)); + uint32_t __v = ((const struct __wasm_v128_load32_splat_struct *)__mem)->__v; + return (v128_t)(__u32x4){__v, __v, __v, __v}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_v128_load64_splat(const void *__mem) { + struct __wasm_v128_load64_splat_struct { + uint64_t __v; + } __attribute__((__packed__, __may_alias__)); + uint64_t __v = ((const struct __wasm_v128_load64_splat_struct *)__mem)->__v; + return (v128_t)(__u64x2){__v, __v}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_load8x8(const void *__mem) { + struct __wasm_i16x8_load8x8_struct { + __i8x8 __v; + } __attribute__((__packed__, __may_alias__)); + __i8x8 __v = ((const struct __wasm_i16x8_load8x8_struct *)__mem)->__v; + return (v128_t) __builtin_convertvector(__v, __i16x8); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_load8x8(const void *__mem) { + struct __wasm_u16x8_load8x8_struct { + __u8x8 __v; + } __attribute__((__packed__, __may_alias__)); + __u8x8 __v = ((const struct __wasm_u16x8_load8x8_struct *)__mem)->__v; + return (v128_t) __builtin_convertvector(__v, __u16x8); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_load16x4(const void *__mem) { + struct __wasm_i32x4_load16x4_struct { + __i16x4 __v; + } __attribute__((__packed__, __may_alias__)); + __i16x4 __v = ((const struct __wasm_i32x4_load16x4_struct *)__mem)->__v; + return (v128_t) __builtin_convertvector(__v, __i32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_load16x4(const void *__mem) { + struct __wasm_u32x4_load16x4_struct { + __u16x4 __v; + } __attribute__((__packed__, __may_alias__)); + __u16x4 __v = ((const struct __wasm_u32x4_load16x4_struct *)__mem)->__v; + return (v128_t) __builtin_convertvector(__v, __u32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i64x2_load32x2(const void *__mem) { + struct __wasm_i64x2_load32x2_struct { + __i32x2 __v; + } __attribute__((__packed__, __may_alias__)); + __i32x2 __v = ((const struct __wasm_i64x2_load32x2_struct *)__mem)->__v; + return (v128_t) __builtin_convertvector(__v, __i64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u64x2_load32x2(const void *__mem) { + struct __wasm_u64x2_load32x2_struct { + __u32x2 __v; + } __attribute__((__packed__, __may_alias__)); + __u32x2 __v = ((const struct __wasm_u64x2_load32x2_struct *)__mem)->__v; + return (v128_t) __builtin_convertvector(__v, __u64x2); +} + +static __inline__ 
v128_t __DEFAULT_FN_ATTRS +wasm_v128_load32_zero(const void *__mem) { + struct __wasm_v128_load32_zero_struct { + int32_t __v; + } __attribute__((__packed__, __may_alias__)); + int32_t __v = ((const struct __wasm_v128_load32_zero_struct *)__mem)->__v; + return (v128_t)(__i32x4){__v, 0, 0, 0}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_v128_load64_zero(const void *__mem) { + struct __wasm_v128_load64_zero_struct { + int64_t __v; + } __attribute__((__packed__, __may_alias__)); + int64_t __v = ((const struct __wasm_v128_load64_zero_struct *)__mem)->__v; + return (v128_t)(__i64x2){__v, 0}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load8_lane( + const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_load8_lane_struct { + int8_t __v; + } __attribute__((__packed__, __may_alias__)); + int8_t __v = ((const struct __wasm_v128_load8_lane_struct *)__mem)->__v; + __i8x16 __ret = (__i8x16)__vec; + __ret[__i] = __v; + return (v128_t)__ret; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load16_lane( + const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_load16_lane_struct { + int16_t __v; + } __attribute__((__packed__, __may_alias__)); + int16_t __v = ((const struct __wasm_v128_load16_lane_struct *)__mem)->__v; + __i16x8 __ret = (__i16x8)__vec; + __ret[__i] = __v; + return (v128_t)__ret; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_lane( + const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_load32_lane_struct { + int32_t __v; + } __attribute__((__packed__, __may_alias__)); + int32_t __v = ((const struct __wasm_v128_load32_lane_struct *)__mem)->__v; + __i32x4 __ret = (__i32x4)__vec; + __ret[__i] = __v; + return (v128_t)__ret; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_lane( + const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_load64_lane_struct { + int64_t __v; + } __attribute__((__packed__, __may_alias__)); + int64_t __v = ((const struct __wasm_v128_load64_lane_struct *)__mem)->__v; + __i64x2 __ret = (__i64x2)__vec; + __ret[__i] = __v; + return (v128_t)__ret; +} + +static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem, + v128_t __a) { + // UB-free unaligned access copied from xmmintrin.h + struct __wasm_v128_store_struct { + __v128_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __wasm_v128_store_struct *)__mem)->__v = __a; +} + +static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store8_lane(void *__mem, + v128_t __vec, + int __i) + __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_store8_lane_struct { + int8_t __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __wasm_v128_store8_lane_struct *)__mem)->__v = ((__i8x16)__vec)[__i]; +} + +static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store16_lane(void *__mem, + v128_t __vec, + int __i) + __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_store16_lane_struct { + int16_t __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __wasm_v128_store16_lane_struct *)__mem)->__v = + ((__i16x8)__vec)[__i]; +} + +static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store32_lane(void *__mem, + v128_t __vec, + int __i) + __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_store32_lane_struct { + int32_t __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __wasm_v128_store32_lane_struct *)__mem)->__v = + ((__i32x4)__vec)[__i]; +} + +static __inline__ void __DEFAULT_FN_ATTRS 
wasm_v128_store64_lane(void *__mem, + v128_t __vec, + int __i) + __REQUIRE_CONSTANT(__i) { + struct __wasm_v128_store64_lane_struct { + int64_t __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __wasm_v128_store64_lane_struct *)__mem)->__v = + ((__i64x2)__vec)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4, + int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9, + int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13, + int8_t __c14, int8_t __c15) { + return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5, + __c6, __c7, __c8, __c9, __c10, __c11, + __c12, __c13, __c14, __c15}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3, + int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) { + return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0, + int32_t __c1, + int32_t __c2, + int32_t __c3) { + return (v128_t)(__i32x4){__c0, __c1, __c2, __c3}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0, + int64_t __c1) { + return (v128_t)(__i64x2){__c0, __c1}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0, + float __c1, + float __c2, + float __c3) { + return (v128_t)(__f32x4){__c0, __c1, __c2, __c3}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0, + double __c1) { + return (v128_t)(__f64x2){__c0, __c1}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i8x16_const(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, + int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7, + int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11, + int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15) + __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) + __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4) + __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6) + __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8) + __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10) + __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12) + __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14) + __REQUIRE_CONSTANT(__c15) { + return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5, + __c6, __c7, __c8, __c9, __c10, __c11, + __c12, __c13, __c14, __c15}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3, + int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) + __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) + __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4) + __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6) + __REQUIRE_CONSTANT(__c7) { + return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_const(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3) + __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) + __REQUIRE_CONSTANT(__c3) { + return (v128_t)(__i32x4){__c0, __c1, __c2, __c3}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const(int64_t __c0, + int64_t __c1) + __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) { + return (v128_t)(__i64x2){__c0, __c1}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f32x4_const(float __c0, float __c1, float __c2, float __c3) + __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) 
__REQUIRE_CONSTANT(__c2) + __REQUIRE_CONSTANT(__c3) { + return (v128_t)(__f32x4){__c0, __c1, __c2, __c3}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const(double __c0, + double __c1) + __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) { + return (v128_t)(__f64x2){__c0, __c1}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const_splat(int8_t __c) + __REQUIRE_CONSTANT(__c) { + return (v128_t)(__i8x16){__c, __c, __c, __c, __c, __c, __c, __c, + __c, __c, __c, __c, __c, __c, __c, __c}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const_splat(int16_t __c) + __REQUIRE_CONSTANT(__c) { + return (v128_t)(__i16x8){__c, __c, __c, __c, __c, __c, __c, __c}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const_splat(int32_t __c) + __REQUIRE_CONSTANT(__c) { + return (v128_t)(__i32x4){__c, __c, __c, __c}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const_splat(int64_t __c) + __REQUIRE_CONSTANT(__c) { + return (v128_t)(__i64x2){__c, __c}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const_splat(float __c) + __REQUIRE_CONSTANT(__c) { + return (v128_t)(__f32x4){__c, __c, __c, __c}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const_splat(double __c) + __REQUIRE_CONSTANT(__c) { + return (v128_t)(__f64x2){__c, __c}; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) { + return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a, + __a, __a, __a, __a, __a, __a, __a, __a}; +} + +static __inline__ int8_t __DEFAULT_FN_ATTRS wasm_i8x16_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__i8x16)__a)[__i]; +} + +static __inline__ uint8_t __DEFAULT_FN_ATTRS wasm_u8x16_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__u8x16)__a)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_replace_lane(v128_t __a, + int __i, + int8_t __b) + __REQUIRE_CONSTANT(__i) { + __i8x16 __v = (__i8x16)__a; + __v[__i] = __b; + return (v128_t)__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) { + return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a}; +} + +static __inline__ int16_t __DEFAULT_FN_ATTRS wasm_i16x8_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__i16x8)__a)[__i]; +} + +static __inline__ uint16_t __DEFAULT_FN_ATTRS +wasm_u16x8_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) { + return ((__u16x8)__a)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_replace_lane(v128_t __a, + int __i, + int16_t __b) + __REQUIRE_CONSTANT(__i) { + __i16x8 __v = (__i16x8)__a; + __v[__i] = __b; + return (v128_t)__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) { + return (v128_t)(__i32x4){__a, __a, __a, __a}; +} + +static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__i32x4)__a)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a, + int __i, + int32_t __b) + __REQUIRE_CONSTANT(__i) { + __i32x4 __v = (__i32x4)__a; + __v[__i] = __b; + return (v128_t)__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) { + return (v128_t)(__i64x2){__a, __a}; +} + +static __inline__ int64_t __DEFAULT_FN_ATTRS wasm_i64x2_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__i64x2)__a)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS 
wasm_i64x2_replace_lane(v128_t __a, + int __i, + int64_t __b) + __REQUIRE_CONSTANT(__i) { + __i64x2 __v = (__i64x2)__a; + __v[__i] = __b; + return (v128_t)__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) { + return (v128_t)(__f32x4){__a, __a, __a, __a}; +} + +static __inline__ float __DEFAULT_FN_ATTRS wasm_f32x4_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__f32x4)__a)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_replace_lane(v128_t __a, + int __i, + float __b) + __REQUIRE_CONSTANT(__i) { + __f32x4 __v = (__f32x4)__a; + __v[__i] = __b; + return (v128_t)__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) { + return (v128_t)(__f64x2){__a, __a}; +} + +static __inline__ double __DEFAULT_FN_ATTRS wasm_f64x2_extract_lane(v128_t __a, + int __i) + __REQUIRE_CONSTANT(__i) { + return ((__f64x2)__a)[__i]; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_replace_lane(v128_t __a, + int __i, + double __b) + __REQUIRE_CONSTANT(__i) { + __f64x2 __v = (__f64x2)__a; + __v[__i] = __b; + return (v128_t)__v; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a, + v128_t __b) { + return (v128_t)((__i8x16)__a == (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a, + v128_t __b) { + return (v128_t)((__i8x16)__a != (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__i8x16)__a < (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__u8x16)__a < (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__i8x16)__a > (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__u8x16)__a > (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a, + v128_t __b) { + return (v128_t)((__i8x16)__a <= (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a, + v128_t __b) { + return (v128_t)((__u8x16)__a <= (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__i8x16)__a >= (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__u8x16)__a >= (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a, + v128_t __b) { + return (v128_t)((__i16x8)__a == (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a != (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__i16x8)__a < (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a < (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__i16x8)__a > (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a > (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a, + v128_t __b) { + return (v128_t)((__i16x8)__a <= (__i16x8)__b); +} + +static __inline__ v128_t 
__DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a <= (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__i16x8)__a >= (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a >= (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a, + v128_t __b) { + return (v128_t)((__i32x4)__a == (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a, + v128_t __b) { + return (v128_t)((__i32x4)__a != (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__i32x4)__a < (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a < (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__i32x4)__a > (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a > (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a, + v128_t __b) { + return (v128_t)((__i32x4)__a <= (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a <= (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__i32x4)__a >= (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a >= (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_eq(v128_t __a, + v128_t __b) { + return (v128_t)((__i64x2)__a == (__i64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ne(v128_t __a, + v128_t __b) { + return (v128_t)((__i64x2)__a != (__i64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__i64x2)__a < (__i64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__i64x2)__a > (__i64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_le(v128_t __a, + v128_t __b) { + return (v128_t)((__i64x2)__a <= (__i64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__i64x2)__a >= (__i64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a == (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a != (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a < (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a > (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a <= (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a >= (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS 
wasm_f64x2_eq(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a == (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a != (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a < (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a > (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a <= (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a >= (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) { + return ~__a; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a, + v128_t __b) { + return __a & __b; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a, + v128_t __b) { + return __a | __b; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a, + v128_t __b) { + return __a ^ __b; +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a, + v128_t __b) { + return __a & ~__b; +} + +static __inline__ bool __DEFAULT_FN_ATTRS wasm_v128_any_true(v128_t __a) { + return __builtin_wasm_any_true_v128((__i8x16)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a, + v128_t __b, + v128_t __mask) { + return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b, + (__i32x4)__mask); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) { + return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) { + return (v128_t)(-(__u8x16)__a); +} + +static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) { + return __builtin_wasm_all_true_i8x16((__i8x16)__a); +} + +static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) { + return __builtin_wasm_bitmask_i8x16((__i8x16)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) { + return (v128_t)__builtin_wasm_popcnt_i8x16((__i8x16)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a, + int32_t __b) { + return (v128_t)((__i8x16)__a << __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__i8x16)__a >> __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__u8x16)__a >> __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a, + v128_t __b) { + return (v128_t)((__u8x16)__a + (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)__a, (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_add_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_add_sat_u_i8x16((__u8x16)__a, (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a, + v128_t __b) { + return (v128_t)((__u8x16)__a - (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_sub_sat_s_i8x16((__i8x16)__a, (__i8x16)__b); +} + 
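/* A usage sketch (illustrative only, not part of the upstream header): the
 * _sat variants clamp to the lane type's representable range instead of
 * wrapping, e.g. for signed 8-bit lanes:
 *
 *   v128_t a = wasm_i8x16_splat(100);
 *   v128_t wrapped = wasm_i8x16_add(a, a);     // 100 + 100 wraps to -56
 *   v128_t clamped = wasm_i8x16_add_sat(a, a); // saturates to 127 per lane
 */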
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_sub_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_sub_sat_u_i8x16((__u8x16)__a, (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_u_i8x16((__u8x16)__a, (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_u_i8x16((__u8x16)__a, (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)__a, (__u8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) { + return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) { + return (v128_t)(-(__u16x8)__a); +} + +static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) { + return __builtin_wasm_all_true_i16x8((__i16x8)__a); +} + +static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) { + return __builtin_wasm_bitmask_i16x8((__i16x8)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a, + int32_t __b) { + return (v128_t)((__i16x8)__a << __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__i16x8)__a >> __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__u16x8)__a >> __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a + (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_add_sat_s_i16x8((__i16x8)__a, (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_add_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_add_sat_u_i16x8((__u16x8)__a, (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a, + v128_t __b) { + return (v128_t)((__i16x8)__a - (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_sub_sat_s_i16x8((__i16x8)__a, (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_sub_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_sub_sat_u_i16x8((__u16x8)__a, (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a, + v128_t __b) { + return (v128_t)((__u16x8)__a * (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_u_i16x8((__u16x8)__a, (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b); +} + +static 
__inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_u_i16x8((__u16x8)__a, (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)__a, (__u16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) { + return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) { + return (v128_t)(-(__u32x4)__a); +} + +static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) { + return __builtin_wasm_all_true_i32x4((__i32x4)__a); +} + +static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) { + return __builtin_wasm_bitmask_i32x4((__i32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a, + int32_t __b) { + return (v128_t)((__i32x4)__a << __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__i32x4)__a >> __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__u32x4)__a >> __b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a + (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a - (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a, + v128_t __b) { + return (v128_t)((__u32x4)__a * (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_u_i32x4((__u32x4)__a, (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_dot_i16x8(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_dot_s_i32x4_i16x8((__i16x8)__a, (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_abs(v128_t __a) { + return (v128_t)__builtin_wasm_abs_i64x2((__i64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) { + return (v128_t)(-(__u64x2)__a); +} + +static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) { + return __builtin_wasm_all_true_i64x2((__i64x2)__a); +} + +static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) { + return __builtin_wasm_bitmask_i64x2((__i64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a, + int32_t __b) { + return (v128_t)((__i64x2)__a << (int64_t)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__i64x2)__a >> (int64_t)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a, + int32_t __b) { + return (v128_t)((__u64x2)__a >> (int64_t)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a, + v128_t __b) { 
+ return (v128_t)((__u64x2)__a + (__u64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a, + v128_t __b) { + return (v128_t)((__u64x2)__a - (__u64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a, + v128_t __b) { + return (v128_t)((__u64x2)__a * (__u64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) { + return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) { + return (v128_t)(-(__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) { + return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ceil(v128_t __a) { + return (v128_t)__builtin_wasm_ceil_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_floor(v128_t __a) { + return (v128_t)__builtin_wasm_floor_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_trunc(v128_t __a) { + return (v128_t)__builtin_wasm_trunc_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_nearest(v128_t __a) { + return (v128_t)__builtin_wasm_nearest_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a + (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a - (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a * (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a, + v128_t __b) { + return (v128_t)((__f32x4)__a / (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a, + v128_t __b) { + __i32x4 __mask = (__i32x4)((__f32x4)__b < (__f32x4)__a); + return (v128_t)((((__i32x4)__b) & __mask) | (((__i32x4)__a) & ~__mask)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a, + v128_t __b) { + __i32x4 __mask = (__i32x4)((__f32x4)__a < (__f32x4)__b); + return (v128_t)((((__i32x4)__b) & __mask) | (((__i32x4)__a) & ~__mask)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) { + return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) { + return (v128_t)(-(__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) { + return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ceil(v128_t __a) { + return (v128_t)__builtin_wasm_ceil_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_floor(v128_t __a) { + return (v128_t)__builtin_wasm_floor_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_trunc(v128_t __a) { + return (v128_t)__builtin_wasm_trunc_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_nearest(v128_t __a) { + return 
(v128_t)__builtin_wasm_nearest_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a + (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a - (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a * (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a, + v128_t __b) { + return (v128_t)((__f64x2)__a / (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a, + v128_t __b) { + __i64x2 __mask = (__i64x2)((__f64x2)__b < (__f64x2)__a); + return (v128_t)((((__i64x2)__b) & __mask) | (((__i64x2)__a) & ~__mask)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a, + v128_t __b) { + __i64x2 __mask = (__i64x2)((__f64x2)__a < (__f64x2)__b); + return (v128_t)((((__i64x2)__b) & __mask) | (((__i64x2)__a) & ~__mask)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_trunc_sat_f32x4(v128_t __a) { + return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_trunc_sat_f32x4(v128_t __a) { + return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f32x4_convert_i32x4(v128_t __a) { + return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f32x4_convert_u32x4(v128_t __a) { + return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f64x2_convert_low_i32x4(v128_t __a) { + return (v128_t) __builtin_convertvector((__i32x2){__a[0], __a[1]}, __f64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f64x2_convert_low_u32x4(v128_t __a) { + return (v128_t) __builtin_convertvector((__u32x2){__a[0], __a[1]}, __f64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) { + return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) { + return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f32x4_demote_f64x2_zero(v128_t __a) { + return (v128_t) __builtin_convertvector( + __builtin_shufflevector((__f64x2)__a, (__f64x2){0, 0}, 0, 1, 2, 3), + __f32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_f64x2_promote_low_f32x4(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__f32x2){((__f32x4)__a)[0], ((__f32x4)__a)[1]}, __f64x2); +} + +#define wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \ + __c7, __c8, __c9, __c10, __c11, __c12, __c13, \ + __c14, __c15) \ + ((v128_t)__builtin_wasm_shuffle_i8x16( \ + (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5, \ + __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15)) + +#define wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, 
__c3, __c4, __c5, __c6, \ + __c7) \ + ((v128_t)__builtin_wasm_shuffle_i8x16( \ + (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2, \ + (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2, \ + (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2, \ + (__c7)*2 + 1)) + +#define wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \ + ((v128_t)__builtin_wasm_shuffle_i8x16( \ + (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2, \ + (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3, \ + (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4, \ + (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3)) + +#define wasm_i64x2_shuffle(__a, __b, __c0, __c1) \ + ((v128_t)__builtin_wasm_shuffle_i8x16( \ + (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2, \ + (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7, \ + (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4, \ + (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7)) + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_swizzle(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_swizzle_i8x16((__i8x16)__a, (__i8x16)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) { + return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a, + (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) { + return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a, + (__i16x8)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) { + return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a, + (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) { + return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a, + (__i32x4)__b); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_extend_low_i8x16(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2], + ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5], + ((__i8x16)__a)[6], ((__i8x16)__a)[7]}, + __i16x8); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_extend_high_i8x16(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10], + ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13], + ((__i8x16)__a)[14], ((__i8x16)__a)[15]}, + __i16x8); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_extend_low_u8x16(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2], + ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5], + ((__u8x16)__a)[6], ((__u8x16)__a)[7]}, + __u16x8); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_extend_high_u8x16(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10], + ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13], + ((__u8x16)__a)[14], ((__u8x16)__a)[15]}, + __u16x8); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_extend_low_i16x8(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2], + ((__i16x8)__a)[3]}, + __i32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_extend_high_i16x8(v128_t 
__a) { + return (v128_t) __builtin_convertvector( + (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6], + ((__i16x8)__a)[7]}, + __i32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_extend_low_u16x8(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2], + ((__u16x8)__a)[3]}, + __u32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_extend_high_u16x8(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6], + ((__u16x8)__a)[7]}, + __u32x4); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i64x2_extend_low_i32x4(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__i32x2){((__i32x4)__a)[0], ((__i32x4)__a)[1]}, __i64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i64x2_extend_high_i32x4(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__i32x2){((__i32x4)__a)[2], ((__i32x4)__a)[3]}, __i64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u64x2_extend_low_u32x4(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__u32x2){((__u32x4)__a)[0], ((__u32x4)__a)[1]}, __u64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u64x2_extend_high_u32x4(v128_t __a) { + return (v128_t) __builtin_convertvector( + (__u32x2){((__u32x4)__a)[2], ((__u32x4)__a)[3]}, __u64x2); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_extadd_pairwise_i8x16(v128_t __a) { + return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_s_i16x8((__i8x16)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_extadd_pairwise_u8x16(v128_t __a) { + return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_u_i16x8((__u8x16)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_extadd_pairwise_i16x8(v128_t __a) { + return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_s_i32x4((__i16x8)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_extadd_pairwise_u16x8(v128_t __a) { + return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_u_i32x4((__u16x8)__a); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_extmul_low_i8x16(v128_t __a, v128_t __b) { + return (v128_t)((__i16x8)wasm_i16x8_extend_low_i8x16(__a) * + (__i16x8)wasm_i16x8_extend_low_i8x16(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i16x8_extmul_high_i8x16(v128_t __a, v128_t __b) { + return (v128_t)((__i16x8)wasm_i16x8_extend_high_i8x16(__a) * + (__i16x8)wasm_i16x8_extend_high_i8x16(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_extmul_low_u8x16(v128_t __a, v128_t __b) { + return (v128_t)((__u16x8)wasm_u16x8_extend_low_u8x16(__a) * + (__u16x8)wasm_u16x8_extend_low_u8x16(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u16x8_extmul_high_u8x16(v128_t __a, v128_t __b) { + return (v128_t)((__u16x8)wasm_u16x8_extend_high_u8x16(__a) * + (__u16x8)wasm_u16x8_extend_high_u8x16(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_extmul_low_i16x8(v128_t __a, v128_t __b) { + return (v128_t)((__i32x4)wasm_i32x4_extend_low_i16x8(__a) * + (__i32x4)wasm_i32x4_extend_low_i16x8(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i32x4_extmul_high_i16x8(v128_t __a, v128_t __b) { + return (v128_t)((__i32x4)wasm_i32x4_extend_high_i16x8(__a) * + (__i32x4)wasm_i32x4_extend_high_i16x8(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_extmul_low_u16x8(v128_t __a, v128_t __b) { + return 
(v128_t)((__u32x4)wasm_u32x4_extend_low_u16x8(__a) * + (__u32x4)wasm_u32x4_extend_low_u16x8(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u32x4_extmul_high_u16x8(v128_t __a, v128_t __b) { + return (v128_t)((__u32x4)wasm_u32x4_extend_high_u16x8(__a) * + (__u32x4)wasm_u32x4_extend_high_u16x8(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i64x2_extmul_low_i32x4(v128_t __a, v128_t __b) { + return (v128_t)((__i64x2)wasm_i64x2_extend_low_i32x4(__a) * + (__i64x2)wasm_i64x2_extend_low_i32x4(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_i64x2_extmul_high_i32x4(v128_t __a, v128_t __b) { + return (v128_t)((__i64x2)wasm_i64x2_extend_high_i32x4(__a) * + (__i64x2)wasm_i64x2_extend_high_i32x4(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u64x2_extmul_low_u32x4(v128_t __a, v128_t __b) { + return (v128_t)((__u64x2)wasm_u64x2_extend_low_u32x4(__a) * + (__u64x2)wasm_u64x2_extend_low_u32x4(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS +wasm_u64x2_extmul_high_u32x4(v128_t __a, v128_t __b) { + return (v128_t)((__u64x2)wasm_u64x2_extend_high_u32x4(__a) * + (__u64x2)wasm_u64x2_extend_high_u32x4(__b)); +} + +static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_q15mulr_sat(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_q15mulr_sat_s_i16x8((__i16x8)__a, (__i16x8)__b); +} + +// Old intrinsic names supported to ease transitioning to the standard names. Do +// not use these; they will be removed in the near future. + +#define __DEPRECATED_FN_ATTRS(__replacement) \ + __DEFAULT_FN_ATTRS __attribute__( \ + (deprecated("use " __replacement " instead", __replacement))) + +#define __WASM_STR(X) #X + +#ifdef __DEPRECATED +#define __DEPRECATED_WASM_MACRO(__name, __replacement) \ + _Pragma(__WASM_STR(GCC warning( \ + "'" __name "' is deprecated: use '" __replacement "' instead"))) +#else +#define __DEPRECATED_WASM_MACRO(__name, __replacement) +#endif + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load8_splat") +wasm_v8x16_load_splat(const void *__mem) { + return wasm_v128_load8_splat(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load16_splat") +wasm_v16x8_load_splat(const void *__mem) { + return wasm_v128_load16_splat(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load32_splat") +wasm_v32x4_load_splat(const void *__mem) { + return wasm_v128_load32_splat(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load64_splat") +wasm_v64x2_load_splat(const void *__mem) { + return wasm_v128_load64_splat(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_load8x8") +wasm_i16x8_load_8x8(const void *__mem) { + return wasm_i16x8_load8x8(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_load8x8") +wasm_u16x8_load_8x8(const void *__mem) { + return wasm_u16x8_load8x8(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_load16x4") +wasm_i32x4_load_16x4(const void *__mem) { + return wasm_i32x4_load16x4(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_load16x4") +wasm_u32x4_load_16x4(const void *__mem) { + return wasm_u32x4_load16x4(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i64x2_load32x2") +wasm_i64x2_load_32x2(const void *__mem) { + return wasm_i64x2_load32x2(__mem); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u64x2_load32x2") +wasm_u64x2_load_32x2(const void *__mem) { + return wasm_u64x2_load32x2(__mem); +} + +#define wasm_v8x16_shuffle(__a, 
__b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \ + __c7, __c8, __c9, __c10, __c11, __c12, __c13, \ + __c14, __c15) \ + __DEPRECATED_WASM_MACRO("wasm_v8x16_shuffle", "wasm_i8x16_shuffle") \ + wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \ + __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15) + +#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \ + __c7) \ + __DEPRECATED_WASM_MACRO("wasm_v16x8_shuffle", "wasm_i16x8_shuffle") \ + wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7) + +#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \ + __DEPRECATED_WASM_MACRO("wasm_v32x4_shuffle", "wasm_i32x4_shuffle") \ + wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) + +#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \ + __DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \ + wasm_i64x2_shuffle(__a, __b, __c0, __c1) + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle") +wasm_v8x16_swizzle(v128_t __a, v128_t __b) { + return wasm_i8x16_swizzle(__a, __b); +} + +static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true") +wasm_i8x16_any_true(v128_t __a) { + return wasm_v128_any_true(__a); +} + +static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true") +wasm_i16x8_any_true(v128_t __a) { + return wasm_v128_any_true(__a); +} + +static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true") +wasm_i32x4_any_true(v128_t __a) { + return wasm_v128_any_true(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_add_sat") +wasm_i8x16_add_saturate(v128_t __a, v128_t __b) { + return wasm_i8x16_add_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_add_sat") +wasm_u8x16_add_saturate(v128_t __a, v128_t __b) { + return wasm_u8x16_add_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_sub_sat") +wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) { + return wasm_i8x16_sub_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_sub_sat") +wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) { + return wasm_u8x16_sub_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_add_sat") +wasm_i16x8_add_saturate(v128_t __a, v128_t __b) { + return wasm_i16x8_add_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_add_sat") +wasm_u16x8_add_saturate(v128_t __a, v128_t __b) { + return wasm_u16x8_add_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_sub_sat") +wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) { + return wasm_i16x8_sub_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_sub_sat") +wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) { + return wasm_u16x8_sub_sat(__a, __b); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_low_i8x16") +wasm_i16x8_widen_low_i8x16(v128_t __a) { + return wasm_i16x8_extend_low_i8x16(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_high_i8x16") +wasm_i16x8_widen_high_i8x16(v128_t __a) { + return wasm_i16x8_extend_high_i8x16(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_low_u8x16") +wasm_i16x8_widen_low_u8x16(v128_t __a) { + return wasm_u16x8_extend_low_u8x16(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_high_u8x16") +wasm_i16x8_widen_high_u8x16(v128_t __a) { + return wasm_u16x8_extend_high_u8x16(__a); +} + +static __inline__ v128_t 
__DEPRECATED_FN_ATTRS("wasm_i32x4_extend_low_i16x8") +wasm_i32x4_widen_low_i16x8(v128_t __a) { + return wasm_i32x4_extend_low_i16x8(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_high_i16x8") +wasm_i32x4_widen_high_i16x8(v128_t __a) { + return wasm_i32x4_extend_high_i16x8(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_low_u16x8") +wasm_i32x4_widen_low_u16x8(v128_t __a) { + return wasm_u32x4_extend_low_u16x8(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_high_u16x8") +wasm_i32x4_widen_high_u16x8(v128_t __a) { + return wasm_u32x4_extend_high_u16x8(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_trunc_sat_f32x4") +wasm_i32x4_trunc_saturate_f32x4(v128_t __a) { + return wasm_i32x4_trunc_sat_f32x4(__a); +} + +static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_trunc_sat_f32x4") +wasm_u32x4_trunc_saturate_f32x4(v128_t __a) { + return wasm_u32x4_trunc_sat_f32x4(__a); +} + +// Undefine helper macros +#undef __DEFAULT_FN_ATTRS +#undef __DEPRECATED_FN_ATTRS + +#endif // __WASM_SIMD128_H diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wbnoinvdintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wbnoinvdintrin.h new file mode 100644 index 0000000..cac0347 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wbnoinvdintrin.h @@ -0,0 +1,24 @@ +/*===-------------- wbnoinvdintrin.h - wbnoinvd intrinsic-------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __WBNOINVDINTRIN_H +#define __WBNOINVDINTRIN_H + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, __target__("wbnoinvd"))) +_wbnoinvd (void) +{ + __builtin_ia32_wbnoinvd (); +} + +#endif /* __WBNOINVDINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wmmintrin.h new file mode 100644 index 0000000..f932ca8 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wmmintrin.h @@ -0,0 +1,19 @@ +/*===---- wmmintrin.h - AES intrinsics ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __WMMINTRIN_H +#define __WMMINTRIN_H + +#include + +#include <__wmmintrin_aes.h> + +#include <__wmmintrin_pclmul.h> + +#endif /* __WMMINTRIN_H */ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86gprintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86gprintrin.h new file mode 100644 index 0000000..1fc6cab --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86gprintrin.h @@ -0,0 +1,23 @@ +/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wbnoinvdintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wbnoinvdintrin.h
new file mode 100644
index 0000000..cac0347
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wbnoinvdintrin.h
@@ -0,0 +1,24 @@
+/*===-------------- wbnoinvdintrin.h - wbnoinvd intrinsic-------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <wbnoinvdintrin.h> directly; include <x86intrin.h> or <immintrin.h> instead."
+#endif
+
+#ifndef __WBNOINVDINTRIN_H
+#define __WBNOINVDINTRIN_H
+
+static __inline__ void
+  __attribute__((__always_inline__, __nodebug__, __target__("wbnoinvd")))
+_wbnoinvd (void)
+{
+  __builtin_ia32_wbnoinvd ();
+}
+
+#endif /* __WBNOINVDINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wmmintrin.h
new file mode 100644
index 0000000..f932ca8
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/wmmintrin.h
@@ -0,0 +1,19 @@
+/*===---- wmmintrin.h - AES intrinsics ------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __WMMINTRIN_H
+#define __WMMINTRIN_H
+
+#include <emmintrin.h>
+
+#include <__wmmintrin_aes.h>
+
+#include <__wmmintrin_pclmul.h>
+
+#endif /* __WMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86gprintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86gprintrin.h
new file mode 100644
index 0000000..1fc6cab
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86gprintrin.h
@@ -0,0 +1,23 @@
+/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#define __X86GPRINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__HRESET__)
+#include <hresetintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__UINTR__)
+#include <uintrintrin.h>
+#endif
+
+#endif /* __X86GPRINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86intrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86intrin.h
new file mode 100644
index 0000000..768d0e5
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/x86intrin.h
@@ -0,0 +1,63 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <ia32intrin.h>
+
+#include <immintrin.h>
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__3dNOW__)
+#include <mm3dnow.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__PRFCHW__)
+#include <prfchwintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__SSE4A__)
+#include <ammintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__FMA4__)
+#include <fma4intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__XOP__)
+#include <xopintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__TBM__)
+#include <tbmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__LWP__)
+#include <lwpintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__MWAITX__)
+#include <mwaitxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+    defined(__CLZERO__)
+#include <clzerointrin.h>
+#endif
+
+
+#endif /* __X86INTRIN_H */
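A usage sketch for the small umbrella headers above (assumptions: an x86 target with the wbnoinvd feature enabled, e.g. clang -mwbnoinvd; the function name is illustrative). wbnoinvdintrin.h enforces that it is reached only through <x86intrin.h> or <immintrin.h>:

    #include <x86intrin.h>  /* pulls in wbnoinvdintrin.h when the feature is on */

    /* Write back all modified cache lines without invalidating them. Like
     * WBINVD, this is a privileged instruction, so it is only meaningful in
     * ring-0 (kernel) code. */
    void writeback_caches(void) {
      _wbnoinvd();
    }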
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xmmintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xmmintrin.h
new file mode 100644
index 0000000..f468669
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xmmintrin.h
@@ -0,0 +1,3008 @@
+/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __XMMINTRIN_H
+#define __XMMINTRIN_H
+
+#include <mmintrin.h>
+
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef float __v4sf __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));
+
+typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));
+
+/* Unsigned types */
+typedef unsigned int __v4su __attribute__((__vector_size__(16)));
+
+/* This header should only be included in a hosted environment as it depends on
+ * a standard library to provide allocation routines. */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse"), __min_vector_width__(64)))
+
+/// Adds the 32-bit float values in the low-order bits of the operands.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VADDSS / ADDSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The lower 32 bits of this operand are used in the calculation.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+///    The lower 32 bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum
+///    of the lower 32 bits of both operands. The upper 96 bits are copied from
+///    the upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ss(__m128 __a, __m128 __b)
+{
+  __a[0] += __b[0];
+  return __a;
+}
+
+/// Adds two 128-bit vectors of [4 x float], and returns the results of
+///    the addition.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VADDPS / ADDPS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing one of the source operands.
+/// \returns A 128-bit vector of [4 x float] containing the sums of both
+///    operands.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_add_ps(__m128 __a, __m128 __b)
+{
+  return (__m128)((__v4sf)__a + (__v4sf)__b);
+}
+
+/// Subtracts the 32-bit float value in the low-order bits of the second
+///    operand from the corresponding value in the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the VSUBSS / SUBSS instructions.
+///
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits
+///    of this operand are used in the calculation.
+/// \param __b
+///    A 128-bit vector of [4 x float] containing the subtrahend. The lower 32
+///    bits of this operand are used in the calculation.
+/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
+///    difference of the lower 32 bits of both operands. The upper 96 bits are
+///    copied from the upper 96 bits of the first source operand.
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sub_ss(__m128 __a, __m128 __b) +{ + __a[0] -= __b[0]; + return __a; +} + +/// Subtracts each of the values of the second operand from the first +/// operand, both of which are 128-bit vectors of [4 x float] and returns +/// the results of the subtraction. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS / SUBPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the minuend. +/// \param __b +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the differences between +/// both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sub_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a - (__v4sf)__b); +} + +/// Multiplies two 32-bit float values in the low-order bits of the +/// operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULSS / MULSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the product of the lower +/// 32 bits of both operands. The upper 96 bits are copied from the upper 96 +/// bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mul_ss(__m128 __a, __m128 __b) +{ + __a[0] *= __b[0]; + return __a; +} + +/// Multiplies two 128-bit vectors of [4 x float] and returns the +/// results of the multiplication. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPS / MULPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the products of both +/// operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mul_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a * (__v4sf)__b); +} + +/// Divides the value in the low-order 32 bits of the first operand by +/// the corresponding value in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVSS / DIVSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the dividend. The lower 32 +/// bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits +/// of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the quotients of the +/// lower 32 bits of both operands. The upper 96 bits are copied from the +/// upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_div_ss(__m128 __a, __m128 __b) +{ + __a[0] /= __b[0]; + return __a; +} + +/// Divides two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPS / DIVPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the dividend. +/// \param __b +/// A 128-bit vector of [4 x float] containing the divisor. +/// \returns A 128-bit vector of [4 x float] containing the quotients of both +/// operands. 
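A small sketch of the scalar-versus-packed distinction documented above (values chosen arbitrarily; assumes the full <xmmintrin.h> on an SSE-capable host): the _ss forms touch only lane 0 and pass the upper three lanes of the first operand through unchanged.

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);    /* lanes: a[0]=1 .. a[3]=4 */
      __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);

      float ss[4], ps[4];
      _mm_storeu_ps(ss, _mm_mul_ss(a, b));  /* {10, 2, 3, 4}: only lane 0 multiplied */
      _mm_storeu_ps(ps, _mm_mul_ps(a, b));  /* {10, 40, 90, 160}: all four lanes */
      printf("ss: %g %g %g %g\n", ss[0], ss[1], ss[2], ss[3]);
      printf("ps: %g %g %g %g\n", ps[0], ps[1], ps[2], ps[3]);
      return 0;
    }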
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_div_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a / (__v4sf)__b); +} + +/// Calculates the square root of the value stored in the low-order bits +/// of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTSS / SQRTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the square root of the +/// value in the low-order bits of the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sqrt_ss(__m128 __a) +{ + return (__m128)__builtin_ia32_sqrtss((__v4sf)__a); +} + +/// Calculates the square roots of the values stored in a 128-bit vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPS / SQRTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the square roots of the +/// values in the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sqrt_ps(__m128 __a) +{ + return __builtin_ia32_sqrtps((__v4sf)__a); +} + +/// Calculates the approximate reciprocal of the value stored in the +/// low-order bits of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPSS / RCPSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocal of the value in the low-order bits of the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rcp_ss(__m128 __a) +{ + return (__m128)__builtin_ia32_rcpss((__v4sf)__a); +} + +/// Calculates the approximate reciprocals of the values stored in a +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPPS / RCPPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocals of the values in the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rcp_ps(__m128 __a) +{ + return (__m128)__builtin_ia32_rcpps((__v4sf)__a); +} + +/// Calculates the approximate reciprocal of the square root of the value +/// stored in the low-order bits of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTSS / RSQRTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocal of the square root of the value in the low-order bits of the +/// operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rsqrt_ss(__m128 __a) +{ + return __builtin_ia32_rsqrtss((__v4sf)__a); +} + +/// Calculates the approximate reciprocals of the square roots of the +/// values stored in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTPS / RSQRTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocals of the square roots of the values in the operand. 
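The reciprocal estimates above are approximations (Intel documents a relative error bound of 1.5*2^-12 for RCPPS/RSQRTPS), so a Newton-Raphson refinement step is their usual companion; a minimal sketch (the helper name is illustrative):

    #include <xmmintrin.h>

    /* One Newton-Raphson iteration refines _mm_rcp_ps's ~12-bit estimate of
     * 1/x to roughly single-precision accuracy: r' = r * (2 - x*r). */
    static inline __m128 rcp_refined(__m128 x) {
      __m128 r = _mm_rcp_ps(x);
      __m128 two = _mm_set1_ps(2.0f);
      return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(x, r)));
    }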
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rsqrt_ps(__m128 __a) +{ + return __builtin_ia32_rsqrtps((__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands and returns the lesser value in the low-order bits of the +/// vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINSS / MINSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// minimum value between both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_min_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 128-bit vectors of [4 x float] and returns the lesser +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPS / MINPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \returns A 128-bit vector of [4 x float] containing the minimum values +/// between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_min_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands and returns the greater value in the low-order bits of a 128-bit +/// vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXSS / MAXSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// maximum value between both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_max_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 128-bit vectors of [4 x float] and returns the greater +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPS / MAXPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \returns A 128-bit vector of [4 x float] containing the maximum values +/// between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_max_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPS / ANDPS instructions. +/// +/// \param __a +/// A 128-bit vector containing one of the source operands. +/// \param __b +/// A 128-bit vector containing one of the source operands. 
+/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the +/// values between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_and_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4su)__a & (__v4su)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [4 x float], using +/// the one's complement of the values contained in the first source +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPS / ANDNPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the first source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector of [4 x float] containing the second source operand. +/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the +/// one's complement of the first operand and the values in the second +/// operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_andnot_ps(__m128 __a, __m128 __b) +{ + return (__m128)(~(__v4su)__a & (__v4su)__b); +} + +/// Performs a bitwise OR of two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPS / ORPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the +/// values between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_or_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4su)__a | (__v4su)__b); +} + +/// Performs a bitwise exclusive OR of two 128-bit vectors of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR +/// of the values between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_xor_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4su)__a ^ (__v4su)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands for equality and returns the result of the comparison in the +/// low-order bits of a vector [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQSS / CMPEQSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpeq_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] for equality. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQPS / CMPEQPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
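Why bitwise operations on [4 x float] are useful at all: combined with a sign-bit mask they implement absolute value and negation without branches. A minimal sketch (helper names are illustrative):

    #include <xmmintrin.h>

    /* _mm_andnot_ps clears the sign bit (absolute value); _mm_xor_ps flips it. */
    static inline __m128 abs_ps(__m128 x) {
      const __m128 sign = _mm_set1_ps(-0.0f);  /* 0x80000000 in each lane */
      return _mm_andnot_ps(sign, x);           /* ~sign & x */
    }

    static inline __m128 neg_ps(__m128 x) {
      const __m128 sign = _mm_set1_ps(-0.0f);
      return _mm_xor_ps(x, sign);
    }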
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpeq_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is less than the +/// corresponding value in the second operand and returns the result of the +/// comparison in the low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmplt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are less than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmplt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is less than or +/// equal to the corresponding value in the second operand and returns the +/// result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmple_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are less than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
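Note that these comparisons return an all-ones or all-zero 32-bit mask per lane rather than a bool, which composes with the bitwise operations above into a branch-free select. A sketch (helper name illustrative; for non-NaN inputs this matches the effect of _mm_min_ps):

    #include <xmmintrin.h>

    /* Lane-wise minimum built from a compare mask: take a where a < b, else b. */
    static inline __m128 select_min(__m128 a, __m128 b) {
      __m128 mask = _mm_cmplt_ps(a, b);          /* all-1s where a < b */
      return _mm_or_ps(_mm_and_ps(mask, a),      /* a in those lanes */
                       _mm_andnot_ps(mask, b));  /* b elsewhere */
    }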
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmple_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is greater than +/// the corresponding value in the second operand and returns the result of +/// the comparison in the low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpgt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are greater than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpgt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is greater than +/// or equal to the corresponding value in the second operand and returns +/// the result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpge_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are greater than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
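On the implementation pattern just above: baseline SSE has no dedicated scalar greater-than compare, so _mm_cmpgt_ss runs the less-than compare with swapped operands and shuffles the 32-bit result back under __a's upper lanes (indices 4, 1, 2, 3 pick element 0 of the second vector, then elements 1-3 of the first). A small demonstration sketch:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 a = _mm_set_ps(0, 0, 0, 2.0f);  /* lane 0 = 2 */
      __m128 b = _mm_set_ps(9, 9, 9, 1.0f);  /* lane 0 = 1 */
      float r[4];
      _mm_storeu_ps(r, _mm_cmpgt_ss(a, b));
      /* Lane 0 is an all-ones 32-bit mask (prints as nan when viewed as a
       * float); lanes 1-3 are copied untouched from __a. */
      printf("%f %g %g %g\n", r[0], r[1], r[2], r[3]);
      return 0;
    }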
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpge_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands for inequality and returns the result of the comparison in the +/// low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQSS / CMPNEQSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpneq_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] for inequality. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQPS / CMPNEQPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpneq_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not less than +/// the corresponding value in the second operand and returns the result of +/// the comparison in the low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnlt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not less than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
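A subtlety worth calling out for the negated predicates above: "not less than" is not the same as "greater than or equal" once NaN enters, because the negated forms return true for unordered operands. A sketch (assumes <math.h> for the NAN macro):

    #include <xmmintrin.h>
    #include <math.h>
    #include <stdio.h>

    int main(void) {
      __m128 a = _mm_set1_ps(NAN);
      __m128 b = _mm_set1_ps(1.0f);
      float ge[4], nlt[4];
      _mm_storeu_ps(ge,  _mm_cmpge_ps(a, b));   /* ordered: NaN lanes -> 0 */
      _mm_storeu_ps(nlt, _mm_cmpnlt_ps(a, b));  /* negated: NaN lanes -> all-ones */
      printf("ge lane0=%g, nlt lane0 is %s\n", ge[0],
             isnan(nlt[0]) ? "all-ones" : "zero");
      return 0;
    }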
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnlt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not less than +/// or equal to the corresponding value in the second operand and returns +/// the result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnle_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not less than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnle_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not greater +/// than the corresponding value in the second operand and returns the +/// result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpngt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not greater than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpngt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not greater +/// than or equal to the corresponding value in the second operand and +/// returns the result of the comparison in the low-order bits of a vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnge_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not greater than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnge_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is ordered with +/// respect to the corresponding value in the second operand and returns the +/// result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDSS / CMPORDSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpord_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are ordered with respect to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDPS / CMPORDPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
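_mm_cmpord_ps is the standard building block for a "these lanes are real numbers" mask, for example to zero out NaN lanes before a reduction; a minimal sketch (helper name illustrative):

    #include <xmmintrin.h>

    /* Replace NaN lanes of x with 0.0f: ord is all-ones exactly where x is
     * ordered with itself, i.e. not NaN. */
    static inline __m128 zero_nans(__m128 x) {
      __m128 ord = _mm_cmpord_ps(x, x);
      return _mm_and_ps(ord, x);
    }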
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpord_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is unordered +/// with respect to the corresponding value in the second operand and +/// returns the result of the comparison in the low-order bits of a vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDSS / CMPUNORDSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpunord_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are unordered with respect to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDPS / CMPUNORDPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpunord_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands for equality and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the +/// two lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comieq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is less than the second +/// operand and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. 
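Unlike the mask-producing cmp* family, the comi*/ucomi* intrinsics return a plain int suitable for direct branching, with NaN collapsing to 0 (or to 1 for the not-equal form). A sketch (helper name illustrative; _mm_cvtss_f32 is defined later in this header):

    #include <xmmintrin.h>

    /* Return the smaller of the two low lanes; if either low lane is NaN,
     * _mm_comilt_ss yields 0 and the second operand's lane is returned. */
    static inline float min_low(__m128 a, __m128 b) {
      if (_mm_comilt_ss(a, b))
        return _mm_cvtss_f32(a);
      return _mm_cvtss_f32(b);
    }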
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_comilt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is less than or equal to the +/// second operand and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comile_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is greater than the second +/// operand and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the +/// two lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comigt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is greater than or equal to +/// the second operand and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comige_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is not equal to the second +/// operand and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. 
If either of the +/// two lower 32-bit values is NaN, 1 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comineq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine equality and returns +/// the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomieq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// less than the second operand and returns the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomilt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// less than or equal to the second operand and returns the result of the +/// comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomile_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// greater than the second operand and returns the result of the +/// comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomigt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// greater than or equal to the second operand and returns the result of +/// the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 0 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomige_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine inequality and returns +/// the result of the comparison. +/// +/// If either of the two lower 32-bit values is NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. If either of the two +/// lower 32-bit values is NaN, 1 is returned. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomineq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b); +} + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtss_si32(__m128 __a) +{ + return __builtin_ia32_cvtss2si((__v4sf)__a); +} + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvt_ss2si(__m128 __a) +{ + return _mm_cvtss_si32(__a); +} + +#ifdef __x86_64__ + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 64-bit integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 64-bit integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvtss_si64(__m128 __a) +{ + return __builtin_ia32_cvtss2si64((__v4sf)__a); +} + +#endif + +/// Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtps_pi32(__m128 __a) +{ + return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a); +} + +/// Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvt_ps2pi(__m128 __a) +{ + return _mm_cvtps_pi32(__a); +} + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer, truncating the result when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvttss_si32(__m128 __a) +{ + return __builtin_ia32_cvttss2si((__v4sf)__a); +} + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer, truncating the result when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtt_ss2si(__m128 __a) +{ + return _mm_cvttss_si32(__a); +} + +#ifdef __x86_64__ +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 64-bit integer, truncating the result when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 64-bit integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvttss_si64(__m128 __a) +{ + return __builtin_ia32_cvttss2si64((__v4sf)__a); +} +#endif + +/// Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result +/// when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPS2PI / VTTPS2PI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. 
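The practical difference between the cvt and cvtt families above is the rounding step: _mm_cvtss_si32 uses the current MXCSR rounding mode (round-to-nearest-even by default), while _mm_cvttss_si32 always truncates toward zero. A minimal sketch:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 v = _mm_set_ss(2.7f);
      printf("cvt:  %d\n", _mm_cvtss_si32(v));   /* 3 under default rounding */
      printf("cvtt: %d\n", _mm_cvttss_si32(v));  /* 2, always truncated */
      return 0;
    }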
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvttps_pi32(__m128 __a) +{ + return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a); +} + +/// Converts two low-order float values in a 128-bit vector of [4 x +/// float] into a 64-bit vector of [2 x i32], truncating the result when it +/// is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtt_ps2pi(__m128 __a) +{ + return _mm_cvttps_pi32(__a); +} + +/// Converts a 32-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination vector are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 32-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtsi32_ss(__m128 __a, int __b) +{ + __a[0] = __b; + return __a; +} + +/// Converts a 32-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 32-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvt_si2ss(__m128 __a, int __b) +{ + return _mm_cvtsi32_ss(__a, __b); +} + +#ifdef __x86_64__ + +/// Converts a 64-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtsi64_ss(__m128 __a, long long __b) +{ + __a[0] = __b; + return __a; +} + +#endif + +/// Converts two elements of a 64-bit vector of [2 x i32] into two +/// floating point values and writes them to the lower 64-bits of the +/// destination. The remaining higher order elements of the destination are +/// copied from the corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS instruction. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit vector of [2 x i32]. The elements in this vector are converted +/// and written to the corresponding low-order elements in the destination. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted value of the second operand. The upper 64 bits are copied from +/// the upper 64 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi32_ps(__m128 __a, __m64 __b) +{ + return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b); +} + +/// Converts two elements of a 64-bit vector of [2 x i32] into two +/// floating point values and writes them to the lower 64-bits of the +/// destination. The remaining higher order elements of the destination are +/// copied from the corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit vector of [2 x i32]. The elements in this vector are converted +/// and written to the corresponding low-order elements in the destination. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted value from the second operand. The upper 64 bits are copied +/// from the upper 64 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvt_pi2ps(__m128 __a, __m64 __b) +{ + return _mm_cvtpi32_ps(__a, __b); +} + +/// Extracts a float value contained in the lower 32 bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the extraction. +/// \returns A 32-bit float containing the extracted value. +static __inline__ float __DEFAULT_FN_ATTRS +_mm_cvtss_f32(__m128 __a) +{ + return __a[0]; +} + +/// Loads two packed float values from the address \a __p into the +/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits +/// are copied from the low-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0] +/// of the destination. +/// \param __p +/// A pointer to two packed float values. Bits [63:0] are written to bits +/// [127:64] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the moved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadh_pi(__m128 __a, const __m64 *__p) +{ + typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_loadh_pi_struct { + __mm_loadh_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + __mm_loadh_pi_v2f32 __b = ((const struct __mm_loadh_pi_struct*)__p)->__u; + __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1); + return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5); +} + +/// Loads two packed float values from the address \a __p into the +/// low-order bits of a 128-bit vector of [4 x float]. The high-order bits +/// are copied from the high-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. Bits [127:64] are written to bits +/// [127:64] of the destination. 
+/// \param __p +/// A pointer to two packed float values. Bits [63:0] are written to bits +/// [63:0] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the moved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadl_pi(__m128 __a, const __m64 *__p) +{ + typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_loadl_pi_struct { + __mm_loadl_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + __mm_loadl_pi_v2f32 __b = ((const struct __mm_loadl_pi_struct*)__p)->__u; + __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1); + return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits of the vector are initialized with the single-precision +/// floating-point value loaded from a specified memory location. The upper +/// 96 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location containing a single-precision +/// floating-point value. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. The +/// lower 32 bits contain the value loaded from the memory location. The +/// upper 96 bits are set to zero. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_load_ss(const float *__p) +{ + struct __mm_load_ss_struct { + float __u; + } __attribute__((__packed__, __may_alias__)); + float __u = ((const struct __mm_load_ss_struct*)__p)->__u; + return __extension__ (__m128){ __u, 0, 0, 0 }; +} + +/// Loads a 32-bit float value and duplicates it to all four vector +/// elements of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS / MOVSS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a float value to be loaded and duplicated. +/// \returns A 128-bit vector of [4 x float] containing the loaded and +/// duplicated values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_load1_ps(const float *__p) +{ + struct __mm_load1_ps_struct { + float __u; + } __attribute__((__packed__, __may_alias__)); + float __u = ((const struct __mm_load1_ps_struct*)__p)->__u; + return __extension__ (__m128){ __u, __u, __u, __u }; +} + +#define _mm_load_ps1(p) _mm_load1_ps(p) + +/// Loads a 128-bit floating-point vector of [4 x float] from an aligned +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_load_ps(const float *__p) +{ + return *(const __m128*)__p; +} + +/// Loads a 128-bit floating-point vector of [4 x float] from an +/// unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadu_ps(const float *__p) +{ + struct __loadu_ps { + __m128_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +/// Loads four packed float values, in reverse order, from an aligned +/// memory location to 32-bit elements in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded +/// in reverse order. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadr_ps(const float *__p) +{ + __m128 __a = _mm_load_ps(__p); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0); +} + +/// Create a 128-bit vector of [4 x float] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit vector of [4 x float] containing undefined values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_undefined_ps(void) +{ + return (__m128)__builtin_ia32_undef128(); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits of the vector are initialized with the specified single-precision +/// floating-point value. The upper 96 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize the lower 32 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. The +/// lower 32 bits contain the value provided in the source operand. The +/// upper 96 bits are set to zero. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set_ss(float __w) +{ + return __extension__ (__m128){ __w, 0, 0, 0 }; +} + +/// Constructs a 128-bit floating-point vector of [4 x float], with each +/// of the four single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set1_ps(float __w) +{ + return __extension__ (__m128){ __w, __w, __w, __w }; +} + +/* Microsoft specific. */ +/// Constructs a 128-bit floating-point vector of [4 x float], with each +/// of the four single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set_ps1(float __w) +{ + return _mm_set1_ps(__w); +} + +/// Constructs a 128-bit floating-point vector of [4 x float] +/// initialized with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. 
+///
+/// \param __z
+///    A single-precision floating-point value used to initialize bits [127:96]
+///    of the result.
+/// \param __y
+///    A single-precision floating-point value used to initialize bits [95:64]
+///    of the result.
+/// \param __x
+///    A single-precision floating-point value used to initialize bits [63:32]
+///    of the result.
+/// \param __w
+///    A single-precision floating-point value used to initialize bits [31:0]
+///    of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_set_ps(float __z, float __y, float __x, float __w)
+{
+  return __extension__ (__m128){ __w, __x, __y, __z };
+}
+
+/// Constructs a 128-bit floating-point vector of [4 x float],
+///    initialized in reverse order with the specified 32-bit single-precision
+///    floating-point values.
+///
+/// \headerfile
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+///    instruction.
+///
+/// \param __z
+///    A single-precision floating-point value used to initialize bits [31:0]
+///    of the result.
+/// \param __y
+///    A single-precision floating-point value used to initialize bits [63:32]
+///    of the result.
+/// \param __x
+///    A single-precision floating-point value used to initialize bits [95:64]
+///    of the result.
+/// \param __w
+///    A single-precision floating-point value used to initialize bits [127:96]
+///    of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float].
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setr_ps(float __z, float __y, float __x, float __w)
+{
+  return __extension__ (__m128){ __z, __y, __x, __w };
+}
+
+/// Constructs a 128-bit floating-point vector of [4 x float] initialized
+///    to zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VXORPS / XORPS instruction.
+///
+/// \returns An initialized 128-bit floating-point vector of [4 x float] with
+///    all elements set to zero.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_setzero_ps(void)
+{
+  return __extension__ (__m128){ 0, 0, 0, 0 };
+}
+
+/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
+///    memory location.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPEXTRQ / PEXTRQ instruction.
+///
+/// \param __p
+///    A pointer to a 64-bit memory location.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeh_pi(__m64 *__p, __m128 __a)
+{
+  typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));
+  struct __mm_storeh_pi_struct {
+    __mm_storeh_pi_v2f32 __u;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 2, 3);
+}
+
+/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
+///    memory location.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction.
+///
+/// \param __p
+///    A pointer to a memory location that will receive the float values.
+/// \param __a
+///    A 128-bit vector of [4 x float] containing the values to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS +_mm_storel_pi(__m64 *__p, __m128 __a) +{ + typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_storeh_pi_struct { + __mm_storeh_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 0, 1); +} + +/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ss(float *__p, __m128 __a) +{ + struct __mm_store_ss_struct { + float __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_store_ss_struct*)__p)->__u = __a[0]; +} + +/// Stores a 128-bit vector of [4 x float] to an unaligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_ps(float *__p, __m128 __a) +{ + struct __storeu_ps { + __m128_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; +} + +/// Stores a 128-bit vector of [4 x float] into an aligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ps(float *__p, __m128 __a) +{ + *(__m128*)__p = __a; +} + +/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into +/// four contiguous elements in an aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each +/// of the four contiguous elements pointed by \a __p. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store1_ps(float *__p, __m128 __a) +{ + __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0); + _mm_store_ps(__p, __a); +} + +/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into +/// four contiguous elements in an aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each +/// of the four contiguous elements pointed by \a __p. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ps1(float *__p, __m128 __a) +{ + _mm_store1_ps(__p, __a); +} + +/// Stores float values from a 128-bit vector of [4 x float] to an +/// aligned memory location in reverse order. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling +/// instruction. 
+/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storer_ps(float *__p, __m128 __a) +{ + __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0); + _mm_store_ps(__p, __a); +} + +#define _MM_HINT_ET0 7 +#define _MM_HINT_ET1 6 +#define _MM_HINT_T0 3 +#define _MM_HINT_T1 2 +#define _MM_HINT_T2 1 +#define _MM_HINT_NTA 0 + +#ifndef _MSC_VER +/* FIXME: We have to #define this because "sel" must be a constant integer, and + Sema doesn't do any form of constant propagation yet. */ + +/// Loads one cache line of data from the specified address to a location +/// closer to the processor. +/// +/// \headerfile +/// +/// \code +/// void _mm_prefetch(const void * a, const int sel); +/// \endcode +/// +/// This intrinsic corresponds to the PREFETCHNTA instruction. +/// +/// \param a +/// A pointer to a memory location containing a cache line of data. +/// \param sel +/// A predefined integer constant specifying the type of prefetch +/// operation: \n +/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The +/// PREFETCHNTA instruction will be generated. \n +/// _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will +/// be generated. \n +/// _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will +/// be generated. \n +/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will +/// be generated. +#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \ + ((sel) >> 2) & 1, (sel) & 0x3)) +#endif + +/// Stores a 64-bit integer in the specified aligned memory location. To +/// minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTQ instruction. +/// +/// \param __p +/// A pointer to an aligned memory location used to store the register value. +/// \param __a +/// A 64-bit integer containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS_MMX +_mm_stream_pi(__m64 *__p, __m64 __a) +{ + __builtin_ia32_movntq(__p, __a); +} + +/// Moves packed float values from a 128-bit vector of [4 x float] to a +/// 128-bit aligned memory location. To minimize caching, the data is flagged +/// as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit aligned memory location that will receive the +/// single-precision floating-point values. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be moved. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_stream_ps(float *__p, __m128 __a) +{ + __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p); +} + +#if defined(__cplusplus) +extern "C" { +#endif + +/// Forces strong memory ordering (serialization) between store +/// instructions preceding this instruction and store instructions following +/// this instruction, ensuring the system completes all previous stores +/// before executing subsequent stores. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the SFENCE instruction. 
+///
+void _mm_sfence(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+
+/// Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
+///    returns it, as specified by the immediate integer operand.
+///
+/// \headerfile
+///
+/// \code
+/// int _mm_extract_pi16(__m64 a, int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction.
+///
+/// \param a
+///    A 64-bit vector of [4 x i16].
+/// \param n
+///    An immediate integer operand that determines which bits are extracted: \n
+///    0: Bits [15:0] are copied to the destination. \n
+///    1: Bits [31:16] are copied to the destination. \n
+///    2: Bits [47:32] are copied to the destination. \n
+///    3: Bits [63:48] are copied to the destination.
+/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
+#define _mm_extract_pi16(a, n) \
+  (int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n)
+
+/// Copies data from the 64-bit vector of [4 x i16] to the destination,
+///    and inserts the lower 16-bits of an integer operand at the 16-bit offset
+///    specified by the immediate operand \a n.
+///
+/// \headerfile
+///
+/// \code
+/// __m64 _mm_insert_pi16(__m64 a, int d, int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the PINSRW instruction.
+///
+/// \param a
+///    A 64-bit vector of [4 x i16].
+/// \param d
+///    An integer. The lower 16-bit value from this operand is written to the
+///    destination at the offset specified by operand \a n.
+/// \param n
+///    An immediate integer operand that determines which bits are to be used
+///    in the destination. \n
+///    0: Bits [15:0] are copied to the destination. \n
+///    1: Bits [31:16] are copied to the destination. \n
+///    2: Bits [47:32] are copied to the destination. \n
+///    3: Bits [63:48] are copied to the destination. \n
+///    The remaining bits in the destination are copied from the corresponding
+///    bits in operand \a a.
+/// \returns A 64-bit integer vector containing the copied packed data from the
+///    operands.
+#define _mm_insert_pi16(a, d, n) \
+  (__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n)
+
+/// Compares each of the corresponding packed 16-bit integer values of
+///    the 64-bit integer vectors, and writes the greater value to the
+///    corresponding bits in the destination.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the PMAXSW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_max_pi16(__m64 __a, __m64 __b)
+{
+  return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
+}
+
+/// Compares each of the corresponding packed 8-bit unsigned integer
+///    values of the 64-bit integer vectors, and writes the greater value to the
+///    corresponding bits in the destination.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the PMAXUB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the comparison results.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_max_pu8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b); +} + +/// Compares each of the corresponding packed 16-bit integer values of +/// the 64-bit integer vectors, and writes the lesser value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMINSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_min_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b); +} + +/// Compares each of the corresponding packed 8-bit unsigned integer +/// values of the 64-bit integer vectors, and writes the lesser value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMINUB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_min_pu8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b); +} + +/// Takes the most significant bit from each 8-bit element in a 64-bit +/// integer vector to create an 8-bit mask value. Zero-extends the value to +/// 32-bit integer and writes it to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMOVMSKB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values with bits to be extracted. +/// \returns The most significant bit from each 8-bit element in \a __a, +/// written to bits [7:0]. +static __inline__ int __DEFAULT_FN_ATTRS_MMX +_mm_movemask_pi8(__m64 __a) +{ + return __builtin_ia32_pmovmskb((__v8qi)__a); +} + +/// Multiplies packed 16-bit unsigned integer values and writes the +/// high-order 16 bits of each 32-bit product to the corresponding bits in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULHUW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the products of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_mulhi_pu16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b); +} + +/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the +/// destination, as specified by the immediate value operand. +/// +/// \headerfile +/// +/// \code +/// __m64 _mm_shuffle_pi16(__m64 a, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the PSHUFW instruction. +/// +/// \param a +/// A 64-bit integer vector containing the values to be shuffled. +/// \param n +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a. The destinations within the 64-bit destination are +/// assigned values as follows: \n +/// Bits [1:0] are used to assign values to bits [15:0] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [31:16] in the +/// destination. 
\n
+///    Bits [5:4] are used to assign values to bits [47:32] in the
+///    destination. \n
+///    Bits [7:6] are used to assign values to bits [63:48] in the
+///    destination. \n
+///    Bit value assignments: \n
+///    00: assigned from bits [15:0] of \a a. \n
+///    01: assigned from bits [31:16] of \a a. \n
+///    10: assigned from bits [47:32] of \a a. \n
+///    11: assigned from bits [63:48] of \a a.
+/// \returns A 64-bit integer vector containing the shuffled values.
+#define _mm_shuffle_pi16(a, n) \
+  (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n))
+
+/// Conditionally copies the values from each 8-bit element in the first
+///    64-bit integer vector operand to the specified memory location, as
+///    specified by the most significant bit in the corresponding element in the
+///    second 64-bit integer vector operand.
+///
+///    To minimize caching, the data is flagged as non-temporal
+///    (unlikely to be used again soon).
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the MASKMOVQ instruction.
+///
+/// \param __d
+///    A 64-bit integer vector containing the values with elements to be copied.
+/// \param __n
+///    A 64-bit integer vector operand. The most significant bit from each 8-bit
+///    element determines whether the corresponding element in operand \a __d
+///    is copied. If the most significant bit of a given element is 1, the
+///    corresponding element in operand \a __d is copied.
+/// \param __p
+///    A pointer to a 64-bit memory location that will receive the conditionally
+///    copied integer values. The address of the memory location does not have
+///    to be aligned.
+static __inline__ void __DEFAULT_FN_ATTRS_MMX
+_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
+{
+  __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
+}
+
+/// Computes the rounded averages of the packed unsigned 8-bit integer
+///    values and writes the averages to the corresponding bits in the
+///    destination.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the PAVGB instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the averages of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_avg_pu8(__m64 __a, __m64 __b)
+{
+  return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
+}
+
+/// Computes the rounded averages of the packed unsigned 16-bit integer
+///    values and writes the averages to the corresponding bits in the
+///    destination.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the PAVGW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector containing the averages of both operands.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_avg_pu16(__m64 __a, __m64 __b)
+{
+  return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
+}
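+
+/* Editor's illustration (not part of the upstream header): the PAVG*
+   averages round upward, computing (a + b + 1) >> 1 in each lane. A
+   minimal sketch, assuming MMX/SSE support:
+
+     __m64 x = _mm_set1_pi8(1);
+     __m64 y = _mm_set1_pi8(2);
+     __m64 r = _mm_avg_pu8(x, y);   // every 8-bit lane holds 2, not 1
+*/
+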
+/// Subtracts the corresponding 8-bit unsigned integer values of the two
+///    64-bit vector operands and computes the absolute value of each
+///    difference. The sum of the eight absolute differences is then written to
+///    bits [15:0] of the destination; the remaining bits [63:16] are cleared.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the PSADBW instruction.
+///
+/// \param __a
+///    A 64-bit integer vector containing one of the source operands.
+/// \param __b
+///    A 64-bit integer vector containing one of the source operands.
+/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the
+///    sets of absolute differences between both operands. The upper bits are
+///    cleared.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_sad_pu8(__m64 __a, __m64 __b)
+{
+  return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
+}
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/// Returns the contents of the MXCSR register as a 32-bit unsigned
+///    integer value.
+///
+/// There are several groups of macros associated with this
+/// intrinsic, including:
+/// <ul>
+/// <li>
+///    For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+///    _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+///    _MM_EXCEPT_INEXACT. There is a convenience wrapper
+///    _MM_GET_EXCEPTION_STATE().
+/// </li>
+/// <li>
+///    For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+///    _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+///    There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
+/// </li>
+/// <li>
+///    For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+///    _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+///    _MM_GET_ROUNDING_MODE().
+/// </li>
+/// <li>
+///    For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+///    There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
+/// </li>
+/// <li>
+///    For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+///    _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+///    _MM_GET_DENORMALS_ZERO_MODE().
+/// </li>
+/// </ul>
+///
+/// For example, the following expression checks if an overflow exception has
+///    occurred:
+/// \code
+///   ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )
+/// \endcode
+///
+/// The following expression gets the current rounding mode:
+/// \code
+///   _MM_GET_ROUNDING_MODE()
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VSTMXCSR / STMXCSR instruction.
+///
+/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
+///    register.
+unsigned int _mm_getcsr(void);
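+
+/* Editor's illustration (not part of the upstream header): a minimal
+   sketch of polling and clearing the sticky exception flags using
+   _mm_getcsr()/_mm_setcsr() and the _MM_* macros defined later in this
+   file:
+
+     if (_mm_getcsr() & _MM_EXCEPT_DIV_ZERO) {
+       _MM_SET_EXCEPTION_STATE(0);   // clear all sticky exception bits
+     }
+*/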
+
+/// Sets the MXCSR register with the 32-bit unsigned integer value.
+///
+/// There are several groups of macros associated with this intrinsic,
+/// including:
+/// <ul>
+/// <li>
+///    For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+///    _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+///    _MM_EXCEPT_INEXACT. There is a convenience wrapper
+///    _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
+/// </li>
+/// <li>
+///    For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+///    _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+///    There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
+///    of these macros.
+/// </li>
+/// <li>
+///    For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+///    _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+///    _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
+/// </li>
+/// <li>
+///    For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+///    There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
+///    one of these macros.
+/// </li>
+/// <li>
+///    For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+///    _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+///    _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
+/// </li>
+/// </ul>
+/// +/// For example, the following expression causes subsequent floating-point +/// operations to round up: +/// _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP) +/// +/// The following example sets the DAZ and FTZ flags: +/// \code +/// void setFlags() { +/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); +/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); +/// } +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDMXCSR / LDMXCSR instruction. +/// +/// \param __i +/// A 32-bit unsigned integer value to be written to the MXCSR register. +void _mm_setcsr(unsigned int __i); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// Selects 4 float values from the 128-bit operands of [4 x float], as +/// specified by the immediate value operand. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPS / SHUFPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param mask +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a and \a b. \n +/// Bits [3:0] specify the values copied from operand \a a. \n +/// Bits [7:4] specify the values copied from operand \a b. \n +/// The destinations within the 128-bit destination are assigned values as +/// follows: \n +/// Bits [1:0] are used to assign values to bits [31:0] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [63:32] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [95:64] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [127:96] in the +/// destination. \n +/// Bit value assignments: \n +/// 00: Bits [31:0] copied from the specified operand. \n +/// 01: Bits [63:32] copied from the specified operand. \n +/// 10: Bits [95:64] copied from the specified operand. \n +/// 11: Bits [127:96] copied from the specified operand. +/// \returns A 128-bit vector of [4 x float] containing the shuffled values. +#define _mm_shuffle_ps(a, b, mask) \ + (__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \ + (int)(mask)) + +/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of +/// [4 x float] and interleaves them into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPS / UNPCKHPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [95:64] are written to bits [31:0] of the destination. \n +/// Bits [127:96] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// Bits [95:64] are written to bits [63:32] of the destination. \n +/// Bits [127:96] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the interleaved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_unpackhi_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7); +} + +/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of +/// [4 x float] and interleaves them into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPS / UNPCKLPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [31:0] are written to bits [31:0] of the destination. 
\n +/// Bits [63:32] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x float]. \n +/// Bits [31:0] are written to bits [63:32] of the destination. \n +/// Bits [63:32] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the interleaved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_unpacklo_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits are set to the lower 32 bits of the second parameter. The upper +/// 96 bits are set to the upper 96 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDPS / BLENDPS / MOVSS +/// instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are +/// written to the upper 96 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The lower 32 bits are +/// written to the lower 32 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_move_ss(__m128 __a, __m128 __b) +{ + __a[0] = __b[0]; + return __a; +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 64 bits are set to the upper 64 bits of the second parameter. The upper +/// 64 bits are set to the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are +/// written to the upper 64 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are +/// written to the lower 64 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_movehl_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 64 bits are set to the lower 64 bits of the first parameter. The upper +/// 64 bits are set to the lower 64 bits of the second parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are +/// written to the upper 64 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_movelh_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5); +} + +/// Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x +/// float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16]. The elements of the destination are copied +/// from the corresponding elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi16_ps(__m64 __a) +{ + __m64 __b, __c; + __m128 __r; + + __b = _mm_setzero_si64(); + __b = _mm_cmpgt_pi16(__b, __a); + __c = _mm_unpackhi_pi16(__a, __b); + __r = _mm_setzero_ps(); + __r = _mm_cvtpi32_ps(__r, __c); + __r = _mm_movelh_ps(__r, __r); + __c = _mm_unpacklo_pi16(__a, __b); + __r = _mm_cvtpi32_ps(__r, __c); + + return __r; +} + +/// Converts a 64-bit vector of 16-bit unsigned integer values into a +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of 16-bit unsigned integer values. The elements of the +/// destination are copied from the corresponding elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpu16_ps(__m64 __a) +{ + __m64 __b, __c; + __m128 __r; + + __b = _mm_setzero_si64(); + __c = _mm_unpackhi_pi16(__a, __b); + __r = _mm_setzero_ps(); + __r = _mm_cvtpi32_ps(__r, __c); + __r = _mm_movelh_ps(__r, __r); + __c = _mm_unpacklo_pi16(__a, __b); + __r = _mm_cvtpi32_ps(__r, __c); + + return __r; +} + +/// Converts the lower four 8-bit values from a 64-bit vector of [8 x i8] +/// into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of [8 x i8]. The elements of the destination are copied +/// from the corresponding lower 4 elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi8_ps(__m64 __a) +{ + __m64 __b; + + __b = _mm_setzero_si64(); + __b = _mm_cmpgt_pi8(__b, __a); + __b = _mm_unpacklo_pi8(__a, __b); + + return _mm_cvtpi16_ps(__b); +} + +/// Converts the lower four unsigned 8-bit integer values from a 64-bit +/// vector of [8 x u8] into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of unsigned 8-bit integer values. The elements of the +/// destination are copied from the corresponding lower 4 elements in this +/// operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpu8_ps(__m64 __a) +{ + __m64 __b; + + __b = _mm_setzero_si64(); + __b = _mm_unpacklo_pi8(__a, __b); + + return _mm_cvtpi16_ps(__b); +} + +/// Converts the two 32-bit signed integer values from each 64-bit vector +/// operand of [2 x i32] into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. The lower elements of the destination are +/// copied from the elements in this operand. +/// \param __b +/// A 64-bit vector of [2 x i32]. The upper elements of the destination are +/// copied from the elements in this operand. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// copied and converted values from the first operand. The upper 64 bits +/// contain the copied and converted values from the second operand. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
+_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
+{
+  __m128 __c;
+
+  __c = _mm_setzero_ps();
+  __c = _mm_cvtpi32_ps(__c, __b);
+  __c = _mm_movelh_ps(__c, __c);
+
+  return _mm_cvtpi32_ps(__c, __a);
+}
+
+/// Converts each single-precision floating-point element of a 128-bit
+///    floating-point vector of [4 x float] into a 16-bit signed integer, and
+///    packs the results into a 64-bit integer vector of [4 x i16].
+///
+///    If the floating-point element is NaN or infinity, or if the
+///    floating-point element is greater than 0x7FFFFFFF or less than -0x8000,
+///    it is converted to 0x8000. Otherwise if the floating-point element is
+///    greater than 0x7FFF, it is converted to 0x7FFF.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float].
+/// \returns A 64-bit integer vector of [4 x i16] containing the converted
+///    values.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_cvtps_pi16(__m128 __a)
+{
+  __m64 __b, __c;
+
+  __b = _mm_cvtps_pi32(__a);
+  __a = _mm_movehl_ps(__a, __a);
+  __c = _mm_cvtps_pi32(__a);
+
+  return _mm_packs_pi32(__b, __c);
+}
+
+/// Converts each single-precision floating-point element of a 128-bit
+///    floating-point vector of [4 x float] into an 8-bit signed integer, and
+///    packs the results into the lower 32 bits of a 64-bit integer vector of
+///    [8 x i8]. The upper 32 bits of the vector are set to 0.
+///
+///    If the floating-point element is NaN or infinity, or if the
+///    floating-point element is greater than 0x7FFFFFFF or less than -0x80, it
+///    is converted to 0x80. Otherwise if the floating-point element is greater
+///    than 0x7F, it is converted to 0x7F.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float].
+/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the
+///    converted values and the upper 32 bits are set to zero.
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
+_mm_cvtps_pi8(__m128 __a)
+{
+  __m64 __b, __c;
+
+  __b = _mm_cvtps_pi16(__a);
+  __c = _mm_setzero_si64();
+
+  return _mm_packs_pi16(__b, __c);
+}
+
+/// Extracts the sign bits from each single-precision floating-point
+///    element of a 128-bit floating-point vector of [4 x float] and returns the
+///    sign bits in bits [0:3] of the result. Bits [31:4] of the result are set
+///    to zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VMOVMSKPS / MOVMSKPS instruction.
+///
+/// \param __a
+///    A 128-bit floating-point vector of [4 x float].
+/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
+///    single-precision floating-point element of the parameter. Bits [31:4] are
+///    set to zero.
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm_movemask_ps(__m128 __a)
+{
+  return __builtin_ia32_movmskps((__v4sf)__a);
+}
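+
+/* Editor's illustration (not part of the upstream header): a minimal
+   sketch of _mm_movemask_ps, which packs the four sign bits into
+   bits [3:0] of the result:
+
+     __m128 v = _mm_setr_ps(-1.0f, 2.0f, -3.0f, 4.0f);
+     int m = _mm_movemask_ps(v);   // m == 0x5 (lanes 0 and 2 are negative)
+*/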
+
+
+#define _MM_ALIGN16 __attribute__((aligned(16)))
+
+#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
+
+#define _MM_EXCEPT_INVALID    (0x0001U)
+#define _MM_EXCEPT_DENORM     (0x0002U)
+#define _MM_EXCEPT_DIV_ZERO   (0x0004U)
+#define _MM_EXCEPT_OVERFLOW   (0x0008U)
+#define _MM_EXCEPT_UNDERFLOW  (0x0010U)
+#define _MM_EXCEPT_INEXACT    (0x0020U)
+#define _MM_EXCEPT_MASK       (0x003fU)
+
+#define _MM_MASK_INVALID      (0x0080U)
+#define _MM_MASK_DENORM       (0x0100U)
+#define _MM_MASK_DIV_ZERO     (0x0200U)
+#define _MM_MASK_OVERFLOW     (0x0400U)
+#define _MM_MASK_UNDERFLOW    (0x0800U)
+#define _MM_MASK_INEXACT      (0x1000U)
+#define _MM_MASK_MASK         (0x1f80U)
+
+#define _MM_ROUND_NEAREST     (0x0000U)
+#define _MM_ROUND_DOWN        (0x2000U)
+#define _MM_ROUND_UP          (0x4000U)
+#define _MM_ROUND_TOWARD_ZERO (0x6000U)
+#define _MM_ROUND_MASK        (0x6000U)
+
+#define _MM_FLUSH_ZERO_MASK   (0x8000U)
+#define _MM_FLUSH_ZERO_ON     (0x8000U)
+#define _MM_FLUSH_ZERO_OFF    (0x0000U)
+
+#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
+#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
+#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
+#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
+
+#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
+#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
+#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
+#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
+
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+  __m128 tmp3, tmp2, tmp1, tmp0; \
+  tmp0 = _mm_unpacklo_ps((row0), (row1)); \
+  tmp2 = _mm_unpacklo_ps((row2), (row3)); \
+  tmp1 = _mm_unpackhi_ps((row0), (row1)); \
+  tmp3 = _mm_unpackhi_ps((row2), (row3)); \
+  (row0) = _mm_movelh_ps(tmp0, tmp2); \
+  (row1) = _mm_movehl_ps(tmp2, tmp0); \
+  (row2) = _mm_movelh_ps(tmp1, tmp3); \
+  (row3) = _mm_movehl_ps(tmp3, tmp1); \
+} while (0)
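+
+/* Editor's illustration (not part of the upstream header): a minimal
+   sketch of _MM_TRANSPOSE4_PS transposing a 4x4 matrix held in four row
+   vectors; the row variables are illustrative only:
+
+     __m128 r0 = _mm_setr_ps( 0.0f,  1.0f,  2.0f,  3.0f);
+     __m128 r1 = _mm_setr_ps( 4.0f,  5.0f,  6.0f,  7.0f);
+     __m128 r2 = _mm_setr_ps( 8.0f,  9.0f, 10.0f, 11.0f);
+     __m128 r3 = _mm_setr_ps(12.0f, 13.0f, 14.0f, 15.0f);
+     _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
+     // r0 now holds {0, 4, 8, 12}, r1 {1, 5, 9, 13}, and so on.
+*/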
+
+/* Aliases for compatibility. */
+#define _m_pextrw _mm_extract_pi16
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxub _mm_max_pu8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminub _mm_min_pu8
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_maskmovq _mm_maskmove_si64
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_ _mm_
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_MMX
+
+/* Ugly hack for backwards-compatibility (compatible with gcc) */
+#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
+#include <emmintrin.h>
+#endif
+
+#endif /* __XMMINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xopintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xopintrin.h
new file mode 100644
index 0000000..5cedde4
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xopintrin.h
@@ -0,0 +1,770 @@
+/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <xopintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __XOPINTRIN_H
+#define __XOPINTRIN_H
+
+#include <fma4intrin.h>
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(256)))
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddw_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddd_epi16(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_haddq_epi16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epi32(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphadddq((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddw_epu8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddd_epu8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epu8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddd_epu16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epu16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epu32(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubw_epi8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubd_epi16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubq_epi32(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)(((__v2du)__A & (__v2du)__C) | ((__v2du)__B & ~(__v2du)__C)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)(((__v4du)__A & (__v4du)__C) | ((__v4du)__B & ~(__v4du)__C)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi8(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B); +} + +#define _mm_roti_epi8(A, N) \ + (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)) + +#define _mm_roti_epi16(A, N) \ + (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)) + +#define _mm_roti_epi32(A, N) \ + (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)) + +#define _mm_roti_epi64(A, N) \ + (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi8(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi32(__m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi8(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B); +} + +#define _mm_com_epu8(A, B, N) \ + (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (N)) + +#define _mm_com_epu16(A, B, N) \ + (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (N)) + +#define _mm_com_epu32(A, B, N) \ + (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (N)) + +#define _mm_com_epu64(A, B, N) \ + (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (N)) + +#define _mm_com_epi8(A, B, N) \ + (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (N)) + +#define _mm_com_epi16(A, B, N) \ + (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (N)) + +#define _mm_com_epi32(A, B, N) \ + (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (N)) + +#define _mm_com_epi64(A, B, N) \ + (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (N)) + +#define _MM_PCOMCTRL_LT 0 +#define _MM_PCOMCTRL_LE 1 +#define _MM_PCOMCTRL_GT 2 +#define _MM_PCOMCTRL_GE 3 +#define _MM_PCOMCTRL_EQ 4 +#define _MM_PCOMCTRL_NEQ 5 +#define _MM_PCOMCTRL_FALSE 6 +#define _MM_PCOMCTRL_TRUE 7 + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE); +} 
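+
+/* Illustrative usage sketch (an editorial addition, not upstream header
+ * text): each _mm_com*_ep* wrapper simply forwards to the corresponding
+ * _mm_com_ep* macro with one of the _MM_PCOMCTRL_* immediates defined
+ * above, and the resulting all-ones/all-zeros lane masks compose with
+ * _mm_cmov_si128. Assuming an XOP-capable CPU and inclusion via
+ * <x86intrin.h>:
+ *
+ *   __m128i a  = _mm_set1_epi16(3), b = _mm_set1_epi16(5);
+ *   __m128i lt = _mm_comlt_epu16(a, b);    // all-ones lanes where a < b
+ *   __m128i r  = _mm_cmov_si128(a, b, lt); // a where mask set, else b
+ */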
+ +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi8(__m128i __A, __m128i __B) +{ + return 
_mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi64(__m128i __A, 
__m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comgt_epi64(__m128i __A, __m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comge_epi64(__m128i __A, __m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comeq_epi64(__m128i __A, __m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comneq_epi64(__m128i __A, __m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comfalse_epi64(__m128i __A, __m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_comtrue_epi64(__m128i __A, __m128i __B)
+{
+  return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
+}
+
+#define _mm_permute2_pd(X, Y, C, I) \
+  (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+                                     (__v2df)(__m128d)(Y), \
+                                     (__v2di)(__m128i)(C), (I))
+
+#define _mm256_permute2_pd(X, Y, C, I) \
+  (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+                                        (__v4df)(__m256d)(Y), \
+                                        (__v4di)(__m256i)(C), (I))
+
+#define _mm_permute2_ps(X, Y, C, I) \
+  (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+                                    (__v4si)(__m128i)(C), (I))
+
+#define _mm256_permute2_ps(X, Y, C, I) \
+  (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+                                       (__v8sf)(__m256)(Y), \
+                                       (__v8si)(__m256i)(C), (I))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ss(__m128 __A)
+{
+  return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_sd(__m128d __A)
+{
+  return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_frcz_ps(__m128 __A)
+{
+  return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_frcz_pd(__m128d __A)
+{
+  return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_frcz_ps(__m256 __A)
+{
+  return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_frcz_pd(__m256d __A)
+{
+  return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
+}
+
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS256
+
+#endif /* __XOPINTRIN_H */
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsavecintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsavecintrin.h
new file mode 100644
index 0000000..5524947
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsavecintrin.h
@@ -0,0 +1,34 @@
+/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsaveintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsaveintrin.h
new file mode 100644
index 0000000..9429db6
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsaveintrin.h
@@ -0,0 +1,63 @@
+/*===---- xsaveintrin.h - XSAVE intrinsic ----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+#ifdef _MSC_VER
+#define _XCR_XFEATURE_ENABLED_MASK 0
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifndef _MSC_VER
+#define _xgetbv(A) __builtin_ia32_xgetbv((long long)(A))
+#define _xsetbv(A, B) __builtin_ia32_xsetbv((unsigned int)(A), (unsigned long long)(B))
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MSC_VER */
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstor64(__p, __m);
+}
+
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsaveoptintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsaveoptintrin.h
new file mode 100644
index 0000000..89a4c44
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsaveoptintrin.h
@@ -0,0 +1,34 @@
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsavesintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsavesintrin.h
new file mode 100644
index 0000000..3f99219
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xsavesintrin.h
@@ -0,0 +1,44 @@
+/*===---- xsavesintrin.h - XSAVES intrinsic --------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstors64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaves64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xtestintrin.h b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xtestintrin.h
new file mode 100644
index 0000000..7d19e37
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/include/xtestintrin.h
@@ -0,0 +1,27 @@
+/*===---- xtestintrin.h - XTEST intrinsic ----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xtestintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XTESTINTRIN_H
+#define __XTESTINTRIN_H
+
+/* xtest returns non-zero if the instruction is executed within an RTM or active
+ * HLE region. */
+/* FIXME: This can be an either/or for RTM/HLE. Deal with this when HLE is
+ * supported.
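+ *
+ * Illustrative usage sketch (an editorial addition, not upstream header
+ * text): _xtest is normally paired with the RTM intrinsics _xbegin() and
+ * _xend() from rtmintrin.h, also pulled in via <immintrin.h>, e.g.
+ *
+ *   if (_xbegin() == _XBEGIN_STARTED) {  // transaction is now active
+ *     ...                                // here _xtest() returns non-zero
+ *     _xend();                           // commit the transaction
+ *   }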
*/ +static __inline__ int + __attribute__((__always_inline__, __nodebug__, __target__("rtm"))) + _xtest(void) { + return __builtin_ia32_xtest(); +} + +#endif diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/aarch64-unknown-fuchsia/libclang_rt.builtins.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/aarch64-unknown-fuchsia/libclang_rt.builtins.a new file mode 100644 index 0000000..8bffbef Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/aarch64-unknown-fuchsia/libclang_rt.builtins.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-aarch64-android.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-aarch64-android.so new file mode 100755 index 0000000..c3a850f Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-aarch64-android.so differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-arm-android.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-arm-android.so new file mode 100755 index 0000000..b417826 Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-arm-android.so differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-i386.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-i386.a new file mode 100644 index 0000000..c68890d Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-i386.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-i686-android.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-i686-android.so new file mode 100755 index 0000000..40570c9 Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-i686-android.so differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-x86_64.a new file mode 100644 index 0000000..875396d Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-x86_64.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-x86_64.a.syms new file mode 100644 index 0000000..8ede516 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan-x86_64.a.syms @@ -0,0 +1,1664 @@ +{ + _Unwind_RaiseException; + __asan_*; + __bzero; + __cxa_atexit; + __cxa_rethrow_primary_exception; + __cxa_throw; + __fprintf_chk; + __getdelim; + __interceptor__Unwind_RaiseException; + __interceptor___bzero; + __interceptor___cxa_atexit; + __interceptor___cxa_rethrow_primary_exception; + __interceptor___cxa_throw; + __interceptor___fprintf_chk; + __interceptor___getdelim; + __interceptor___isoc99_fprintf; + __interceptor___isoc99_fscanf; + __interceptor___isoc99_printf; + __interceptor___isoc99_scanf; + __interceptor___isoc99_snprintf; + __interceptor___isoc99_sprintf; + __interceptor___isoc99_sscanf; + __interceptor___isoc99_vfprintf; + 
__interceptor___isoc99_vfscanf; + __interceptor___isoc99_vprintf; + __interceptor___isoc99_vscanf; + __interceptor___isoc99_vsnprintf; + __interceptor___isoc99_vsprintf; + __interceptor___isoc99_vsscanf; + __interceptor___libc_memalign; + __interceptor___longjmp_chk; + __interceptor___lxstat; + __interceptor___lxstat64; + __interceptor___overflow; + __interceptor___pthread_mutex_lock; + __interceptor___pthread_mutex_unlock; + __interceptor___snprintf_chk; + __interceptor___sprintf_chk; + __interceptor___strdup; + __interceptor___strndup; + __interceptor___strxfrm_l; + __interceptor___tls_get_addr; + __interceptor___uflow; + __interceptor___underflow; + __interceptor___vsnprintf_chk; + __interceptor___vsprintf_chk; + __interceptor___wcsxfrm_l; + __interceptor___woverflow; + __interceptor___wuflow; + __interceptor___wunderflow; + __interceptor___xpg_strerror_r; + __interceptor___xstat; + __interceptor___xstat64; + __interceptor__exit; + __interceptor__longjmp; + __interceptor__obstack_begin; + __interceptor__obstack_begin_1; + __interceptor__obstack_newchunk; + __interceptor_accept; + __interceptor_accept4; + __interceptor_aligned_alloc; + __interceptor_asctime; + __interceptor_asctime_r; + __interceptor_asprintf; + __interceptor_atoi; + __interceptor_atol; + __interceptor_atoll; + __interceptor_backtrace; + __interceptor_backtrace_symbols; + __interceptor_bcmp; + __interceptor_bsearch; + __interceptor_bzero; + __interceptor_calloc; + __interceptor_canonicalize_file_name; + __interceptor_capget; + __interceptor_capset; + __interceptor_cfree; + __interceptor_clock_getcpuclockid; + __interceptor_clock_getres; + __interceptor_clock_gettime; + __interceptor_clock_settime; + __interceptor_confstr; + __interceptor_crypt; + __interceptor_crypt_r; + __interceptor_ctermid; + __interceptor_ctime; + __interceptor_ctime_r; + __interceptor_dlclose; + __interceptor_dlopen; + __interceptor_drand48_r; + __interceptor_endgrent; + __interceptor_endpwent; + __interceptor_ether_aton; + __interceptor_ether_aton_r; + __interceptor_ether_hostton; + __interceptor_ether_line; + __interceptor_ether_ntoa; + __interceptor_ether_ntoa_r; + __interceptor_ether_ntohost; + __interceptor_eventfd_read; + __interceptor_eventfd_write; + __interceptor_fclose; + __interceptor_fdopen; + __interceptor_fflush; + __interceptor_fgetgrent; + __interceptor_fgetgrent_r; + __interceptor_fgetpwent; + __interceptor_fgetpwent_r; + __interceptor_fgets; + __interceptor_fgetxattr; + __interceptor_flistxattr; + __interceptor_fmemopen; + __interceptor_fopen; + __interceptor_fopen64; + __interceptor_fopencookie; + __interceptor_fprintf; + __interceptor_fputs; + __interceptor_fread; + __interceptor_free; + __interceptor_freopen; + __interceptor_freopen64; + __interceptor_frexp; + __interceptor_frexpf; + __interceptor_frexpl; + __interceptor_fscanf; + __interceptor_fstatfs; + __interceptor_fstatfs64; + __interceptor_fstatvfs; + __interceptor_fstatvfs64; + __interceptor_ftime; + __interceptor_fwrite; + __interceptor_get_current_dir_name; + __interceptor_getaddrinfo; + __interceptor_getcwd; + __interceptor_getdelim; + __interceptor_getgrent; + __interceptor_getgrent_r; + __interceptor_getgrgid; + __interceptor_getgrgid_r; + __interceptor_getgrnam; + __interceptor_getgrnam_r; + __interceptor_getgroups; + __interceptor_gethostbyaddr; + __interceptor_gethostbyaddr_r; + __interceptor_gethostbyname; + __interceptor_gethostbyname2; + __interceptor_gethostbyname2_r; + __interceptor_gethostbyname_r; + __interceptor_gethostent; + __interceptor_gethostent_r; + 
__interceptor_getifaddrs; + __interceptor_getitimer; + __interceptor_getline; + __interceptor_getloadavg; + __interceptor_getmntent; + __interceptor_getmntent_r; + __interceptor_getnameinfo; + __interceptor_getpass; + __interceptor_getpeername; + __interceptor_getprotobyname; + __interceptor_getprotobyname_r; + __interceptor_getprotobynumber; + __interceptor_getprotobynumber_r; + __interceptor_getprotoent; + __interceptor_getprotoent_r; + __interceptor_getpwent; + __interceptor_getpwent_r; + __interceptor_getpwnam; + __interceptor_getpwnam_r; + __interceptor_getpwuid; + __interceptor_getpwuid_r; + __interceptor_getresgid; + __interceptor_getresuid; + __interceptor_getsockname; + __interceptor_getsockopt; + __interceptor_getusershell; + __interceptor_getutent; + __interceptor_getutid; + __interceptor_getutline; + __interceptor_getutxent; + __interceptor_getutxid; + __interceptor_getutxline; + __interceptor_getxattr; + __interceptor_glob; + __interceptor_glob64; + __interceptor_gmtime; + __interceptor_gmtime_r; + __interceptor_iconv; + __interceptor_if_indextoname; + __interceptor_if_nametoindex; + __interceptor_index; + __interceptor_inet_aton; + __interceptor_inet_ntop; + __interceptor_inet_pton; + __interceptor_initgroups; + __interceptor_ioctl; + __interceptor_lgamma; + __interceptor_lgamma_r; + __interceptor_lgammaf; + __interceptor_lgammaf_r; + __interceptor_lgammal; + __interceptor_lgammal_r; + __interceptor_lgetxattr; + __interceptor_listxattr; + __interceptor_llistxattr; + __interceptor_localtime; + __interceptor_localtime_r; + __interceptor_longjmp; + __interceptor_lrand48_r; + __interceptor_mallinfo; + __interceptor_malloc; + __interceptor_malloc_stats; + __interceptor_malloc_usable_size; + __interceptor_mallopt; + __interceptor_mbsnrtowcs; + __interceptor_mbsrtowcs; + __interceptor_mbstowcs; + __interceptor_mcheck; + __interceptor_mcheck_pedantic; + __interceptor_memalign; + __interceptor_memchr; + __interceptor_memcmp; + __interceptor_memcpy; + __interceptor_memmem; + __interceptor_memmove; + __interceptor_memrchr; + __interceptor_memset; + __interceptor_mincore; + __interceptor_mktime; + __interceptor_mlock; + __interceptor_mlockall; + __interceptor_mmap; + __interceptor_mmap64; + __interceptor_modf; + __interceptor_modff; + __interceptor_modfl; + __interceptor_mprobe; + __interceptor_mprotect; + __interceptor_msgrcv; + __interceptor_msgsnd; + __interceptor_munlock; + __interceptor_munlockall; + __interceptor_name_to_handle_at; + __interceptor_open_by_handle_at; + __interceptor_open_memstream; + __interceptor_open_wmemstream; + __interceptor_opendir; + __interceptor_pclose; + __interceptor_poll; + __interceptor_popen; + __interceptor_posix_memalign; + __interceptor_ppoll; + __interceptor_prctl; + __interceptor_pread; + __interceptor_pread64; + __interceptor_preadv; + __interceptor_preadv64; + __interceptor_printf; + __interceptor_process_vm_readv; + __interceptor_process_vm_writev; + __interceptor_pthread_attr_getaffinity_np; + __interceptor_pthread_attr_getdetachstate; + __interceptor_pthread_attr_getguardsize; + __interceptor_pthread_attr_getinheritsched; + __interceptor_pthread_attr_getschedparam; + __interceptor_pthread_attr_getschedpolicy; + __interceptor_pthread_attr_getscope; + __interceptor_pthread_attr_getstack; + __interceptor_pthread_attr_getstacksize; + __interceptor_pthread_barrierattr_getpshared; + __interceptor_pthread_condattr_getclock; + __interceptor_pthread_condattr_getpshared; + __interceptor_pthread_create; + __interceptor_pthread_getname_np; + 
__interceptor_pthread_getschedparam; + __interceptor_pthread_join; + __interceptor_pthread_mutex_lock; + __interceptor_pthread_mutex_unlock; + __interceptor_pthread_mutexattr_getprioceiling; + __interceptor_pthread_mutexattr_getprotocol; + __interceptor_pthread_mutexattr_getpshared; + __interceptor_pthread_mutexattr_getrobust; + __interceptor_pthread_mutexattr_getrobust_np; + __interceptor_pthread_mutexattr_gettype; + __interceptor_pthread_rwlockattr_getkind_np; + __interceptor_pthread_rwlockattr_getpshared; + __interceptor_pthread_setcancelstate; + __interceptor_pthread_setcanceltype; + __interceptor_pthread_setname_np; + __interceptor_pthread_sigmask; + __interceptor_ptrace; + __interceptor_ptsname; + __interceptor_ptsname_r; + __interceptor_puts; + __interceptor_pututxline; + __interceptor_pvalloc; + __interceptor_pwrite; + __interceptor_pwrite64; + __interceptor_pwritev; + __interceptor_pwritev64; + __interceptor_qsort; + __interceptor_qsort_r; + __interceptor_rand_r; + __interceptor_random_r; + __interceptor_read; + __interceptor_readdir; + __interceptor_readdir64; + __interceptor_readdir64_r; + __interceptor_readdir_r; + __interceptor_readlink; + __interceptor_readlinkat; + __interceptor_readv; + __interceptor_realloc; + __interceptor_reallocarray; + __interceptor_realpath; + __interceptor_recv; + __interceptor_recvfrom; + __interceptor_recvmmsg; + __interceptor_recvmsg; + __interceptor_regcomp; + __interceptor_regerror; + __interceptor_regexec; + __interceptor_regfree; + __interceptor_remquo; + __interceptor_remquof; + __interceptor_remquol; + __interceptor_scandir; + __interceptor_scandir64; + __interceptor_scanf; + __interceptor_sched_getaffinity; + __interceptor_sched_getparam; + __interceptor_sem_destroy; + __interceptor_sem_getvalue; + __interceptor_sem_init; + __interceptor_sem_post; + __interceptor_sem_timedwait; + __interceptor_sem_trywait; + __interceptor_sem_wait; + __interceptor_send; + __interceptor_sendmmsg; + __interceptor_sendmsg; + __interceptor_sendto; + __interceptor_setbuf; + __interceptor_setbuffer; + __interceptor_setgrent; + __interceptor_setitimer; + __interceptor_setlinebuf; + __interceptor_setlocale; + __interceptor_setpwent; + __interceptor_setvbuf; + __interceptor_shmctl; + __interceptor_sigaction; + __interceptor_sigaltstack; + __interceptor_sigandset; + __interceptor_sigemptyset; + __interceptor_sigfillset; + __interceptor_siglongjmp; + __interceptor_signal; + __interceptor_sigorset; + __interceptor_sigpending; + __interceptor_sigprocmask; + __interceptor_sigtimedwait; + __interceptor_sigwait; + __interceptor_sigwaitinfo; + __interceptor_sincos; + __interceptor_sincosf; + __interceptor_sincosl; + __interceptor_snprintf; + __interceptor_sprintf; + __interceptor_sscanf; + __interceptor_statfs; + __interceptor_statfs64; + __interceptor_statvfs; + __interceptor_statvfs64; + __interceptor_strcasecmp; + __interceptor_strcasestr; + __interceptor_strcat; + __interceptor_strchr; + __interceptor_strchrnul; + __interceptor_strcmp; + __interceptor_strcpy; + __interceptor_strcspn; + __interceptor_strdup; + __interceptor_strerror; + __interceptor_strerror_r; + __interceptor_strlen; + __interceptor_strncasecmp; + __interceptor_strncat; + __interceptor_strncmp; + __interceptor_strncpy; + __interceptor_strndup; + __interceptor_strnlen; + __interceptor_strpbrk; + __interceptor_strptime; + __interceptor_strrchr; + __interceptor_strspn; + __interceptor_strstr; + __interceptor_strtoimax; + __interceptor_strtok; + __interceptor_strtol; + __interceptor_strtoll; + 
__interceptor_strtoumax; + __interceptor_strxfrm; + __interceptor_strxfrm_l; + __interceptor_swapcontext; + __interceptor_sysinfo; + __interceptor_tcgetattr; + __interceptor_tempnam; + __interceptor_textdomain; + __interceptor_time; + __interceptor_timerfd_gettime; + __interceptor_timerfd_settime; + __interceptor_times; + __interceptor_tmpnam; + __interceptor_tmpnam_r; + __interceptor_tsearch; + __interceptor_ttyname; + __interceptor_ttyname_r; + __interceptor_uname; + __interceptor_valloc; + __interceptor_vasprintf; + __interceptor_vfork; + __interceptor_vfprintf; + __interceptor_vfscanf; + __interceptor_vprintf; + __interceptor_vscanf; + __interceptor_vsnprintf; + __interceptor_vsprintf; + __interceptor_vsscanf; + __interceptor_wait; + __interceptor_wait3; + __interceptor_wait4; + __interceptor_waitid; + __interceptor_waitpid; + __interceptor_wcrtomb; + __interceptor_wcscat; + __interceptor_wcsdup; + __interceptor_wcslen; + __interceptor_wcsncat; + __interceptor_wcsnlen; + __interceptor_wcsnrtombs; + __interceptor_wcsrtombs; + __interceptor_wcstombs; + __interceptor_wcsxfrm; + __interceptor_wcsxfrm_l; + __interceptor_wctomb; + __interceptor_wordexp; + __interceptor_write; + __interceptor_writev; + __interceptor_xdr_bool; + __interceptor_xdr_bytes; + __interceptor_xdr_char; + __interceptor_xdr_destroy; + __interceptor_xdr_double; + __interceptor_xdr_enum; + __interceptor_xdr_float; + __interceptor_xdr_hyper; + __interceptor_xdr_int; + __interceptor_xdr_int16_t; + __interceptor_xdr_int32_t; + __interceptor_xdr_int64_t; + __interceptor_xdr_int8_t; + __interceptor_xdr_long; + __interceptor_xdr_longlong_t; + __interceptor_xdr_quad_t; + __interceptor_xdr_short; + __interceptor_xdr_string; + __interceptor_xdr_u_char; + __interceptor_xdr_u_hyper; + __interceptor_xdr_u_int; + __interceptor_xdr_u_long; + __interceptor_xdr_u_longlong_t; + __interceptor_xdr_u_quad_t; + __interceptor_xdr_u_short; + __interceptor_xdr_uint16_t; + __interceptor_xdr_uint32_t; + __interceptor_xdr_uint64_t; + __interceptor_xdr_uint8_t; + __interceptor_xdrmem_create; + __interceptor_xdrrec_create; + __interceptor_xdrstdio_create; + __isoc99_fprintf; + __isoc99_fscanf; + __isoc99_printf; + __isoc99_scanf; + __isoc99_snprintf; + __isoc99_sprintf; + __isoc99_sscanf; + __isoc99_vfprintf; + __isoc99_vfscanf; + __isoc99_vprintf; + __isoc99_vscanf; + __isoc99_vsnprintf; + __isoc99_vsprintf; + __isoc99_vsscanf; + __libc_memalign; + __longjmp_chk; + __lsan_*; + __lxstat; + __lxstat64; + __overflow; + __pthread_mutex_lock; + __pthread_mutex_unlock; + __sancov_*; + __sanitizer_acquire_crash_state; + __sanitizer_annotate_contiguous_container; + __sanitizer_contiguous_container_find_bad_address; + __sanitizer_cov_8bit_counters_init; + __sanitizer_cov_bool_flag_init; + __sanitizer_cov_dump; + __sanitizer_cov_pcs_init; + __sanitizer_cov_reset; + __sanitizer_cov_trace_cmp; + __sanitizer_cov_trace_cmp1; + __sanitizer_cov_trace_cmp2; + __sanitizer_cov_trace_cmp4; + __sanitizer_cov_trace_cmp8; + __sanitizer_cov_trace_const_cmp1; + __sanitizer_cov_trace_const_cmp2; + __sanitizer_cov_trace_const_cmp4; + __sanitizer_cov_trace_const_cmp8; + __sanitizer_cov_trace_div4; + __sanitizer_cov_trace_div8; + __sanitizer_cov_trace_gep; + __sanitizer_cov_trace_pc_guard; + __sanitizer_cov_trace_pc_guard_init; + __sanitizer_cov_trace_pc_indir; + __sanitizer_cov_trace_switch; + __sanitizer_dump_coverage; + __sanitizer_dump_trace_pc_guard_coverage; + __sanitizer_finish_switch_fiber; + __sanitizer_get_allocated_size; + __sanitizer_get_current_allocated_bytes; + 
__sanitizer_get_estimated_allocated_size; + __sanitizer_get_free_bytes; + __sanitizer_get_heap_size; + __sanitizer_get_module_and_offset_for_pc; + __sanitizer_get_ownership; + __sanitizer_get_report_path; + __sanitizer_get_unmapped_bytes; + __sanitizer_install_malloc_and_free_hooks; + __sanitizer_on_print; + __sanitizer_print_memory_profile; + __sanitizer_print_stack_trace; + __sanitizer_ptr_cmp; + __sanitizer_ptr_sub; + __sanitizer_purge_allocator; + __sanitizer_report_error_summary; + __sanitizer_sandbox_on_notify; + __sanitizer_set_death_callback; + __sanitizer_set_report_fd; + __sanitizer_set_report_path; + __sanitizer_start_switch_fiber; + __sanitizer_symbolize_global; + __sanitizer_symbolize_pc; + __sanitizer_syscall_post_impl_accept; + __sanitizer_syscall_post_impl_accept4; + __sanitizer_syscall_post_impl_access; + __sanitizer_syscall_post_impl_acct; + __sanitizer_syscall_post_impl_add_key; + __sanitizer_syscall_post_impl_adjtimex; + __sanitizer_syscall_post_impl_alarm; + __sanitizer_syscall_post_impl_bdflush; + __sanitizer_syscall_post_impl_bind; + __sanitizer_syscall_post_impl_brk; + __sanitizer_syscall_post_impl_capget; + __sanitizer_syscall_post_impl_capset; + __sanitizer_syscall_post_impl_chdir; + __sanitizer_syscall_post_impl_chmod; + __sanitizer_syscall_post_impl_chown; + __sanitizer_syscall_post_impl_chroot; + __sanitizer_syscall_post_impl_clock_adjtime; + __sanitizer_syscall_post_impl_clock_getres; + __sanitizer_syscall_post_impl_clock_gettime; + __sanitizer_syscall_post_impl_clock_nanosleep; + __sanitizer_syscall_post_impl_clock_settime; + __sanitizer_syscall_post_impl_close; + __sanitizer_syscall_post_impl_connect; + __sanitizer_syscall_post_impl_creat; + __sanitizer_syscall_post_impl_delete_module; + __sanitizer_syscall_post_impl_dup; + __sanitizer_syscall_post_impl_dup2; + __sanitizer_syscall_post_impl_dup3; + __sanitizer_syscall_post_impl_epoll_create; + __sanitizer_syscall_post_impl_epoll_create1; + __sanitizer_syscall_post_impl_epoll_ctl; + __sanitizer_syscall_post_impl_epoll_pwait; + __sanitizer_syscall_post_impl_epoll_pwait2; + __sanitizer_syscall_post_impl_epoll_wait; + __sanitizer_syscall_post_impl_eventfd; + __sanitizer_syscall_post_impl_eventfd2; + __sanitizer_syscall_post_impl_exit; + __sanitizer_syscall_post_impl_exit_group; + __sanitizer_syscall_post_impl_faccessat; + __sanitizer_syscall_post_impl_fchdir; + __sanitizer_syscall_post_impl_fchmod; + __sanitizer_syscall_post_impl_fchmodat; + __sanitizer_syscall_post_impl_fchown; + __sanitizer_syscall_post_impl_fchownat; + __sanitizer_syscall_post_impl_fcntl; + __sanitizer_syscall_post_impl_fcntl64; + __sanitizer_syscall_post_impl_fdatasync; + __sanitizer_syscall_post_impl_fgetxattr; + __sanitizer_syscall_post_impl_flistxattr; + __sanitizer_syscall_post_impl_flock; + __sanitizer_syscall_post_impl_fork; + __sanitizer_syscall_post_impl_fremovexattr; + __sanitizer_syscall_post_impl_fsetxattr; + __sanitizer_syscall_post_impl_fstat; + __sanitizer_syscall_post_impl_fstat64; + __sanitizer_syscall_post_impl_fstatat64; + __sanitizer_syscall_post_impl_fstatfs; + __sanitizer_syscall_post_impl_fstatfs64; + __sanitizer_syscall_post_impl_fsync; + __sanitizer_syscall_post_impl_ftruncate; + __sanitizer_syscall_post_impl_futimesat; + __sanitizer_syscall_post_impl_get_mempolicy; + __sanitizer_syscall_post_impl_get_robust_list; + __sanitizer_syscall_post_impl_getcpu; + __sanitizer_syscall_post_impl_getcwd; + __sanitizer_syscall_post_impl_getdents; + __sanitizer_syscall_post_impl_getdents64; + __sanitizer_syscall_post_impl_getegid; 
+ __sanitizer_syscall_post_impl_geteuid; + __sanitizer_syscall_post_impl_getgid; + __sanitizer_syscall_post_impl_getgroups; + __sanitizer_syscall_post_impl_gethostname; + __sanitizer_syscall_post_impl_getitimer; + __sanitizer_syscall_post_impl_getpeername; + __sanitizer_syscall_post_impl_getpgid; + __sanitizer_syscall_post_impl_getpgrp; + __sanitizer_syscall_post_impl_getpid; + __sanitizer_syscall_post_impl_getppid; + __sanitizer_syscall_post_impl_getpriority; + __sanitizer_syscall_post_impl_getrandom; + __sanitizer_syscall_post_impl_getresgid; + __sanitizer_syscall_post_impl_getresuid; + __sanitizer_syscall_post_impl_getrlimit; + __sanitizer_syscall_post_impl_getrusage; + __sanitizer_syscall_post_impl_getsid; + __sanitizer_syscall_post_impl_getsockname; + __sanitizer_syscall_post_impl_getsockopt; + __sanitizer_syscall_post_impl_gettid; + __sanitizer_syscall_post_impl_gettimeofday; + __sanitizer_syscall_post_impl_getuid; + __sanitizer_syscall_post_impl_getxattr; + __sanitizer_syscall_post_impl_init_module; + __sanitizer_syscall_post_impl_inotify_add_watch; + __sanitizer_syscall_post_impl_inotify_init; + __sanitizer_syscall_post_impl_inotify_init1; + __sanitizer_syscall_post_impl_inotify_rm_watch; + __sanitizer_syscall_post_impl_io_cancel; + __sanitizer_syscall_post_impl_io_destroy; + __sanitizer_syscall_post_impl_io_getevents; + __sanitizer_syscall_post_impl_io_setup; + __sanitizer_syscall_post_impl_io_submit; + __sanitizer_syscall_post_impl_ioctl; + __sanitizer_syscall_post_impl_ioperm; + __sanitizer_syscall_post_impl_ioprio_get; + __sanitizer_syscall_post_impl_ioprio_set; + __sanitizer_syscall_post_impl_ipc; + __sanitizer_syscall_post_impl_kexec_load; + __sanitizer_syscall_post_impl_keyctl; + __sanitizer_syscall_post_impl_kill; + __sanitizer_syscall_post_impl_lchown; + __sanitizer_syscall_post_impl_lgetxattr; + __sanitizer_syscall_post_impl_link; + __sanitizer_syscall_post_impl_linkat; + __sanitizer_syscall_post_impl_listen; + __sanitizer_syscall_post_impl_listxattr; + __sanitizer_syscall_post_impl_llistxattr; + __sanitizer_syscall_post_impl_llseek; + __sanitizer_syscall_post_impl_lookup_dcookie; + __sanitizer_syscall_post_impl_lremovexattr; + __sanitizer_syscall_post_impl_lseek; + __sanitizer_syscall_post_impl_lsetxattr; + __sanitizer_syscall_post_impl_lstat; + __sanitizer_syscall_post_impl_lstat64; + __sanitizer_syscall_post_impl_madvise; + __sanitizer_syscall_post_impl_mbind; + __sanitizer_syscall_post_impl_migrate_pages; + __sanitizer_syscall_post_impl_mincore; + __sanitizer_syscall_post_impl_mkdir; + __sanitizer_syscall_post_impl_mkdirat; + __sanitizer_syscall_post_impl_mknod; + __sanitizer_syscall_post_impl_mknodat; + __sanitizer_syscall_post_impl_mlock; + __sanitizer_syscall_post_impl_mlockall; + __sanitizer_syscall_post_impl_mmap_pgoff; + __sanitizer_syscall_post_impl_mount; + __sanitizer_syscall_post_impl_move_pages; + __sanitizer_syscall_post_impl_mprotect; + __sanitizer_syscall_post_impl_mq_getsetattr; + __sanitizer_syscall_post_impl_mq_notify; + __sanitizer_syscall_post_impl_mq_open; + __sanitizer_syscall_post_impl_mq_timedreceive; + __sanitizer_syscall_post_impl_mq_timedsend; + __sanitizer_syscall_post_impl_mq_unlink; + __sanitizer_syscall_post_impl_mremap; + __sanitizer_syscall_post_impl_msgctl; + __sanitizer_syscall_post_impl_msgget; + __sanitizer_syscall_post_impl_msgrcv; + __sanitizer_syscall_post_impl_msgsnd; + __sanitizer_syscall_post_impl_msync; + __sanitizer_syscall_post_impl_munlock; + __sanitizer_syscall_post_impl_munlockall; + __sanitizer_syscall_post_impl_munmap; 
+ __sanitizer_syscall_post_impl_name_to_handle_at; + __sanitizer_syscall_post_impl_nanosleep; + __sanitizer_syscall_post_impl_newfstat; + __sanitizer_syscall_post_impl_newfstatat; + __sanitizer_syscall_post_impl_newlstat; + __sanitizer_syscall_post_impl_newstat; + __sanitizer_syscall_post_impl_newuname; + __sanitizer_syscall_post_impl_ni_syscall; + __sanitizer_syscall_post_impl_nice; + __sanitizer_syscall_post_impl_old_getrlimit; + __sanitizer_syscall_post_impl_old_mmap; + __sanitizer_syscall_post_impl_old_readdir; + __sanitizer_syscall_post_impl_old_select; + __sanitizer_syscall_post_impl_oldumount; + __sanitizer_syscall_post_impl_olduname; + __sanitizer_syscall_post_impl_open; + __sanitizer_syscall_post_impl_open_by_handle_at; + __sanitizer_syscall_post_impl_openat; + __sanitizer_syscall_post_impl_pause; + __sanitizer_syscall_post_impl_pciconfig_iobase; + __sanitizer_syscall_post_impl_pciconfig_read; + __sanitizer_syscall_post_impl_pciconfig_write; + __sanitizer_syscall_post_impl_perf_event_open; + __sanitizer_syscall_post_impl_personality; + __sanitizer_syscall_post_impl_pipe; + __sanitizer_syscall_post_impl_pipe2; + __sanitizer_syscall_post_impl_pivot_root; + __sanitizer_syscall_post_impl_poll; + __sanitizer_syscall_post_impl_ppoll; + __sanitizer_syscall_post_impl_pread64; + __sanitizer_syscall_post_impl_preadv; + __sanitizer_syscall_post_impl_prlimit64; + __sanitizer_syscall_post_impl_process_vm_readv; + __sanitizer_syscall_post_impl_process_vm_writev; + __sanitizer_syscall_post_impl_pselect6; + __sanitizer_syscall_post_impl_ptrace; + __sanitizer_syscall_post_impl_pwrite64; + __sanitizer_syscall_post_impl_pwritev; + __sanitizer_syscall_post_impl_quotactl; + __sanitizer_syscall_post_impl_read; + __sanitizer_syscall_post_impl_readlink; + __sanitizer_syscall_post_impl_readlinkat; + __sanitizer_syscall_post_impl_readv; + __sanitizer_syscall_post_impl_reboot; + __sanitizer_syscall_post_impl_recv; + __sanitizer_syscall_post_impl_recvfrom; + __sanitizer_syscall_post_impl_recvmmsg; + __sanitizer_syscall_post_impl_recvmsg; + __sanitizer_syscall_post_impl_remap_file_pages; + __sanitizer_syscall_post_impl_removexattr; + __sanitizer_syscall_post_impl_rename; + __sanitizer_syscall_post_impl_renameat; + __sanitizer_syscall_post_impl_request_key; + __sanitizer_syscall_post_impl_restart_syscall; + __sanitizer_syscall_post_impl_rmdir; + __sanitizer_syscall_post_impl_rt_sigaction; + __sanitizer_syscall_post_impl_rt_sigpending; + __sanitizer_syscall_post_impl_rt_sigprocmask; + __sanitizer_syscall_post_impl_rt_sigqueueinfo; + __sanitizer_syscall_post_impl_rt_sigtimedwait; + __sanitizer_syscall_post_impl_rt_tgsigqueueinfo; + __sanitizer_syscall_post_impl_sched_get_priority_max; + __sanitizer_syscall_post_impl_sched_get_priority_min; + __sanitizer_syscall_post_impl_sched_getaffinity; + __sanitizer_syscall_post_impl_sched_getparam; + __sanitizer_syscall_post_impl_sched_getscheduler; + __sanitizer_syscall_post_impl_sched_rr_get_interval; + __sanitizer_syscall_post_impl_sched_setaffinity; + __sanitizer_syscall_post_impl_sched_setparam; + __sanitizer_syscall_post_impl_sched_setscheduler; + __sanitizer_syscall_post_impl_sched_yield; + __sanitizer_syscall_post_impl_select; + __sanitizer_syscall_post_impl_semctl; + __sanitizer_syscall_post_impl_semget; + __sanitizer_syscall_post_impl_semop; + __sanitizer_syscall_post_impl_semtimedop; + __sanitizer_syscall_post_impl_send; + __sanitizer_syscall_post_impl_sendfile; + __sanitizer_syscall_post_impl_sendfile64; + __sanitizer_syscall_post_impl_sendmmsg; + 
__sanitizer_syscall_post_impl_sendmsg; + __sanitizer_syscall_post_impl_sendto; + __sanitizer_syscall_post_impl_set_mempolicy; + __sanitizer_syscall_post_impl_set_robust_list; + __sanitizer_syscall_post_impl_set_tid_address; + __sanitizer_syscall_post_impl_setdomainname; + __sanitizer_syscall_post_impl_setfsgid; + __sanitizer_syscall_post_impl_setfsuid; + __sanitizer_syscall_post_impl_setgid; + __sanitizer_syscall_post_impl_setgroups; + __sanitizer_syscall_post_impl_sethostname; + __sanitizer_syscall_post_impl_setitimer; + __sanitizer_syscall_post_impl_setns; + __sanitizer_syscall_post_impl_setpgid; + __sanitizer_syscall_post_impl_setpriority; + __sanitizer_syscall_post_impl_setregid; + __sanitizer_syscall_post_impl_setresgid; + __sanitizer_syscall_post_impl_setresuid; + __sanitizer_syscall_post_impl_setreuid; + __sanitizer_syscall_post_impl_setrlimit; + __sanitizer_syscall_post_impl_setsid; + __sanitizer_syscall_post_impl_setsockopt; + __sanitizer_syscall_post_impl_settimeofday; + __sanitizer_syscall_post_impl_setuid; + __sanitizer_syscall_post_impl_setxattr; + __sanitizer_syscall_post_impl_sgetmask; + __sanitizer_syscall_post_impl_shmat; + __sanitizer_syscall_post_impl_shmctl; + __sanitizer_syscall_post_impl_shmdt; + __sanitizer_syscall_post_impl_shmget; + __sanitizer_syscall_post_impl_shutdown; + __sanitizer_syscall_post_impl_sigaction; + __sanitizer_syscall_post_impl_sigaltstack; + __sanitizer_syscall_post_impl_signal; + __sanitizer_syscall_post_impl_signalfd; + __sanitizer_syscall_post_impl_signalfd4; + __sanitizer_syscall_post_impl_sigpending; + __sanitizer_syscall_post_impl_sigprocmask; + __sanitizer_syscall_post_impl_socket; + __sanitizer_syscall_post_impl_socketcall; + __sanitizer_syscall_post_impl_socketpair; + __sanitizer_syscall_post_impl_splice; + __sanitizer_syscall_post_impl_spu_create; + __sanitizer_syscall_post_impl_spu_run; + __sanitizer_syscall_post_impl_ssetmask; + __sanitizer_syscall_post_impl_stat; + __sanitizer_syscall_post_impl_stat64; + __sanitizer_syscall_post_impl_statfs; + __sanitizer_syscall_post_impl_statfs64; + __sanitizer_syscall_post_impl_stime; + __sanitizer_syscall_post_impl_swapoff; + __sanitizer_syscall_post_impl_swapon; + __sanitizer_syscall_post_impl_symlink; + __sanitizer_syscall_post_impl_symlinkat; + __sanitizer_syscall_post_impl_sync; + __sanitizer_syscall_post_impl_syncfs; + __sanitizer_syscall_post_impl_sysctl; + __sanitizer_syscall_post_impl_sysfs; + __sanitizer_syscall_post_impl_sysinfo; + __sanitizer_syscall_post_impl_syslog; + __sanitizer_syscall_post_impl_tee; + __sanitizer_syscall_post_impl_tgkill; + __sanitizer_syscall_post_impl_time; + __sanitizer_syscall_post_impl_timer_create; + __sanitizer_syscall_post_impl_timer_delete; + __sanitizer_syscall_post_impl_timer_getoverrun; + __sanitizer_syscall_post_impl_timer_gettime; + __sanitizer_syscall_post_impl_timer_settime; + __sanitizer_syscall_post_impl_timerfd_create; + __sanitizer_syscall_post_impl_timerfd_gettime; + __sanitizer_syscall_post_impl_timerfd_settime; + __sanitizer_syscall_post_impl_times; + __sanitizer_syscall_post_impl_tkill; + __sanitizer_syscall_post_impl_truncate; + __sanitizer_syscall_post_impl_umask; + __sanitizer_syscall_post_impl_umount; + __sanitizer_syscall_post_impl_uname; + __sanitizer_syscall_post_impl_unlink; + __sanitizer_syscall_post_impl_unlinkat; + __sanitizer_syscall_post_impl_unshare; + __sanitizer_syscall_post_impl_uselib; + __sanitizer_syscall_post_impl_ustat; + __sanitizer_syscall_post_impl_utime; + __sanitizer_syscall_post_impl_utimensat; + 
__sanitizer_syscall_post_impl_utimes; + __sanitizer_syscall_post_impl_vfork; + __sanitizer_syscall_post_impl_vhangup; + __sanitizer_syscall_post_impl_vmsplice; + __sanitizer_syscall_post_impl_wait4; + __sanitizer_syscall_post_impl_waitid; + __sanitizer_syscall_post_impl_waitpid; + __sanitizer_syscall_post_impl_write; + __sanitizer_syscall_post_impl_writev; + __sanitizer_syscall_pre_impl_accept; + __sanitizer_syscall_pre_impl_accept4; + __sanitizer_syscall_pre_impl_access; + __sanitizer_syscall_pre_impl_acct; + __sanitizer_syscall_pre_impl_add_key; + __sanitizer_syscall_pre_impl_adjtimex; + __sanitizer_syscall_pre_impl_alarm; + __sanitizer_syscall_pre_impl_bdflush; + __sanitizer_syscall_pre_impl_bind; + __sanitizer_syscall_pre_impl_brk; + __sanitizer_syscall_pre_impl_capget; + __sanitizer_syscall_pre_impl_capset; + __sanitizer_syscall_pre_impl_chdir; + __sanitizer_syscall_pre_impl_chmod; + __sanitizer_syscall_pre_impl_chown; + __sanitizer_syscall_pre_impl_chroot; + __sanitizer_syscall_pre_impl_clock_adjtime; + __sanitizer_syscall_pre_impl_clock_getres; + __sanitizer_syscall_pre_impl_clock_gettime; + __sanitizer_syscall_pre_impl_clock_nanosleep; + __sanitizer_syscall_pre_impl_clock_settime; + __sanitizer_syscall_pre_impl_close; + __sanitizer_syscall_pre_impl_connect; + __sanitizer_syscall_pre_impl_creat; + __sanitizer_syscall_pre_impl_delete_module; + __sanitizer_syscall_pre_impl_dup; + __sanitizer_syscall_pre_impl_dup2; + __sanitizer_syscall_pre_impl_dup3; + __sanitizer_syscall_pre_impl_epoll_create; + __sanitizer_syscall_pre_impl_epoll_create1; + __sanitizer_syscall_pre_impl_epoll_ctl; + __sanitizer_syscall_pre_impl_epoll_pwait; + __sanitizer_syscall_pre_impl_epoll_pwait2; + __sanitizer_syscall_pre_impl_epoll_wait; + __sanitizer_syscall_pre_impl_eventfd; + __sanitizer_syscall_pre_impl_eventfd2; + __sanitizer_syscall_pre_impl_exit; + __sanitizer_syscall_pre_impl_exit_group; + __sanitizer_syscall_pre_impl_faccessat; + __sanitizer_syscall_pre_impl_fchdir; + __sanitizer_syscall_pre_impl_fchmod; + __sanitizer_syscall_pre_impl_fchmodat; + __sanitizer_syscall_pre_impl_fchown; + __sanitizer_syscall_pre_impl_fchownat; + __sanitizer_syscall_pre_impl_fcntl; + __sanitizer_syscall_pre_impl_fcntl64; + __sanitizer_syscall_pre_impl_fdatasync; + __sanitizer_syscall_pre_impl_fgetxattr; + __sanitizer_syscall_pre_impl_flistxattr; + __sanitizer_syscall_pre_impl_flock; + __sanitizer_syscall_pre_impl_fork; + __sanitizer_syscall_pre_impl_fremovexattr; + __sanitizer_syscall_pre_impl_fsetxattr; + __sanitizer_syscall_pre_impl_fstat; + __sanitizer_syscall_pre_impl_fstat64; + __sanitizer_syscall_pre_impl_fstatat64; + __sanitizer_syscall_pre_impl_fstatfs; + __sanitizer_syscall_pre_impl_fstatfs64; + __sanitizer_syscall_pre_impl_fsync; + __sanitizer_syscall_pre_impl_ftruncate; + __sanitizer_syscall_pre_impl_futimesat; + __sanitizer_syscall_pre_impl_get_mempolicy; + __sanitizer_syscall_pre_impl_get_robust_list; + __sanitizer_syscall_pre_impl_getcpu; + __sanitizer_syscall_pre_impl_getcwd; + __sanitizer_syscall_pre_impl_getdents; + __sanitizer_syscall_pre_impl_getdents64; + __sanitizer_syscall_pre_impl_getegid; + __sanitizer_syscall_pre_impl_geteuid; + __sanitizer_syscall_pre_impl_getgid; + __sanitizer_syscall_pre_impl_getgroups; + __sanitizer_syscall_pre_impl_gethostname; + __sanitizer_syscall_pre_impl_getitimer; + __sanitizer_syscall_pre_impl_getpeername; + __sanitizer_syscall_pre_impl_getpgid; + __sanitizer_syscall_pre_impl_getpgrp; + __sanitizer_syscall_pre_impl_getpid; + __sanitizer_syscall_pre_impl_getppid; + 
__sanitizer_syscall_pre_impl_getpriority; + __sanitizer_syscall_pre_impl_getrandom; + __sanitizer_syscall_pre_impl_getresgid; + __sanitizer_syscall_pre_impl_getresuid; + __sanitizer_syscall_pre_impl_getrlimit; + __sanitizer_syscall_pre_impl_getrusage; + __sanitizer_syscall_pre_impl_getsid; + __sanitizer_syscall_pre_impl_getsockname; + __sanitizer_syscall_pre_impl_getsockopt; + __sanitizer_syscall_pre_impl_gettid; + __sanitizer_syscall_pre_impl_gettimeofday; + __sanitizer_syscall_pre_impl_getuid; + __sanitizer_syscall_pre_impl_getxattr; + __sanitizer_syscall_pre_impl_init_module; + __sanitizer_syscall_pre_impl_inotify_add_watch; + __sanitizer_syscall_pre_impl_inotify_init; + __sanitizer_syscall_pre_impl_inotify_init1; + __sanitizer_syscall_pre_impl_inotify_rm_watch; + __sanitizer_syscall_pre_impl_io_cancel; + __sanitizer_syscall_pre_impl_io_destroy; + __sanitizer_syscall_pre_impl_io_getevents; + __sanitizer_syscall_pre_impl_io_setup; + __sanitizer_syscall_pre_impl_io_submit; + __sanitizer_syscall_pre_impl_ioctl; + __sanitizer_syscall_pre_impl_ioperm; + __sanitizer_syscall_pre_impl_ioprio_get; + __sanitizer_syscall_pre_impl_ioprio_set; + __sanitizer_syscall_pre_impl_ipc; + __sanitizer_syscall_pre_impl_kexec_load; + __sanitizer_syscall_pre_impl_keyctl; + __sanitizer_syscall_pre_impl_kill; + __sanitizer_syscall_pre_impl_lchown; + __sanitizer_syscall_pre_impl_lgetxattr; + __sanitizer_syscall_pre_impl_link; + __sanitizer_syscall_pre_impl_linkat; + __sanitizer_syscall_pre_impl_listen; + __sanitizer_syscall_pre_impl_listxattr; + __sanitizer_syscall_pre_impl_llistxattr; + __sanitizer_syscall_pre_impl_llseek; + __sanitizer_syscall_pre_impl_lookup_dcookie; + __sanitizer_syscall_pre_impl_lremovexattr; + __sanitizer_syscall_pre_impl_lseek; + __sanitizer_syscall_pre_impl_lsetxattr; + __sanitizer_syscall_pre_impl_lstat; + __sanitizer_syscall_pre_impl_lstat64; + __sanitizer_syscall_pre_impl_madvise; + __sanitizer_syscall_pre_impl_mbind; + __sanitizer_syscall_pre_impl_migrate_pages; + __sanitizer_syscall_pre_impl_mincore; + __sanitizer_syscall_pre_impl_mkdir; + __sanitizer_syscall_pre_impl_mkdirat; + __sanitizer_syscall_pre_impl_mknod; + __sanitizer_syscall_pre_impl_mknodat; + __sanitizer_syscall_pre_impl_mlock; + __sanitizer_syscall_pre_impl_mlockall; + __sanitizer_syscall_pre_impl_mmap_pgoff; + __sanitizer_syscall_pre_impl_mount; + __sanitizer_syscall_pre_impl_move_pages; + __sanitizer_syscall_pre_impl_mprotect; + __sanitizer_syscall_pre_impl_mq_getsetattr; + __sanitizer_syscall_pre_impl_mq_notify; + __sanitizer_syscall_pre_impl_mq_open; + __sanitizer_syscall_pre_impl_mq_timedreceive; + __sanitizer_syscall_pre_impl_mq_timedsend; + __sanitizer_syscall_pre_impl_mq_unlink; + __sanitizer_syscall_pre_impl_mremap; + __sanitizer_syscall_pre_impl_msgctl; + __sanitizer_syscall_pre_impl_msgget; + __sanitizer_syscall_pre_impl_msgrcv; + __sanitizer_syscall_pre_impl_msgsnd; + __sanitizer_syscall_pre_impl_msync; + __sanitizer_syscall_pre_impl_munlock; + __sanitizer_syscall_pre_impl_munlockall; + __sanitizer_syscall_pre_impl_munmap; + __sanitizer_syscall_pre_impl_name_to_handle_at; + __sanitizer_syscall_pre_impl_nanosleep; + __sanitizer_syscall_pre_impl_newfstat; + __sanitizer_syscall_pre_impl_newfstatat; + __sanitizer_syscall_pre_impl_newlstat; + __sanitizer_syscall_pre_impl_newstat; + __sanitizer_syscall_pre_impl_newuname; + __sanitizer_syscall_pre_impl_ni_syscall; + __sanitizer_syscall_pre_impl_nice; + __sanitizer_syscall_pre_impl_old_getrlimit; + __sanitizer_syscall_pre_impl_old_mmap; + 
__sanitizer_syscall_pre_impl_old_readdir; + __sanitizer_syscall_pre_impl_old_select; + __sanitizer_syscall_pre_impl_oldumount; + __sanitizer_syscall_pre_impl_olduname; + __sanitizer_syscall_pre_impl_open; + __sanitizer_syscall_pre_impl_open_by_handle_at; + __sanitizer_syscall_pre_impl_openat; + __sanitizer_syscall_pre_impl_pause; + __sanitizer_syscall_pre_impl_pciconfig_iobase; + __sanitizer_syscall_pre_impl_pciconfig_read; + __sanitizer_syscall_pre_impl_pciconfig_write; + __sanitizer_syscall_pre_impl_perf_event_open; + __sanitizer_syscall_pre_impl_personality; + __sanitizer_syscall_pre_impl_pipe; + __sanitizer_syscall_pre_impl_pipe2; + __sanitizer_syscall_pre_impl_pivot_root; + __sanitizer_syscall_pre_impl_poll; + __sanitizer_syscall_pre_impl_ppoll; + __sanitizer_syscall_pre_impl_pread64; + __sanitizer_syscall_pre_impl_preadv; + __sanitizer_syscall_pre_impl_prlimit64; + __sanitizer_syscall_pre_impl_process_vm_readv; + __sanitizer_syscall_pre_impl_process_vm_writev; + __sanitizer_syscall_pre_impl_pselect6; + __sanitizer_syscall_pre_impl_ptrace; + __sanitizer_syscall_pre_impl_pwrite64; + __sanitizer_syscall_pre_impl_pwritev; + __sanitizer_syscall_pre_impl_quotactl; + __sanitizer_syscall_pre_impl_read; + __sanitizer_syscall_pre_impl_readlink; + __sanitizer_syscall_pre_impl_readlinkat; + __sanitizer_syscall_pre_impl_readv; + __sanitizer_syscall_pre_impl_reboot; + __sanitizer_syscall_pre_impl_recv; + __sanitizer_syscall_pre_impl_recvfrom; + __sanitizer_syscall_pre_impl_recvmmsg; + __sanitizer_syscall_pre_impl_recvmsg; + __sanitizer_syscall_pre_impl_remap_file_pages; + __sanitizer_syscall_pre_impl_removexattr; + __sanitizer_syscall_pre_impl_rename; + __sanitizer_syscall_pre_impl_renameat; + __sanitizer_syscall_pre_impl_request_key; + __sanitizer_syscall_pre_impl_restart_syscall; + __sanitizer_syscall_pre_impl_rmdir; + __sanitizer_syscall_pre_impl_rt_sigaction; + __sanitizer_syscall_pre_impl_rt_sigpending; + __sanitizer_syscall_pre_impl_rt_sigprocmask; + __sanitizer_syscall_pre_impl_rt_sigqueueinfo; + __sanitizer_syscall_pre_impl_rt_sigtimedwait; + __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo; + __sanitizer_syscall_pre_impl_sched_get_priority_max; + __sanitizer_syscall_pre_impl_sched_get_priority_min; + __sanitizer_syscall_pre_impl_sched_getaffinity; + __sanitizer_syscall_pre_impl_sched_getparam; + __sanitizer_syscall_pre_impl_sched_getscheduler; + __sanitizer_syscall_pre_impl_sched_rr_get_interval; + __sanitizer_syscall_pre_impl_sched_setaffinity; + __sanitizer_syscall_pre_impl_sched_setparam; + __sanitizer_syscall_pre_impl_sched_setscheduler; + __sanitizer_syscall_pre_impl_sched_yield; + __sanitizer_syscall_pre_impl_select; + __sanitizer_syscall_pre_impl_semctl; + __sanitizer_syscall_pre_impl_semget; + __sanitizer_syscall_pre_impl_semop; + __sanitizer_syscall_pre_impl_semtimedop; + __sanitizer_syscall_pre_impl_send; + __sanitizer_syscall_pre_impl_sendfile; + __sanitizer_syscall_pre_impl_sendfile64; + __sanitizer_syscall_pre_impl_sendmmsg; + __sanitizer_syscall_pre_impl_sendmsg; + __sanitizer_syscall_pre_impl_sendto; + __sanitizer_syscall_pre_impl_set_mempolicy; + __sanitizer_syscall_pre_impl_set_robust_list; + __sanitizer_syscall_pre_impl_set_tid_address; + __sanitizer_syscall_pre_impl_setdomainname; + __sanitizer_syscall_pre_impl_setfsgid; + __sanitizer_syscall_pre_impl_setfsuid; + __sanitizer_syscall_pre_impl_setgid; + __sanitizer_syscall_pre_impl_setgroups; + __sanitizer_syscall_pre_impl_sethostname; + __sanitizer_syscall_pre_impl_setitimer; + __sanitizer_syscall_pre_impl_setns; + 
__sanitizer_syscall_pre_impl_setpgid; + __sanitizer_syscall_pre_impl_setpriority; + __sanitizer_syscall_pre_impl_setregid; + __sanitizer_syscall_pre_impl_setresgid; + __sanitizer_syscall_pre_impl_setresuid; + __sanitizer_syscall_pre_impl_setreuid; + __sanitizer_syscall_pre_impl_setrlimit; + __sanitizer_syscall_pre_impl_setsid; + __sanitizer_syscall_pre_impl_setsockopt; + __sanitizer_syscall_pre_impl_settimeofday; + __sanitizer_syscall_pre_impl_setuid; + __sanitizer_syscall_pre_impl_setxattr; + __sanitizer_syscall_pre_impl_sgetmask; + __sanitizer_syscall_pre_impl_shmat; + __sanitizer_syscall_pre_impl_shmctl; + __sanitizer_syscall_pre_impl_shmdt; + __sanitizer_syscall_pre_impl_shmget; + __sanitizer_syscall_pre_impl_shutdown; + __sanitizer_syscall_pre_impl_sigaction; + __sanitizer_syscall_pre_impl_sigaltstack; + __sanitizer_syscall_pre_impl_signal; + __sanitizer_syscall_pre_impl_signalfd; + __sanitizer_syscall_pre_impl_signalfd4; + __sanitizer_syscall_pre_impl_sigpending; + __sanitizer_syscall_pre_impl_sigprocmask; + __sanitizer_syscall_pre_impl_socket; + __sanitizer_syscall_pre_impl_socketcall; + __sanitizer_syscall_pre_impl_socketpair; + __sanitizer_syscall_pre_impl_splice; + __sanitizer_syscall_pre_impl_spu_create; + __sanitizer_syscall_pre_impl_spu_run; + __sanitizer_syscall_pre_impl_ssetmask; + __sanitizer_syscall_pre_impl_stat; + __sanitizer_syscall_pre_impl_stat64; + __sanitizer_syscall_pre_impl_statfs; + __sanitizer_syscall_pre_impl_statfs64; + __sanitizer_syscall_pre_impl_stime; + __sanitizer_syscall_pre_impl_swapoff; + __sanitizer_syscall_pre_impl_swapon; + __sanitizer_syscall_pre_impl_symlink; + __sanitizer_syscall_pre_impl_symlinkat; + __sanitizer_syscall_pre_impl_sync; + __sanitizer_syscall_pre_impl_syncfs; + __sanitizer_syscall_pre_impl_sysctl; + __sanitizer_syscall_pre_impl_sysfs; + __sanitizer_syscall_pre_impl_sysinfo; + __sanitizer_syscall_pre_impl_syslog; + __sanitizer_syscall_pre_impl_tee; + __sanitizer_syscall_pre_impl_tgkill; + __sanitizer_syscall_pre_impl_time; + __sanitizer_syscall_pre_impl_timer_create; + __sanitizer_syscall_pre_impl_timer_delete; + __sanitizer_syscall_pre_impl_timer_getoverrun; + __sanitizer_syscall_pre_impl_timer_gettime; + __sanitizer_syscall_pre_impl_timer_settime; + __sanitizer_syscall_pre_impl_timerfd_create; + __sanitizer_syscall_pre_impl_timerfd_gettime; + __sanitizer_syscall_pre_impl_timerfd_settime; + __sanitizer_syscall_pre_impl_times; + __sanitizer_syscall_pre_impl_tkill; + __sanitizer_syscall_pre_impl_truncate; + __sanitizer_syscall_pre_impl_umask; + __sanitizer_syscall_pre_impl_umount; + __sanitizer_syscall_pre_impl_uname; + __sanitizer_syscall_pre_impl_unlink; + __sanitizer_syscall_pre_impl_unlinkat; + __sanitizer_syscall_pre_impl_unshare; + __sanitizer_syscall_pre_impl_uselib; + __sanitizer_syscall_pre_impl_ustat; + __sanitizer_syscall_pre_impl_utime; + __sanitizer_syscall_pre_impl_utimensat; + __sanitizer_syscall_pre_impl_utimes; + __sanitizer_syscall_pre_impl_vfork; + __sanitizer_syscall_pre_impl_vhangup; + __sanitizer_syscall_pre_impl_vmsplice; + __sanitizer_syscall_pre_impl_wait4; + __sanitizer_syscall_pre_impl_waitid; + __sanitizer_syscall_pre_impl_waitpid; + __sanitizer_syscall_pre_impl_write; + __sanitizer_syscall_pre_impl_writev; + __sanitizer_unaligned_load16; + __sanitizer_unaligned_load32; + __sanitizer_unaligned_load64; + __sanitizer_unaligned_store16; + __sanitizer_unaligned_store32; + __sanitizer_unaligned_store64; + __sanitizer_verify_contiguous_container; + __sanitizer_weak_hook_memcmp; + __sanitizer_weak_hook_memmem; + 
__sanitizer_weak_hook_strcasecmp; + __sanitizer_weak_hook_strcasestr; + __sanitizer_weak_hook_strcmp; + __sanitizer_weak_hook_strncasecmp; + __sanitizer_weak_hook_strncmp; + __sanitizer_weak_hook_strstr; + __snprintf_chk; + __sprintf_chk; + __strdup; + __strndup; + __strxfrm_l; + __tls_get_addr; + __ubsan_*; + __uflow; + __underflow; + __vsnprintf_chk; + __vsprintf_chk; + __wcsxfrm_l; + __woverflow; + __wuflow; + __wunderflow; + __xpg_strerror_r; + __xstat; + __xstat64; + _exit; + _longjmp; + _obstack_begin; + _obstack_begin_1; + _obstack_newchunk; + accept; + accept4; + aligned_alloc; + asctime; + asctime_r; + asprintf; + atoi; + atol; + atoll; + backtrace; + backtrace_symbols; + bcmp; + bsearch; + bzero; + calloc; + canonicalize_file_name; + capget; + capset; + cfree; + clock_getcpuclockid; + clock_getres; + clock_gettime; + clock_settime; + confstr; + crypt; + crypt_r; + ctermid; + ctime; + ctime_r; + dlclose; + dlopen; + drand48_r; + endgrent; + endpwent; + ether_aton; + ether_aton_r; + ether_hostton; + ether_line; + ether_ntoa; + ether_ntoa_r; + ether_ntohost; + eventfd_read; + eventfd_write; + fclose; + fdopen; + fflush; + fgetgrent; + fgetgrent_r; + fgetpwent; + fgetpwent_r; + fgets; + fgetxattr; + flistxattr; + fmemopen; + fopen; + fopen64; + fopencookie; + fprintf; + fputs; + fread; + free; + freopen; + freopen64; + frexp; + frexpf; + frexpl; + fscanf; + fstatfs; + fstatfs64; + fstatvfs; + fstatvfs64; + ftime; + fwrite; + get_current_dir_name; + getaddrinfo; + getcwd; + getdelim; + getgrent; + getgrent_r; + getgrgid; + getgrgid_r; + getgrnam; + getgrnam_r; + getgroups; + gethostbyaddr; + gethostbyaddr_r; + gethostbyname; + gethostbyname2; + gethostbyname2_r; + gethostbyname_r; + gethostent; + gethostent_r; + getifaddrs; + getitimer; + getline; + getloadavg; + getmntent; + getmntent_r; + getnameinfo; + getpass; + getpeername; + getprotobyname; + getprotobyname_r; + getprotobynumber; + getprotobynumber_r; + getprotoent; + getprotoent_r; + getpwent; + getpwent_r; + getpwnam; + getpwnam_r; + getpwuid; + getpwuid_r; + getresgid; + getresuid; + getsockname; + getsockopt; + getusershell; + getutent; + getutid; + getutline; + getutxent; + getutxid; + getutxline; + getxattr; + glob; + glob64; + gmtime; + gmtime_r; + iconv; + if_indextoname; + if_nametoindex; + index; + inet_aton; + inet_ntop; + inet_pton; + initgroups; + ioctl; + lgamma; + lgamma_r; + lgammaf; + lgammaf_r; + lgammal; + lgammal_r; + lgetxattr; + listxattr; + llistxattr; + localtime; + localtime_r; + longjmp; + lrand48_r; + mallinfo; + malloc; + malloc_stats; + malloc_usable_size; + mallopt; + mbsnrtowcs; + mbsrtowcs; + mbstowcs; + mcheck; + mcheck_pedantic; + memalign; + memchr; + memcmp; + memmem; + memmove; + memrchr; + memset; + mincore; + mktime; + mlock; + mlockall; + mmap; + mmap64; + modf; + modff; + modfl; + mprobe; + mprotect; + msgrcv; + msgsnd; + munlock; + munlockall; + name_to_handle_at; + open_by_handle_at; + open_memstream; + open_wmemstream; + opendir; + pclose; + poll; + popen; + posix_memalign; + ppoll; + prctl; + pread; + pread64; + preadv; + preadv64; + printf; + process_vm_readv; + process_vm_writev; + pthread_attr_getdetachstate; + pthread_attr_getguardsize; + pthread_attr_getinheritsched; + pthread_attr_getschedparam; + pthread_attr_getschedpolicy; + pthread_attr_getscope; + pthread_attr_getstack; + pthread_attr_getstacksize; + pthread_barrierattr_getpshared; + pthread_condattr_getclock; + pthread_condattr_getpshared; + pthread_create; + pthread_getname_np; + pthread_getschedparam; + pthread_join; + 
+  pthread_mutex_lock;
+  pthread_mutex_unlock;
+  pthread_mutexattr_getprioceiling;
+  pthread_mutexattr_getprotocol;
+  pthread_mutexattr_getpshared;
+  pthread_mutexattr_getrobust;
+  pthread_mutexattr_getrobust_np;
+  pthread_mutexattr_gettype;
+  pthread_rwlockattr_getkind_np;
+  pthread_rwlockattr_getpshared;
+  pthread_setcancelstate;
+  pthread_setcanceltype;
+  pthread_setname_np;
+  pthread_sigmask;
+  ptrace;
+  ptsname;
+  ptsname_r;
+  puts;
+  pututxline;
+  pvalloc;
+  pwrite;
+  pwrite64;
+  pwritev;
+  pwritev64;
+  qsort;
+  qsort_r;
+  rand_r;
+  random_r;
+  read;
+  readdir;
+  readdir64;
+  readdir64_r;
+  readdir_r;
+  readlink;
+  readlinkat;
+  readv;
+  realloc;
+  reallocarray;
+  recv;
+  recvfrom;
+  recvmmsg;
+  recvmsg;
+  regcomp;
+  regerror;
+  regexec;
+  regfree;
+  remquo;
+  remquof;
+  remquol;
+  scandir;
+  scandir64;
+  scanf;
+  sched_getparam;
+  sem_destroy;
+  sem_getvalue;
+  sem_init;
+  sem_post;
+  sem_timedwait;
+  sem_trywait;
+  sem_wait;
+  send;
+  sendmmsg;
+  sendmsg;
+  sendto;
+  setbuf;
+  setbuffer;
+  setgrent;
+  setitimer;
+  setlinebuf;
+  setlocale;
+  setpwent;
+  setvbuf;
+  shmctl;
+  sigaction;
+  sigaltstack;
+  sigandset;
+  sigemptyset;
+  sigfillset;
+  siglongjmp;
+  signal;
+  sigorset;
+  sigpending;
+  sigprocmask;
+  sigtimedwait;
+  sigwait;
+  sigwaitinfo;
+  sincos;
+  sincosf;
+  sincosl;
+  snprintf;
+  sprintf;
+  sscanf;
+  statfs;
+  statfs64;
+  statvfs;
+  statvfs64;
+  strcasecmp;
+  strcasestr;
+  strcat;
+  strchr;
+  strchrnul;
+  strcmp;
+  strcpy;
+  strcspn;
+  strdup;
+  strerror;
+  strerror_r;
+  strlen;
+  strncasecmp;
+  strncat;
+  strncmp;
+  strncpy;
+  strndup;
+  strnlen;
+  strpbrk;
+  strptime;
+  strrchr;
+  strspn;
+  strstr;
+  strtoimax;
+  strtok;
+  strtol;
+  strtoll;
+  strtoumax;
+  strxfrm;
+  strxfrm_l;
+  swapcontext;
+  sysinfo;
+  tcgetattr;
+  tempnam;
+  textdomain;
+  time;
+  timerfd_gettime;
+  timerfd_settime;
+  times;
+  tmpnam;
+  tmpnam_r;
+  tsearch;
+  ttyname;
+  ttyname_r;
+  uname;
+  valloc;
+  vasprintf;
+  vfork;
+  vfprintf;
+  vfscanf;
+  vprintf;
+  vscanf;
+  vsnprintf;
+  vsprintf;
+  vsscanf;
+  wait;
+  wait3;
+  wait4;
+  waitid;
+  waitpid;
+  wcrtomb;
+  wcscat;
+  wcsdup;
+  wcslen;
+  wcsncat;
+  wcsnlen;
+  wcsnrtombs;
+  wcsrtombs;
+  wcstombs;
+  wcsxfrm;
+  wcsxfrm_l;
+  wctomb;
+  wordexp;
+  write;
+  writev;
+  xdr_bool;
+  xdr_bytes;
+  xdr_char;
+  xdr_destroy;
+  xdr_double;
+  xdr_enum;
+  xdr_float;
+  xdr_hyper;
+  xdr_int;
+  xdr_int16_t;
+  xdr_int32_t;
+  xdr_int64_t;
+  xdr_int8_t;
+  xdr_long;
+  xdr_longlong_t;
+  xdr_quad_t;
+  xdr_short;
+  xdr_string;
+  xdr_u_char;
+  xdr_u_hyper;
+  xdr_u_int;
+  xdr_u_long;
+  xdr_u_longlong_t;
+  xdr_u_quad_t;
+  xdr_u_short;
+  xdr_uint16_t;
+  xdr_uint32_t;
+  xdr_uint64_t;
+  xdr_uint8_t;
+  xdrmem_create;
+  xdrrec_create;
+  xdrstdio_create;
+};
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-i386.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-i386.a
new file mode 100644
index 0000000..0f86ef9
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-i386.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-x86_64.a
new file mode 100644
index 0000000..c730959
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-x86_64.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-x86_64.a.syms
new file mode 100644
index 0000000..22a7cd0
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.asan_cxx-x86_64.a.syms
@@ -0,0 +1,22 @@
+{
+  _ZdaPv;
+  _ZdaPvRKSt9nothrow_t;
+  _ZdaPvSt11align_val_t;
+  _ZdaPvSt11align_val_tRKSt9nothrow_t;
+  _ZdaPvm;
+  _ZdaPvmSt11align_val_t;
+  _ZdlPv;
+  _ZdlPvRKSt9nothrow_t;
+  _ZdlPvSt11align_val_t;
+  _ZdlPvSt11align_val_tRKSt9nothrow_t;
+  _ZdlPvm;
+  _ZdlPvmSt11align_val_t;
+  _Znam;
+  _ZnamRKSt9nothrow_t;
+  _ZnamSt11align_val_t;
+  _ZnamSt11align_val_tRKSt9nothrow_t;
+  _Znwm;
+  _ZnwmRKSt9nothrow_t;
+  _ZnwmSt11align_val_t;
+  _ZnwmSt11align_val_tRKSt9nothrow_t;
+};
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.hwasan-aarch64-android.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.hwasan-aarch64-android.so
new file mode 100755
index 0000000..8bb236b
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.hwasan-aarch64-android.so differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan-x86_64.a
new file mode 100644
index 0000000..00a6abc
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan-x86_64.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan-x86_64.a.syms
new file mode 100644
index 0000000..621396e
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan-x86_64.a.syms
@@ -0,0 +1,1856 @@
+{
+  __bzero;
+  __cxa_atexit;
+  __fprintf_chk;
+  __fxstat;
+  __fxstat64;
+  __fxstatat;
+  __fxstatat64;
+  __getdelim;
+  __getrlimit;
+  __interceptor___bzero;
+  __interceptor___cxa_atexit;
+  __interceptor___fprintf_chk;
+  __interceptor___fxstat;
+  __interceptor___fxstat64;
+  __interceptor___fxstatat;
+  __interceptor___fxstatat64;
+  __interceptor___getdelim;
+  __interceptor___getrlimit;
+  __interceptor___isoc99_fprintf;
+  __interceptor___isoc99_fscanf;
+  __interceptor___isoc99_printf;
+  __interceptor___isoc99_scanf;
+  __interceptor___isoc99_snprintf;
+  __interceptor___isoc99_sprintf;
+  __interceptor___isoc99_sscanf;
+  __interceptor___isoc99_vfprintf;
+  __interceptor___isoc99_vfscanf;
+  __interceptor___isoc99_vprintf;
+  __interceptor___isoc99_vscanf;
+  __interceptor___isoc99_vsnprintf;
+  __interceptor___isoc99_vsprintf;
+  __interceptor___isoc99_vsscanf;
+  __interceptor___libc_memalign;
+  __interceptor___lxstat;
+  __interceptor___lxstat64;
+  __interceptor___overflow;
+  __interceptor___pthread_mutex_lock;
+  __interceptor___pthread_mutex_unlock;
+  __interceptor___snprintf_chk;
+  __interceptor___sprintf_chk;
+  __interceptor___strdup;
+  __interceptor___strftime_l;
+  __interceptor___strndup;
+  __interceptor___strtod_internal;
+  __interceptor___strtod_l;
+  __interceptor___strtof_internal;
+  __interceptor___strtof_l;
+  __interceptor___strtol_internal;
+  __interceptor___strtol_l;
+  __interceptor___strtold_internal;
+  __interceptor___strtold_l;
+  __interceptor___strtoll_internal;
+  __interceptor___strtoll_l;
__interceptor___strtoul_internal; + __interceptor___strtoul_l; + __interceptor___strtoull_internal; + __interceptor___strtoull_l; + __interceptor___strtouq_internal; + __interceptor___strtouq_l; + __interceptor___strxfrm_l; + __interceptor___tls_get_addr; + __interceptor___uflow; + __interceptor___underflow; + __interceptor___vsnprintf_chk; + __interceptor___vsprintf_chk; + __interceptor___wcsftime_l; + __interceptor___wcstod_internal; + __interceptor___wcstod_l; + __interceptor___wcstof_internal; + __interceptor___wcstof_l; + __interceptor___wcstol_internal; + __interceptor___wcstol_l; + __interceptor___wcstold_internal; + __interceptor___wcstold_l; + __interceptor___wcstoll_internal; + __interceptor___wcstoll_l; + __interceptor___wcstoul_internal; + __interceptor___wcstoul_l; + __interceptor___wcstoull_internal; + __interceptor___wcstoull_l; + __interceptor___wcsxfrm_l; + __interceptor___woverflow; + __interceptor___wuflow; + __interceptor___wunderflow; + __interceptor___xpg_strerror_r; + __interceptor___xstat; + __interceptor___xstat64; + __interceptor__exit; + __interceptor__obstack_begin; + __interceptor__obstack_begin_1; + __interceptor__obstack_newchunk; + __interceptor_accept; + __interceptor_accept4; + __interceptor_aligned_alloc; + __interceptor_asctime; + __interceptor_asctime_r; + __interceptor_asprintf; + __interceptor_atexit; + __interceptor_backtrace; + __interceptor_backtrace_symbols; + __interceptor_bcmp; + __interceptor_bcopy; + __interceptor_bsearch; + __interceptor_bzero; + __interceptor_calloc; + __interceptor_canonicalize_file_name; + __interceptor_capget; + __interceptor_capset; + __interceptor_cfree; + __interceptor_clock_getcpuclockid; + __interceptor_clock_getres; + __interceptor_clock_gettime; + __interceptor_clock_settime; + __interceptor_confstr; + __interceptor_crypt; + __interceptor_crypt_r; + __interceptor_ctermid; + __interceptor_ctime; + __interceptor_ctime_r; + __interceptor_dl_iterate_phdr; + __interceptor_dladdr; + __interceptor_dlclose; + __interceptor_dlerror; + __interceptor_dlopen; + __interceptor_drand48_r; + __interceptor_endgrent; + __interceptor_endpwent; + __interceptor_epoll_pwait; + __interceptor_epoll_wait; + __interceptor_ether_aton; + __interceptor_ether_aton_r; + __interceptor_ether_hostton; + __interceptor_ether_line; + __interceptor_ether_ntoa; + __interceptor_ether_ntoa_r; + __interceptor_ether_ntohost; + __interceptor_eventfd_read; + __interceptor_eventfd_write; + __interceptor_fclose; + __interceptor_fcvt; + __interceptor_fdopen; + __interceptor_fflush; + __interceptor_fgetgrent; + __interceptor_fgetgrent_r; + __interceptor_fgetpwent; + __interceptor_fgetpwent_r; + __interceptor_fgets; + __interceptor_fgets_unlocked; + __interceptor_fgetxattr; + __interceptor_flistxattr; + __interceptor_fmemopen; + __interceptor_fopen; + __interceptor_fopen64; + __interceptor_fopencookie; + __interceptor_fork; + __interceptor_forkpty; + __interceptor_fprintf; + __interceptor_fputs; + __interceptor_fread; + __interceptor_fread_unlocked; + __interceptor_free; + __interceptor_freopen; + __interceptor_freopen64; + __interceptor_frexp; + __interceptor_frexpf; + __interceptor_frexpl; + __interceptor_fscanf; + __interceptor_fstatfs; + __interceptor_fstatfs64; + __interceptor_fstatvfs; + __interceptor_fstatvfs64; + __interceptor_ftime; + __interceptor_fwrite; + __interceptor_gcvt; + __interceptor_get_current_dir_name; + __interceptor_getaddrinfo; + __interceptor_getcwd; + __interceptor_getdelim; + __interceptor_getenv; + __interceptor_getgrent; + 
__interceptor_getgrent_r; + __interceptor_getgrgid; + __interceptor_getgrgid_r; + __interceptor_getgrnam; + __interceptor_getgrnam_r; + __interceptor_getgroups; + __interceptor_gethostbyaddr; + __interceptor_gethostbyaddr_r; + __interceptor_gethostbyname; + __interceptor_gethostbyname2; + __interceptor_gethostbyname2_r; + __interceptor_gethostbyname_r; + __interceptor_gethostent; + __interceptor_gethostent_r; + __interceptor_gethostname; + __interceptor_getifaddrs; + __interceptor_getitimer; + __interceptor_getline; + __interceptor_getloadavg; + __interceptor_getmntent; + __interceptor_getmntent_r; + __interceptor_getnameinfo; + __interceptor_getpass; + __interceptor_getpeername; + __interceptor_getprotobyname; + __interceptor_getprotobyname_r; + __interceptor_getprotobynumber; + __interceptor_getprotobynumber_r; + __interceptor_getprotoent; + __interceptor_getprotoent_r; + __interceptor_getpwent; + __interceptor_getpwent_r; + __interceptor_getpwnam; + __interceptor_getpwnam_r; + __interceptor_getpwuid; + __interceptor_getpwuid_r; + __interceptor_getresgid; + __interceptor_getresuid; + __interceptor_getrlimit; + __interceptor_getrlimit64; + __interceptor_getrusage; + __interceptor_getsockname; + __interceptor_getsockopt; + __interceptor_gettimeofday; + __interceptor_getusershell; + __interceptor_getutent; + __interceptor_getutid; + __interceptor_getutline; + __interceptor_getutxent; + __interceptor_getutxid; + __interceptor_getutxline; + __interceptor_getxattr; + __interceptor_glob; + __interceptor_glob64; + __interceptor_gmtime; + __interceptor_gmtime_r; + __interceptor_iconv; + __interceptor_if_indextoname; + __interceptor_if_nametoindex; + __interceptor_inet_aton; + __interceptor_inet_ntop; + __interceptor_inet_pton; + __interceptor_initgroups; + __interceptor_ioctl; + __interceptor_lgamma; + __interceptor_lgamma_r; + __interceptor_lgammaf; + __interceptor_lgammaf_r; + __interceptor_lgammal; + __interceptor_lgammal_r; + __interceptor_lgetxattr; + __interceptor_listxattr; + __interceptor_llistxattr; + __interceptor_localtime; + __interceptor_localtime_r; + __interceptor_lrand48_r; + __interceptor_mallinfo; + __interceptor_malloc; + __interceptor_malloc_stats; + __interceptor_malloc_usable_size; + __interceptor_mallopt; + __interceptor_mbrtowc; + __interceptor_mbsnrtowcs; + __interceptor_mbsrtowcs; + __interceptor_mbstowcs; + __interceptor_mbtowc; + __interceptor_mcheck; + __interceptor_mcheck_pedantic; + __interceptor_memalign; + __interceptor_memccpy; + __interceptor_memchr; + __interceptor_memcmp; + __interceptor_memcpy; + __interceptor_memmem; + __interceptor_memmove; + __interceptor_mempcpy; + __interceptor_memrchr; + __interceptor_memset; + __interceptor_mincore; + __interceptor_mktime; + __interceptor_mlock; + __interceptor_mlockall; + __interceptor_mmap; + __interceptor_mmap64; + __interceptor_modf; + __interceptor_modff; + __interceptor_modfl; + __interceptor_mprobe; + __interceptor_mprotect; + __interceptor_msgrcv; + __interceptor_msgsnd; + __interceptor_munlock; + __interceptor_munlockall; + __interceptor_name_to_handle_at; + __interceptor_open_by_handle_at; + __interceptor_open_memstream; + __interceptor_open_wmemstream; + __interceptor_opendir; + __interceptor_openpty; + __interceptor_pclose; + __interceptor_pipe; + __interceptor_pipe2; + __interceptor_poll; + __interceptor_popen; + __interceptor_posix_memalign; + __interceptor_ppoll; + __interceptor_prctl; + __interceptor_pread; + __interceptor_pread64; + __interceptor_preadv; + __interceptor_preadv64; + 
__interceptor_printf; + __interceptor_prlimit; + __interceptor_prlimit64; + __interceptor_process_vm_readv; + __interceptor_process_vm_writev; + __interceptor_pthread_attr_getaffinity_np; + __interceptor_pthread_attr_getdetachstate; + __interceptor_pthread_attr_getguardsize; + __interceptor_pthread_attr_getinheritsched; + __interceptor_pthread_attr_getschedparam; + __interceptor_pthread_attr_getschedpolicy; + __interceptor_pthread_attr_getscope; + __interceptor_pthread_attr_getstack; + __interceptor_pthread_attr_getstacksize; + __interceptor_pthread_barrierattr_getpshared; + __interceptor_pthread_condattr_getclock; + __interceptor_pthread_condattr_getpshared; + __interceptor_pthread_create; + __interceptor_pthread_getname_np; + __interceptor_pthread_getschedparam; + __interceptor_pthread_join; + __interceptor_pthread_key_create; + __interceptor_pthread_mutex_lock; + __interceptor_pthread_mutex_unlock; + __interceptor_pthread_mutexattr_getprioceiling; + __interceptor_pthread_mutexattr_getprotocol; + __interceptor_pthread_mutexattr_getpshared; + __interceptor_pthread_mutexattr_getrobust; + __interceptor_pthread_mutexattr_getrobust_np; + __interceptor_pthread_mutexattr_gettype; + __interceptor_pthread_rwlockattr_getkind_np; + __interceptor_pthread_rwlockattr_getpshared; + __interceptor_pthread_setcancelstate; + __interceptor_pthread_setcanceltype; + __interceptor_pthread_setname_np; + __interceptor_pthread_sigmask; + __interceptor_ptrace; + __interceptor_ptsname; + __interceptor_ptsname_r; + __interceptor_putenv; + __interceptor_puts; + __interceptor_pututxline; + __interceptor_pvalloc; + __interceptor_pwrite; + __interceptor_pwrite64; + __interceptor_pwritev; + __interceptor_pwritev64; + __interceptor_qsort; + __interceptor_qsort_r; + __interceptor_rand_r; + __interceptor_random_r; + __interceptor_read; + __interceptor_readdir; + __interceptor_readdir64; + __interceptor_readdir64_r; + __interceptor_readdir_r; + __interceptor_readlink; + __interceptor_readlinkat; + __interceptor_readv; + __interceptor_realloc; + __interceptor_reallocarray; + __interceptor_realpath; + __interceptor_recv; + __interceptor_recvfrom; + __interceptor_recvmmsg; + __interceptor_recvmsg; + __interceptor_regcomp; + __interceptor_regerror; + __interceptor_regexec; + __interceptor_regfree; + __interceptor_remquo; + __interceptor_remquof; + __interceptor_remquol; + __interceptor_scandir; + __interceptor_scandir64; + __interceptor_scanf; + __interceptor_sched_getaffinity; + __interceptor_sched_getparam; + __interceptor_sem_destroy; + __interceptor_sem_getvalue; + __interceptor_sem_init; + __interceptor_sem_post; + __interceptor_sem_timedwait; + __interceptor_sem_trywait; + __interceptor_sem_wait; + __interceptor_send; + __interceptor_sendmmsg; + __interceptor_sendmsg; + __interceptor_sendto; + __interceptor_setbuf; + __interceptor_setbuffer; + __interceptor_setenv; + __interceptor_setgrent; + __interceptor_setitimer; + __interceptor_setlinebuf; + __interceptor_setlocale; + __interceptor_setpwent; + __interceptor_setvbuf; + __interceptor_shmat; + __interceptor_shmctl; + __interceptor_sigaction; + __interceptor_sigaltstack; + __interceptor_sigandset; + __interceptor_sigemptyset; + __interceptor_sigfillset; + __interceptor_signal; + __interceptor_sigorset; + __interceptor_sigpending; + __interceptor_sigprocmask; + __interceptor_sigtimedwait; + __interceptor_sigwait; + __interceptor_sigwaitinfo; + __interceptor_sincos; + __interceptor_sincosf; + __interceptor_sincosl; + __interceptor_snprintf; + __interceptor_socketpair; + 
__interceptor_sprintf; + __interceptor_sscanf; + __interceptor_statfs; + __interceptor_statfs64; + __interceptor_statvfs; + __interceptor_statvfs64; + __interceptor_stpcpy; + __interceptor_strcasecmp; + __interceptor_strcasestr; + __interceptor_strcat; + __interceptor_strchr; + __interceptor_strchrnul; + __interceptor_strcmp; + __interceptor_strcpy; + __interceptor_strcspn; + __interceptor_strdup; + __interceptor_strerror; + __interceptor_strerror_r; + __interceptor_strftime; + __interceptor_strftime_l; + __interceptor_strlen; + __interceptor_strncasecmp; + __interceptor_strncat; + __interceptor_strncmp; + __interceptor_strncpy; + __interceptor_strndup; + __interceptor_strnlen; + __interceptor_strpbrk; + __interceptor_strptime; + __interceptor_strrchr; + __interceptor_strspn; + __interceptor_strstr; + __interceptor_strtod; + __interceptor_strtod_l; + __interceptor_strtof; + __interceptor_strtof_l; + __interceptor_strtoimax; + __interceptor_strtok; + __interceptor_strtol; + __interceptor_strtol_l; + __interceptor_strtold; + __interceptor_strtold_l; + __interceptor_strtoll; + __interceptor_strtoll_l; + __interceptor_strtoul; + __interceptor_strtoul_l; + __interceptor_strtoull; + __interceptor_strtoull_l; + __interceptor_strtoumax; + __interceptor_strtouq; + __interceptor_strtouq_l; + __interceptor_strxfrm; + __interceptor_strxfrm_l; + __interceptor_swprintf; + __interceptor_sysinfo; + __interceptor_tcgetattr; + __interceptor_tempnam; + __interceptor_textdomain; + __interceptor_time; + __interceptor_timerfd_gettime; + __interceptor_timerfd_settime; + __interceptor_times; + __interceptor_tmpnam; + __interceptor_tmpnam_r; + __interceptor_tsearch; + __interceptor_ttyname; + __interceptor_ttyname_r; + __interceptor_tzset; + __interceptor_uname; + __interceptor_valloc; + __interceptor_vasprintf; + __interceptor_vfprintf; + __interceptor_vfscanf; + __interceptor_vprintf; + __interceptor_vscanf; + __interceptor_vsnprintf; + __interceptor_vsprintf; + __interceptor_vsscanf; + __interceptor_vswprintf; + __interceptor_wait; + __interceptor_wait3; + __interceptor_wait4; + __interceptor_waitid; + __interceptor_waitpid; + __interceptor_wcrtomb; + __interceptor_wcscat; + __interceptor_wcschr; + __interceptor_wcscmp; + __interceptor_wcscpy; + __interceptor_wcsdup; + __interceptor_wcsftime; + __interceptor_wcsftime_l; + __interceptor_wcslen; + __interceptor_wcsncat; + __interceptor_wcsncpy; + __interceptor_wcsnlen; + __interceptor_wcsnrtombs; + __interceptor_wcsrtombs; + __interceptor_wcstod; + __interceptor_wcstod_l; + __interceptor_wcstof; + __interceptor_wcstof_l; + __interceptor_wcstol; + __interceptor_wcstol_l; + __interceptor_wcstold; + __interceptor_wcstold_l; + __interceptor_wcstoll; + __interceptor_wcstoll_l; + __interceptor_wcstombs; + __interceptor_wcstoul; + __interceptor_wcstoul_l; + __interceptor_wcstoull; + __interceptor_wcstoull_l; + __interceptor_wcsxfrm; + __interceptor_wcsxfrm_l; + __interceptor_wctomb; + __interceptor_wmemcpy; + __interceptor_wmemmove; + __interceptor_wmempcpy; + __interceptor_wmemset; + __interceptor_wordexp; + __interceptor_write; + __interceptor_writev; + __interceptor_xdr_bool; + __interceptor_xdr_bytes; + __interceptor_xdr_char; + __interceptor_xdr_destroy; + __interceptor_xdr_double; + __interceptor_xdr_enum; + __interceptor_xdr_float; + __interceptor_xdr_hyper; + __interceptor_xdr_int; + __interceptor_xdr_int16_t; + __interceptor_xdr_int32_t; + __interceptor_xdr_int64_t; + __interceptor_xdr_int8_t; + __interceptor_xdr_long; + __interceptor_xdr_longlong_t; + 
__interceptor_xdr_quad_t; + __interceptor_xdr_short; + __interceptor_xdr_string; + __interceptor_xdr_u_char; + __interceptor_xdr_u_hyper; + __interceptor_xdr_u_int; + __interceptor_xdr_u_long; + __interceptor_xdr_u_longlong_t; + __interceptor_xdr_u_quad_t; + __interceptor_xdr_u_short; + __interceptor_xdr_uint16_t; + __interceptor_xdr_uint32_t; + __interceptor_xdr_uint64_t; + __interceptor_xdr_uint8_t; + __interceptor_xdrmem_create; + __interceptor_xdrrec_create; + __interceptor_xdrstdio_create; + __isoc99_fprintf; + __isoc99_fscanf; + __isoc99_printf; + __isoc99_scanf; + __isoc99_snprintf; + __isoc99_sprintf; + __isoc99_sscanf; + __isoc99_vfprintf; + __isoc99_vfscanf; + __isoc99_vprintf; + __isoc99_vscanf; + __isoc99_vsnprintf; + __isoc99_vsprintf; + __isoc99_vsscanf; + __libc_memalign; + __lxstat; + __lxstat64; + __msan_*; + __overflow; + __pthread_mutex_lock; + __pthread_mutex_unlock; + __sanitizer_acquire_crash_state; + __sanitizer_cov_8bit_counters_init; + __sanitizer_cov_bool_flag_init; + __sanitizer_cov_dump; + __sanitizer_cov_pcs_init; + __sanitizer_cov_reset; + __sanitizer_cov_trace_cmp; + __sanitizer_cov_trace_cmp1; + __sanitizer_cov_trace_cmp2; + __sanitizer_cov_trace_cmp4; + __sanitizer_cov_trace_cmp8; + __sanitizer_cov_trace_const_cmp1; + __sanitizer_cov_trace_const_cmp2; + __sanitizer_cov_trace_const_cmp4; + __sanitizer_cov_trace_const_cmp8; + __sanitizer_cov_trace_div4; + __sanitizer_cov_trace_div8; + __sanitizer_cov_trace_gep; + __sanitizer_cov_trace_pc_guard; + __sanitizer_cov_trace_pc_guard_init; + __sanitizer_cov_trace_pc_indir; + __sanitizer_cov_trace_switch; + __sanitizer_dtor_callback; + __sanitizer_dump_coverage; + __sanitizer_dump_trace_pc_guard_coverage; + __sanitizer_get_allocated_size; + __sanitizer_get_current_allocated_bytes; + __sanitizer_get_estimated_allocated_size; + __sanitizer_get_free_bytes; + __sanitizer_get_heap_size; + __sanitizer_get_module_and_offset_for_pc; + __sanitizer_get_ownership; + __sanitizer_get_report_path; + __sanitizer_get_unmapped_bytes; + __sanitizer_install_malloc_and_free_hooks; + __sanitizer_on_print; + __sanitizer_print_stack_trace; + __sanitizer_report_error_summary; + __sanitizer_sandbox_on_notify; + __sanitizer_set_death_callback; + __sanitizer_set_report_fd; + __sanitizer_set_report_path; + __sanitizer_symbolize_global; + __sanitizer_symbolize_pc; + __sanitizer_syscall_post_impl_accept; + __sanitizer_syscall_post_impl_accept4; + __sanitizer_syscall_post_impl_access; + __sanitizer_syscall_post_impl_acct; + __sanitizer_syscall_post_impl_add_key; + __sanitizer_syscall_post_impl_adjtimex; + __sanitizer_syscall_post_impl_alarm; + __sanitizer_syscall_post_impl_bdflush; + __sanitizer_syscall_post_impl_bind; + __sanitizer_syscall_post_impl_brk; + __sanitizer_syscall_post_impl_capget; + __sanitizer_syscall_post_impl_capset; + __sanitizer_syscall_post_impl_chdir; + __sanitizer_syscall_post_impl_chmod; + __sanitizer_syscall_post_impl_chown; + __sanitizer_syscall_post_impl_chroot; + __sanitizer_syscall_post_impl_clock_adjtime; + __sanitizer_syscall_post_impl_clock_getres; + __sanitizer_syscall_post_impl_clock_gettime; + __sanitizer_syscall_post_impl_clock_nanosleep; + __sanitizer_syscall_post_impl_clock_settime; + __sanitizer_syscall_post_impl_close; + __sanitizer_syscall_post_impl_connect; + __sanitizer_syscall_post_impl_creat; + __sanitizer_syscall_post_impl_delete_module; + __sanitizer_syscall_post_impl_dup; + __sanitizer_syscall_post_impl_dup2; + __sanitizer_syscall_post_impl_dup3; + __sanitizer_syscall_post_impl_epoll_create; + 
__sanitizer_syscall_post_impl_epoll_create1; + __sanitizer_syscall_post_impl_epoll_ctl; + __sanitizer_syscall_post_impl_epoll_pwait; + __sanitizer_syscall_post_impl_epoll_pwait2; + __sanitizer_syscall_post_impl_epoll_wait; + __sanitizer_syscall_post_impl_eventfd; + __sanitizer_syscall_post_impl_eventfd2; + __sanitizer_syscall_post_impl_exit; + __sanitizer_syscall_post_impl_exit_group; + __sanitizer_syscall_post_impl_faccessat; + __sanitizer_syscall_post_impl_fchdir; + __sanitizer_syscall_post_impl_fchmod; + __sanitizer_syscall_post_impl_fchmodat; + __sanitizer_syscall_post_impl_fchown; + __sanitizer_syscall_post_impl_fchownat; + __sanitizer_syscall_post_impl_fcntl; + __sanitizer_syscall_post_impl_fcntl64; + __sanitizer_syscall_post_impl_fdatasync; + __sanitizer_syscall_post_impl_fgetxattr; + __sanitizer_syscall_post_impl_flistxattr; + __sanitizer_syscall_post_impl_flock; + __sanitizer_syscall_post_impl_fork; + __sanitizer_syscall_post_impl_fremovexattr; + __sanitizer_syscall_post_impl_fsetxattr; + __sanitizer_syscall_post_impl_fstat; + __sanitizer_syscall_post_impl_fstat64; + __sanitizer_syscall_post_impl_fstatat64; + __sanitizer_syscall_post_impl_fstatfs; + __sanitizer_syscall_post_impl_fstatfs64; + __sanitizer_syscall_post_impl_fsync; + __sanitizer_syscall_post_impl_ftruncate; + __sanitizer_syscall_post_impl_futimesat; + __sanitizer_syscall_post_impl_get_mempolicy; + __sanitizer_syscall_post_impl_get_robust_list; + __sanitizer_syscall_post_impl_getcpu; + __sanitizer_syscall_post_impl_getcwd; + __sanitizer_syscall_post_impl_getdents; + __sanitizer_syscall_post_impl_getdents64; + __sanitizer_syscall_post_impl_getegid; + __sanitizer_syscall_post_impl_geteuid; + __sanitizer_syscall_post_impl_getgid; + __sanitizer_syscall_post_impl_getgroups; + __sanitizer_syscall_post_impl_gethostname; + __sanitizer_syscall_post_impl_getitimer; + __sanitizer_syscall_post_impl_getpeername; + __sanitizer_syscall_post_impl_getpgid; + __sanitizer_syscall_post_impl_getpgrp; + __sanitizer_syscall_post_impl_getpid; + __sanitizer_syscall_post_impl_getppid; + __sanitizer_syscall_post_impl_getpriority; + __sanitizer_syscall_post_impl_getrandom; + __sanitizer_syscall_post_impl_getresgid; + __sanitizer_syscall_post_impl_getresuid; + __sanitizer_syscall_post_impl_getrlimit; + __sanitizer_syscall_post_impl_getrusage; + __sanitizer_syscall_post_impl_getsid; + __sanitizer_syscall_post_impl_getsockname; + __sanitizer_syscall_post_impl_getsockopt; + __sanitizer_syscall_post_impl_gettid; + __sanitizer_syscall_post_impl_gettimeofday; + __sanitizer_syscall_post_impl_getuid; + __sanitizer_syscall_post_impl_getxattr; + __sanitizer_syscall_post_impl_init_module; + __sanitizer_syscall_post_impl_inotify_add_watch; + __sanitizer_syscall_post_impl_inotify_init; + __sanitizer_syscall_post_impl_inotify_init1; + __sanitizer_syscall_post_impl_inotify_rm_watch; + __sanitizer_syscall_post_impl_io_cancel; + __sanitizer_syscall_post_impl_io_destroy; + __sanitizer_syscall_post_impl_io_getevents; + __sanitizer_syscall_post_impl_io_setup; + __sanitizer_syscall_post_impl_io_submit; + __sanitizer_syscall_post_impl_ioctl; + __sanitizer_syscall_post_impl_ioperm; + __sanitizer_syscall_post_impl_ioprio_get; + __sanitizer_syscall_post_impl_ioprio_set; + __sanitizer_syscall_post_impl_ipc; + __sanitizer_syscall_post_impl_kexec_load; + __sanitizer_syscall_post_impl_keyctl; + __sanitizer_syscall_post_impl_kill; + __sanitizer_syscall_post_impl_lchown; + __sanitizer_syscall_post_impl_lgetxattr; + __sanitizer_syscall_post_impl_link; + 
__sanitizer_syscall_post_impl_linkat; + __sanitizer_syscall_post_impl_listen; + __sanitizer_syscall_post_impl_listxattr; + __sanitizer_syscall_post_impl_llistxattr; + __sanitizer_syscall_post_impl_llseek; + __sanitizer_syscall_post_impl_lookup_dcookie; + __sanitizer_syscall_post_impl_lremovexattr; + __sanitizer_syscall_post_impl_lseek; + __sanitizer_syscall_post_impl_lsetxattr; + __sanitizer_syscall_post_impl_lstat; + __sanitizer_syscall_post_impl_lstat64; + __sanitizer_syscall_post_impl_madvise; + __sanitizer_syscall_post_impl_mbind; + __sanitizer_syscall_post_impl_migrate_pages; + __sanitizer_syscall_post_impl_mincore; + __sanitizer_syscall_post_impl_mkdir; + __sanitizer_syscall_post_impl_mkdirat; + __sanitizer_syscall_post_impl_mknod; + __sanitizer_syscall_post_impl_mknodat; + __sanitizer_syscall_post_impl_mlock; + __sanitizer_syscall_post_impl_mlockall; + __sanitizer_syscall_post_impl_mmap_pgoff; + __sanitizer_syscall_post_impl_mount; + __sanitizer_syscall_post_impl_move_pages; + __sanitizer_syscall_post_impl_mprotect; + __sanitizer_syscall_post_impl_mq_getsetattr; + __sanitizer_syscall_post_impl_mq_notify; + __sanitizer_syscall_post_impl_mq_open; + __sanitizer_syscall_post_impl_mq_timedreceive; + __sanitizer_syscall_post_impl_mq_timedsend; + __sanitizer_syscall_post_impl_mq_unlink; + __sanitizer_syscall_post_impl_mremap; + __sanitizer_syscall_post_impl_msgctl; + __sanitizer_syscall_post_impl_msgget; + __sanitizer_syscall_post_impl_msgrcv; + __sanitizer_syscall_post_impl_msgsnd; + __sanitizer_syscall_post_impl_msync; + __sanitizer_syscall_post_impl_munlock; + __sanitizer_syscall_post_impl_munlockall; + __sanitizer_syscall_post_impl_munmap; + __sanitizer_syscall_post_impl_name_to_handle_at; + __sanitizer_syscall_post_impl_nanosleep; + __sanitizer_syscall_post_impl_newfstat; + __sanitizer_syscall_post_impl_newfstatat; + __sanitizer_syscall_post_impl_newlstat; + __sanitizer_syscall_post_impl_newstat; + __sanitizer_syscall_post_impl_newuname; + __sanitizer_syscall_post_impl_ni_syscall; + __sanitizer_syscall_post_impl_nice; + __sanitizer_syscall_post_impl_old_getrlimit; + __sanitizer_syscall_post_impl_old_mmap; + __sanitizer_syscall_post_impl_old_readdir; + __sanitizer_syscall_post_impl_old_select; + __sanitizer_syscall_post_impl_oldumount; + __sanitizer_syscall_post_impl_olduname; + __sanitizer_syscall_post_impl_open; + __sanitizer_syscall_post_impl_open_by_handle_at; + __sanitizer_syscall_post_impl_openat; + __sanitizer_syscall_post_impl_pause; + __sanitizer_syscall_post_impl_pciconfig_iobase; + __sanitizer_syscall_post_impl_pciconfig_read; + __sanitizer_syscall_post_impl_pciconfig_write; + __sanitizer_syscall_post_impl_perf_event_open; + __sanitizer_syscall_post_impl_personality; + __sanitizer_syscall_post_impl_pipe; + __sanitizer_syscall_post_impl_pipe2; + __sanitizer_syscall_post_impl_pivot_root; + __sanitizer_syscall_post_impl_poll; + __sanitizer_syscall_post_impl_ppoll; + __sanitizer_syscall_post_impl_pread64; + __sanitizer_syscall_post_impl_preadv; + __sanitizer_syscall_post_impl_prlimit64; + __sanitizer_syscall_post_impl_process_vm_readv; + __sanitizer_syscall_post_impl_process_vm_writev; + __sanitizer_syscall_post_impl_pselect6; + __sanitizer_syscall_post_impl_ptrace; + __sanitizer_syscall_post_impl_pwrite64; + __sanitizer_syscall_post_impl_pwritev; + __sanitizer_syscall_post_impl_quotactl; + __sanitizer_syscall_post_impl_read; + __sanitizer_syscall_post_impl_readlink; + __sanitizer_syscall_post_impl_readlinkat; + __sanitizer_syscall_post_impl_readv; + 
__sanitizer_syscall_post_impl_reboot; + __sanitizer_syscall_post_impl_recv; + __sanitizer_syscall_post_impl_recvfrom; + __sanitizer_syscall_post_impl_recvmmsg; + __sanitizer_syscall_post_impl_recvmsg; + __sanitizer_syscall_post_impl_remap_file_pages; + __sanitizer_syscall_post_impl_removexattr; + __sanitizer_syscall_post_impl_rename; + __sanitizer_syscall_post_impl_renameat; + __sanitizer_syscall_post_impl_request_key; + __sanitizer_syscall_post_impl_restart_syscall; + __sanitizer_syscall_post_impl_rmdir; + __sanitizer_syscall_post_impl_rt_sigaction; + __sanitizer_syscall_post_impl_rt_sigpending; + __sanitizer_syscall_post_impl_rt_sigprocmask; + __sanitizer_syscall_post_impl_rt_sigqueueinfo; + __sanitizer_syscall_post_impl_rt_sigtimedwait; + __sanitizer_syscall_post_impl_rt_tgsigqueueinfo; + __sanitizer_syscall_post_impl_sched_get_priority_max; + __sanitizer_syscall_post_impl_sched_get_priority_min; + __sanitizer_syscall_post_impl_sched_getaffinity; + __sanitizer_syscall_post_impl_sched_getparam; + __sanitizer_syscall_post_impl_sched_getscheduler; + __sanitizer_syscall_post_impl_sched_rr_get_interval; + __sanitizer_syscall_post_impl_sched_setaffinity; + __sanitizer_syscall_post_impl_sched_setparam; + __sanitizer_syscall_post_impl_sched_setscheduler; + __sanitizer_syscall_post_impl_sched_yield; + __sanitizer_syscall_post_impl_select; + __sanitizer_syscall_post_impl_semctl; + __sanitizer_syscall_post_impl_semget; + __sanitizer_syscall_post_impl_semop; + __sanitizer_syscall_post_impl_semtimedop; + __sanitizer_syscall_post_impl_send; + __sanitizer_syscall_post_impl_sendfile; + __sanitizer_syscall_post_impl_sendfile64; + __sanitizer_syscall_post_impl_sendmmsg; + __sanitizer_syscall_post_impl_sendmsg; + __sanitizer_syscall_post_impl_sendto; + __sanitizer_syscall_post_impl_set_mempolicy; + __sanitizer_syscall_post_impl_set_robust_list; + __sanitizer_syscall_post_impl_set_tid_address; + __sanitizer_syscall_post_impl_setdomainname; + __sanitizer_syscall_post_impl_setfsgid; + __sanitizer_syscall_post_impl_setfsuid; + __sanitizer_syscall_post_impl_setgid; + __sanitizer_syscall_post_impl_setgroups; + __sanitizer_syscall_post_impl_sethostname; + __sanitizer_syscall_post_impl_setitimer; + __sanitizer_syscall_post_impl_setns; + __sanitizer_syscall_post_impl_setpgid; + __sanitizer_syscall_post_impl_setpriority; + __sanitizer_syscall_post_impl_setregid; + __sanitizer_syscall_post_impl_setresgid; + __sanitizer_syscall_post_impl_setresuid; + __sanitizer_syscall_post_impl_setreuid; + __sanitizer_syscall_post_impl_setrlimit; + __sanitizer_syscall_post_impl_setsid; + __sanitizer_syscall_post_impl_setsockopt; + __sanitizer_syscall_post_impl_settimeofday; + __sanitizer_syscall_post_impl_setuid; + __sanitizer_syscall_post_impl_setxattr; + __sanitizer_syscall_post_impl_sgetmask; + __sanitizer_syscall_post_impl_shmat; + __sanitizer_syscall_post_impl_shmctl; + __sanitizer_syscall_post_impl_shmdt; + __sanitizer_syscall_post_impl_shmget; + __sanitizer_syscall_post_impl_shutdown; + __sanitizer_syscall_post_impl_sigaction; + __sanitizer_syscall_post_impl_sigaltstack; + __sanitizer_syscall_post_impl_signal; + __sanitizer_syscall_post_impl_signalfd; + __sanitizer_syscall_post_impl_signalfd4; + __sanitizer_syscall_post_impl_sigpending; + __sanitizer_syscall_post_impl_sigprocmask; + __sanitizer_syscall_post_impl_socket; + __sanitizer_syscall_post_impl_socketcall; + __sanitizer_syscall_post_impl_socketpair; + __sanitizer_syscall_post_impl_splice; + __sanitizer_syscall_post_impl_spu_create; + 
__sanitizer_syscall_post_impl_spu_run; + __sanitizer_syscall_post_impl_ssetmask; + __sanitizer_syscall_post_impl_stat; + __sanitizer_syscall_post_impl_stat64; + __sanitizer_syscall_post_impl_statfs; + __sanitizer_syscall_post_impl_statfs64; + __sanitizer_syscall_post_impl_stime; + __sanitizer_syscall_post_impl_swapoff; + __sanitizer_syscall_post_impl_swapon; + __sanitizer_syscall_post_impl_symlink; + __sanitizer_syscall_post_impl_symlinkat; + __sanitizer_syscall_post_impl_sync; + __sanitizer_syscall_post_impl_syncfs; + __sanitizer_syscall_post_impl_sysctl; + __sanitizer_syscall_post_impl_sysfs; + __sanitizer_syscall_post_impl_sysinfo; + __sanitizer_syscall_post_impl_syslog; + __sanitizer_syscall_post_impl_tee; + __sanitizer_syscall_post_impl_tgkill; + __sanitizer_syscall_post_impl_time; + __sanitizer_syscall_post_impl_timer_create; + __sanitizer_syscall_post_impl_timer_delete; + __sanitizer_syscall_post_impl_timer_getoverrun; + __sanitizer_syscall_post_impl_timer_gettime; + __sanitizer_syscall_post_impl_timer_settime; + __sanitizer_syscall_post_impl_timerfd_create; + __sanitizer_syscall_post_impl_timerfd_gettime; + __sanitizer_syscall_post_impl_timerfd_settime; + __sanitizer_syscall_post_impl_times; + __sanitizer_syscall_post_impl_tkill; + __sanitizer_syscall_post_impl_truncate; + __sanitizer_syscall_post_impl_umask; + __sanitizer_syscall_post_impl_umount; + __sanitizer_syscall_post_impl_uname; + __sanitizer_syscall_post_impl_unlink; + __sanitizer_syscall_post_impl_unlinkat; + __sanitizer_syscall_post_impl_unshare; + __sanitizer_syscall_post_impl_uselib; + __sanitizer_syscall_post_impl_ustat; + __sanitizer_syscall_post_impl_utime; + __sanitizer_syscall_post_impl_utimensat; + __sanitizer_syscall_post_impl_utimes; + __sanitizer_syscall_post_impl_vfork; + __sanitizer_syscall_post_impl_vhangup; + __sanitizer_syscall_post_impl_vmsplice; + __sanitizer_syscall_post_impl_wait4; + __sanitizer_syscall_post_impl_waitid; + __sanitizer_syscall_post_impl_waitpid; + __sanitizer_syscall_post_impl_write; + __sanitizer_syscall_post_impl_writev; + __sanitizer_syscall_pre_impl_accept; + __sanitizer_syscall_pre_impl_accept4; + __sanitizer_syscall_pre_impl_access; + __sanitizer_syscall_pre_impl_acct; + __sanitizer_syscall_pre_impl_add_key; + __sanitizer_syscall_pre_impl_adjtimex; + __sanitizer_syscall_pre_impl_alarm; + __sanitizer_syscall_pre_impl_bdflush; + __sanitizer_syscall_pre_impl_bind; + __sanitizer_syscall_pre_impl_brk; + __sanitizer_syscall_pre_impl_capget; + __sanitizer_syscall_pre_impl_capset; + __sanitizer_syscall_pre_impl_chdir; + __sanitizer_syscall_pre_impl_chmod; + __sanitizer_syscall_pre_impl_chown; + __sanitizer_syscall_pre_impl_chroot; + __sanitizer_syscall_pre_impl_clock_adjtime; + __sanitizer_syscall_pre_impl_clock_getres; + __sanitizer_syscall_pre_impl_clock_gettime; + __sanitizer_syscall_pre_impl_clock_nanosleep; + __sanitizer_syscall_pre_impl_clock_settime; + __sanitizer_syscall_pre_impl_close; + __sanitizer_syscall_pre_impl_connect; + __sanitizer_syscall_pre_impl_creat; + __sanitizer_syscall_pre_impl_delete_module; + __sanitizer_syscall_pre_impl_dup; + __sanitizer_syscall_pre_impl_dup2; + __sanitizer_syscall_pre_impl_dup3; + __sanitizer_syscall_pre_impl_epoll_create; + __sanitizer_syscall_pre_impl_epoll_create1; + __sanitizer_syscall_pre_impl_epoll_ctl; + __sanitizer_syscall_pre_impl_epoll_pwait; + __sanitizer_syscall_pre_impl_epoll_pwait2; + __sanitizer_syscall_pre_impl_epoll_wait; + __sanitizer_syscall_pre_impl_eventfd; + __sanitizer_syscall_pre_impl_eventfd2; + 
__sanitizer_syscall_pre_impl_exit; + __sanitizer_syscall_pre_impl_exit_group; + __sanitizer_syscall_pre_impl_faccessat; + __sanitizer_syscall_pre_impl_fchdir; + __sanitizer_syscall_pre_impl_fchmod; + __sanitizer_syscall_pre_impl_fchmodat; + __sanitizer_syscall_pre_impl_fchown; + __sanitizer_syscall_pre_impl_fchownat; + __sanitizer_syscall_pre_impl_fcntl; + __sanitizer_syscall_pre_impl_fcntl64; + __sanitizer_syscall_pre_impl_fdatasync; + __sanitizer_syscall_pre_impl_fgetxattr; + __sanitizer_syscall_pre_impl_flistxattr; + __sanitizer_syscall_pre_impl_flock; + __sanitizer_syscall_pre_impl_fork; + __sanitizer_syscall_pre_impl_fremovexattr; + __sanitizer_syscall_pre_impl_fsetxattr; + __sanitizer_syscall_pre_impl_fstat; + __sanitizer_syscall_pre_impl_fstat64; + __sanitizer_syscall_pre_impl_fstatat64; + __sanitizer_syscall_pre_impl_fstatfs; + __sanitizer_syscall_pre_impl_fstatfs64; + __sanitizer_syscall_pre_impl_fsync; + __sanitizer_syscall_pre_impl_ftruncate; + __sanitizer_syscall_pre_impl_futimesat; + __sanitizer_syscall_pre_impl_get_mempolicy; + __sanitizer_syscall_pre_impl_get_robust_list; + __sanitizer_syscall_pre_impl_getcpu; + __sanitizer_syscall_pre_impl_getcwd; + __sanitizer_syscall_pre_impl_getdents; + __sanitizer_syscall_pre_impl_getdents64; + __sanitizer_syscall_pre_impl_getegid; + __sanitizer_syscall_pre_impl_geteuid; + __sanitizer_syscall_pre_impl_getgid; + __sanitizer_syscall_pre_impl_getgroups; + __sanitizer_syscall_pre_impl_gethostname; + __sanitizer_syscall_pre_impl_getitimer; + __sanitizer_syscall_pre_impl_getpeername; + __sanitizer_syscall_pre_impl_getpgid; + __sanitizer_syscall_pre_impl_getpgrp; + __sanitizer_syscall_pre_impl_getpid; + __sanitizer_syscall_pre_impl_getppid; + __sanitizer_syscall_pre_impl_getpriority; + __sanitizer_syscall_pre_impl_getrandom; + __sanitizer_syscall_pre_impl_getresgid; + __sanitizer_syscall_pre_impl_getresuid; + __sanitizer_syscall_pre_impl_getrlimit; + __sanitizer_syscall_pre_impl_getrusage; + __sanitizer_syscall_pre_impl_getsid; + __sanitizer_syscall_pre_impl_getsockname; + __sanitizer_syscall_pre_impl_getsockopt; + __sanitizer_syscall_pre_impl_gettid; + __sanitizer_syscall_pre_impl_gettimeofday; + __sanitizer_syscall_pre_impl_getuid; + __sanitizer_syscall_pre_impl_getxattr; + __sanitizer_syscall_pre_impl_init_module; + __sanitizer_syscall_pre_impl_inotify_add_watch; + __sanitizer_syscall_pre_impl_inotify_init; + __sanitizer_syscall_pre_impl_inotify_init1; + __sanitizer_syscall_pre_impl_inotify_rm_watch; + __sanitizer_syscall_pre_impl_io_cancel; + __sanitizer_syscall_pre_impl_io_destroy; + __sanitizer_syscall_pre_impl_io_getevents; + __sanitizer_syscall_pre_impl_io_setup; + __sanitizer_syscall_pre_impl_io_submit; + __sanitizer_syscall_pre_impl_ioctl; + __sanitizer_syscall_pre_impl_ioperm; + __sanitizer_syscall_pre_impl_ioprio_get; + __sanitizer_syscall_pre_impl_ioprio_set; + __sanitizer_syscall_pre_impl_ipc; + __sanitizer_syscall_pre_impl_kexec_load; + __sanitizer_syscall_pre_impl_keyctl; + __sanitizer_syscall_pre_impl_kill; + __sanitizer_syscall_pre_impl_lchown; + __sanitizer_syscall_pre_impl_lgetxattr; + __sanitizer_syscall_pre_impl_link; + __sanitizer_syscall_pre_impl_linkat; + __sanitizer_syscall_pre_impl_listen; + __sanitizer_syscall_pre_impl_listxattr; + __sanitizer_syscall_pre_impl_llistxattr; + __sanitizer_syscall_pre_impl_llseek; + __sanitizer_syscall_pre_impl_lookup_dcookie; + __sanitizer_syscall_pre_impl_lremovexattr; + __sanitizer_syscall_pre_impl_lseek; + __sanitizer_syscall_pre_impl_lsetxattr; + __sanitizer_syscall_pre_impl_lstat; 
+ __sanitizer_syscall_pre_impl_lstat64; + __sanitizer_syscall_pre_impl_madvise; + __sanitizer_syscall_pre_impl_mbind; + __sanitizer_syscall_pre_impl_migrate_pages; + __sanitizer_syscall_pre_impl_mincore; + __sanitizer_syscall_pre_impl_mkdir; + __sanitizer_syscall_pre_impl_mkdirat; + __sanitizer_syscall_pre_impl_mknod; + __sanitizer_syscall_pre_impl_mknodat; + __sanitizer_syscall_pre_impl_mlock; + __sanitizer_syscall_pre_impl_mlockall; + __sanitizer_syscall_pre_impl_mmap_pgoff; + __sanitizer_syscall_pre_impl_mount; + __sanitizer_syscall_pre_impl_move_pages; + __sanitizer_syscall_pre_impl_mprotect; + __sanitizer_syscall_pre_impl_mq_getsetattr; + __sanitizer_syscall_pre_impl_mq_notify; + __sanitizer_syscall_pre_impl_mq_open; + __sanitizer_syscall_pre_impl_mq_timedreceive; + __sanitizer_syscall_pre_impl_mq_timedsend; + __sanitizer_syscall_pre_impl_mq_unlink; + __sanitizer_syscall_pre_impl_mremap; + __sanitizer_syscall_pre_impl_msgctl; + __sanitizer_syscall_pre_impl_msgget; + __sanitizer_syscall_pre_impl_msgrcv; + __sanitizer_syscall_pre_impl_msgsnd; + __sanitizer_syscall_pre_impl_msync; + __sanitizer_syscall_pre_impl_munlock; + __sanitizer_syscall_pre_impl_munlockall; + __sanitizer_syscall_pre_impl_munmap; + __sanitizer_syscall_pre_impl_name_to_handle_at; + __sanitizer_syscall_pre_impl_nanosleep; + __sanitizer_syscall_pre_impl_newfstat; + __sanitizer_syscall_pre_impl_newfstatat; + __sanitizer_syscall_pre_impl_newlstat; + __sanitizer_syscall_pre_impl_newstat; + __sanitizer_syscall_pre_impl_newuname; + __sanitizer_syscall_pre_impl_ni_syscall; + __sanitizer_syscall_pre_impl_nice; + __sanitizer_syscall_pre_impl_old_getrlimit; + __sanitizer_syscall_pre_impl_old_mmap; + __sanitizer_syscall_pre_impl_old_readdir; + __sanitizer_syscall_pre_impl_old_select; + __sanitizer_syscall_pre_impl_oldumount; + __sanitizer_syscall_pre_impl_olduname; + __sanitizer_syscall_pre_impl_open; + __sanitizer_syscall_pre_impl_open_by_handle_at; + __sanitizer_syscall_pre_impl_openat; + __sanitizer_syscall_pre_impl_pause; + __sanitizer_syscall_pre_impl_pciconfig_iobase; + __sanitizer_syscall_pre_impl_pciconfig_read; + __sanitizer_syscall_pre_impl_pciconfig_write; + __sanitizer_syscall_pre_impl_perf_event_open; + __sanitizer_syscall_pre_impl_personality; + __sanitizer_syscall_pre_impl_pipe; + __sanitizer_syscall_pre_impl_pipe2; + __sanitizer_syscall_pre_impl_pivot_root; + __sanitizer_syscall_pre_impl_poll; + __sanitizer_syscall_pre_impl_ppoll; + __sanitizer_syscall_pre_impl_pread64; + __sanitizer_syscall_pre_impl_preadv; + __sanitizer_syscall_pre_impl_prlimit64; + __sanitizer_syscall_pre_impl_process_vm_readv; + __sanitizer_syscall_pre_impl_process_vm_writev; + __sanitizer_syscall_pre_impl_pselect6; + __sanitizer_syscall_pre_impl_ptrace; + __sanitizer_syscall_pre_impl_pwrite64; + __sanitizer_syscall_pre_impl_pwritev; + __sanitizer_syscall_pre_impl_quotactl; + __sanitizer_syscall_pre_impl_read; + __sanitizer_syscall_pre_impl_readlink; + __sanitizer_syscall_pre_impl_readlinkat; + __sanitizer_syscall_pre_impl_readv; + __sanitizer_syscall_pre_impl_reboot; + __sanitizer_syscall_pre_impl_recv; + __sanitizer_syscall_pre_impl_recvfrom; + __sanitizer_syscall_pre_impl_recvmmsg; + __sanitizer_syscall_pre_impl_recvmsg; + __sanitizer_syscall_pre_impl_remap_file_pages; + __sanitizer_syscall_pre_impl_removexattr; + __sanitizer_syscall_pre_impl_rename; + __sanitizer_syscall_pre_impl_renameat; + __sanitizer_syscall_pre_impl_request_key; + __sanitizer_syscall_pre_impl_restart_syscall; + __sanitizer_syscall_pre_impl_rmdir; + 
__sanitizer_syscall_pre_impl_rt_sigaction; + __sanitizer_syscall_pre_impl_rt_sigpending; + __sanitizer_syscall_pre_impl_rt_sigprocmask; + __sanitizer_syscall_pre_impl_rt_sigqueueinfo; + __sanitizer_syscall_pre_impl_rt_sigtimedwait; + __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo; + __sanitizer_syscall_pre_impl_sched_get_priority_max; + __sanitizer_syscall_pre_impl_sched_get_priority_min; + __sanitizer_syscall_pre_impl_sched_getaffinity; + __sanitizer_syscall_pre_impl_sched_getparam; + __sanitizer_syscall_pre_impl_sched_getscheduler; + __sanitizer_syscall_pre_impl_sched_rr_get_interval; + __sanitizer_syscall_pre_impl_sched_setaffinity; + __sanitizer_syscall_pre_impl_sched_setparam; + __sanitizer_syscall_pre_impl_sched_setscheduler; + __sanitizer_syscall_pre_impl_sched_yield; + __sanitizer_syscall_pre_impl_select; + __sanitizer_syscall_pre_impl_semctl; + __sanitizer_syscall_pre_impl_semget; + __sanitizer_syscall_pre_impl_semop; + __sanitizer_syscall_pre_impl_semtimedop; + __sanitizer_syscall_pre_impl_send; + __sanitizer_syscall_pre_impl_sendfile; + __sanitizer_syscall_pre_impl_sendfile64; + __sanitizer_syscall_pre_impl_sendmmsg; + __sanitizer_syscall_pre_impl_sendmsg; + __sanitizer_syscall_pre_impl_sendto; + __sanitizer_syscall_pre_impl_set_mempolicy; + __sanitizer_syscall_pre_impl_set_robust_list; + __sanitizer_syscall_pre_impl_set_tid_address; + __sanitizer_syscall_pre_impl_setdomainname; + __sanitizer_syscall_pre_impl_setfsgid; + __sanitizer_syscall_pre_impl_setfsuid; + __sanitizer_syscall_pre_impl_setgid; + __sanitizer_syscall_pre_impl_setgroups; + __sanitizer_syscall_pre_impl_sethostname; + __sanitizer_syscall_pre_impl_setitimer; + __sanitizer_syscall_pre_impl_setns; + __sanitizer_syscall_pre_impl_setpgid; + __sanitizer_syscall_pre_impl_setpriority; + __sanitizer_syscall_pre_impl_setregid; + __sanitizer_syscall_pre_impl_setresgid; + __sanitizer_syscall_pre_impl_setresuid; + __sanitizer_syscall_pre_impl_setreuid; + __sanitizer_syscall_pre_impl_setrlimit; + __sanitizer_syscall_pre_impl_setsid; + __sanitizer_syscall_pre_impl_setsockopt; + __sanitizer_syscall_pre_impl_settimeofday; + __sanitizer_syscall_pre_impl_setuid; + __sanitizer_syscall_pre_impl_setxattr; + __sanitizer_syscall_pre_impl_sgetmask; + __sanitizer_syscall_pre_impl_shmat; + __sanitizer_syscall_pre_impl_shmctl; + __sanitizer_syscall_pre_impl_shmdt; + __sanitizer_syscall_pre_impl_shmget; + __sanitizer_syscall_pre_impl_shutdown; + __sanitizer_syscall_pre_impl_sigaction; + __sanitizer_syscall_pre_impl_sigaltstack; + __sanitizer_syscall_pre_impl_signal; + __sanitizer_syscall_pre_impl_signalfd; + __sanitizer_syscall_pre_impl_signalfd4; + __sanitizer_syscall_pre_impl_sigpending; + __sanitizer_syscall_pre_impl_sigprocmask; + __sanitizer_syscall_pre_impl_socket; + __sanitizer_syscall_pre_impl_socketcall; + __sanitizer_syscall_pre_impl_socketpair; + __sanitizer_syscall_pre_impl_splice; + __sanitizer_syscall_pre_impl_spu_create; + __sanitizer_syscall_pre_impl_spu_run; + __sanitizer_syscall_pre_impl_ssetmask; + __sanitizer_syscall_pre_impl_stat; + __sanitizer_syscall_pre_impl_stat64; + __sanitizer_syscall_pre_impl_statfs; + __sanitizer_syscall_pre_impl_statfs64; + __sanitizer_syscall_pre_impl_stime; + __sanitizer_syscall_pre_impl_swapoff; + __sanitizer_syscall_pre_impl_swapon; + __sanitizer_syscall_pre_impl_symlink; + __sanitizer_syscall_pre_impl_symlinkat; + __sanitizer_syscall_pre_impl_sync; + __sanitizer_syscall_pre_impl_syncfs; + __sanitizer_syscall_pre_impl_sysctl; + __sanitizer_syscall_pre_impl_sysfs; + 
__sanitizer_syscall_pre_impl_sysinfo; + __sanitizer_syscall_pre_impl_syslog; + __sanitizer_syscall_pre_impl_tee; + __sanitizer_syscall_pre_impl_tgkill; + __sanitizer_syscall_pre_impl_time; + __sanitizer_syscall_pre_impl_timer_create; + __sanitizer_syscall_pre_impl_timer_delete; + __sanitizer_syscall_pre_impl_timer_getoverrun; + __sanitizer_syscall_pre_impl_timer_gettime; + __sanitizer_syscall_pre_impl_timer_settime; + __sanitizer_syscall_pre_impl_timerfd_create; + __sanitizer_syscall_pre_impl_timerfd_gettime; + __sanitizer_syscall_pre_impl_timerfd_settime; + __sanitizer_syscall_pre_impl_times; + __sanitizer_syscall_pre_impl_tkill; + __sanitizer_syscall_pre_impl_truncate; + __sanitizer_syscall_pre_impl_umask; + __sanitizer_syscall_pre_impl_umount; + __sanitizer_syscall_pre_impl_uname; + __sanitizer_syscall_pre_impl_unlink; + __sanitizer_syscall_pre_impl_unlinkat; + __sanitizer_syscall_pre_impl_unshare; + __sanitizer_syscall_pre_impl_uselib; + __sanitizer_syscall_pre_impl_ustat; + __sanitizer_syscall_pre_impl_utime; + __sanitizer_syscall_pre_impl_utimensat; + __sanitizer_syscall_pre_impl_utimes; + __sanitizer_syscall_pre_impl_vfork; + __sanitizer_syscall_pre_impl_vhangup; + __sanitizer_syscall_pre_impl_vmsplice; + __sanitizer_syscall_pre_impl_wait4; + __sanitizer_syscall_pre_impl_waitid; + __sanitizer_syscall_pre_impl_waitpid; + __sanitizer_syscall_pre_impl_write; + __sanitizer_syscall_pre_impl_writev; + __sanitizer_unaligned_load16; + __sanitizer_unaligned_load32; + __sanitizer_unaligned_load64; + __sanitizer_unaligned_store16; + __sanitizer_unaligned_store32; + __sanitizer_unaligned_store64; + __sanitizer_weak_hook_memcmp; + __sanitizer_weak_hook_memmem; + __sanitizer_weak_hook_strcasecmp; + __sanitizer_weak_hook_strcasestr; + __sanitizer_weak_hook_strcmp; + __sanitizer_weak_hook_strncasecmp; + __sanitizer_weak_hook_strncmp; + __sanitizer_weak_hook_strstr; + __snprintf_chk; + __sprintf_chk; + __strdup; + __strftime_l; + __strndup; + __strtod_internal; + __strtod_l; + __strtof_internal; + __strtof_l; + __strtol_internal; + __strtol_l; + __strtold_internal; + __strtold_l; + __strtoll_internal; + __strtoll_l; + __strtoul_internal; + __strtoul_l; + __strtoull_internal; + __strtoull_l; + __strtouq_internal; + __strtouq_l; + __strxfrm_l; + __tls_get_addr; + __ubsan_*; + __uflow; + __underflow; + __vsnprintf_chk; + __vsprintf_chk; + __wcsftime_l; + __wcstod_internal; + __wcstod_l; + __wcstof_internal; + __wcstof_l; + __wcstol_internal; + __wcstol_l; + __wcstold_internal; + __wcstold_l; + __wcstoll_internal; + __wcstoll_l; + __wcstoul_internal; + __wcstoul_l; + __wcstoull_internal; + __wcstoull_l; + __wcsxfrm_l; + __woverflow; + __wuflow; + __wunderflow; + __xpg_strerror_r; + __xstat; + __xstat64; + _exit; + _obstack_begin; + _obstack_begin_1; + _obstack_newchunk; + accept; + accept4; + aligned_alloc; + asctime; + asctime_r; + asprintf; + atexit; + backtrace; + backtrace_symbols; + bcmp; + bcopy; + bsearch; + bzero; + calloc; + canonicalize_file_name; + capget; + capset; + cfree; + clock_getcpuclockid; + clock_getres; + clock_gettime; + clock_settime; + confstr; + crypt; + crypt_r; + ctermid; + ctime; + ctime_r; + dl_iterate_phdr; + dladdr; + dlclose; + dlerror; + dlopen; + drand48_r; + endgrent; + endpwent; + epoll_pwait; + epoll_wait; + ether_aton; + ether_aton_r; + ether_hostton; + ether_line; + ether_ntoa; + ether_ntoa_r; + ether_ntohost; + eventfd_read; + eventfd_write; + fclose; + fcvt; + fdopen; + fflush; + fgetgrent; + fgetgrent_r; + fgetpwent; + fgetpwent_r; + fgets; + fgets_unlocked; + 
fgetxattr; + flistxattr; + fmemopen; + fopen; + fopen64; + fopencookie; + fork; + forkpty; + fprintf; + fputs; + fread; + fread_unlocked; + free; + freopen; + freopen64; + frexp; + frexpf; + frexpl; + fscanf; + fstatfs; + fstatfs64; + fstatvfs; + fstatvfs64; + ftime; + fwrite; + gcvt; + get_current_dir_name; + getaddrinfo; + getcwd; + getdelim; + getenv; + getgrent; + getgrent_r; + getgrgid; + getgrgid_r; + getgrnam; + getgrnam_r; + getgroups; + gethostbyaddr; + gethostbyaddr_r; + gethostbyname; + gethostbyname2; + gethostbyname2_r; + gethostbyname_r; + gethostent; + gethostent_r; + gethostname; + getifaddrs; + getitimer; + getline; + getloadavg; + getmntent; + getmntent_r; + getnameinfo; + getpass; + getpeername; + getprotobyname; + getprotobyname_r; + getprotobynumber; + getprotobynumber_r; + getprotoent; + getprotoent_r; + getpwent; + getpwent_r; + getpwnam; + getpwnam_r; + getpwuid; + getpwuid_r; + getresgid; + getresuid; + getrlimit; + getrlimit64; + getrusage; + getsockname; + getsockopt; + gettimeofday; + getusershell; + getutent; + getutid; + getutline; + getutxent; + getutxid; + getutxline; + getxattr; + glob; + glob64; + gmtime; + gmtime_r; + iconv; + if_indextoname; + if_nametoindex; + inet_aton; + inet_ntop; + inet_pton; + initgroups; + ioctl; + lgamma; + lgamma_r; + lgammaf; + lgammaf_r; + lgammal; + lgammal_r; + lgetxattr; + listxattr; + llistxattr; + localtime; + localtime_r; + lrand48_r; + mallinfo; + malloc; + malloc_stats; + malloc_usable_size; + mallopt; + mbrtowc; + mbsnrtowcs; + mbsrtowcs; + mbstowcs; + mbtowc; + mcheck; + mcheck_pedantic; + memalign; + memccpy; + memchr; + memcmp; + memmem; + memmove; + mempcpy; + memrchr; + memset; + mincore; + mktime; + mlock; + mlockall; + mmap; + mmap64; + modf; + modff; + modfl; + mprobe; + mprotect; + msgrcv; + msgsnd; + munlock; + munlockall; + name_to_handle_at; + open_by_handle_at; + open_memstream; + open_wmemstream; + opendir; + openpty; + pclose; + pipe; + pipe2; + poll; + popen; + posix_memalign; + ppoll; + prctl; + pread; + pread64; + preadv; + preadv64; + printf; + prlimit; + prlimit64; + process_vm_readv; + process_vm_writev; + pthread_attr_getdetachstate; + pthread_attr_getguardsize; + pthread_attr_getinheritsched; + pthread_attr_getschedparam; + pthread_attr_getschedpolicy; + pthread_attr_getscope; + pthread_attr_getstack; + pthread_attr_getstacksize; + pthread_barrierattr_getpshared; + pthread_condattr_getclock; + pthread_condattr_getpshared; + pthread_create; + pthread_getname_np; + pthread_getschedparam; + pthread_join; + pthread_key_create; + pthread_mutex_lock; + pthread_mutex_unlock; + pthread_mutexattr_getprioceiling; + pthread_mutexattr_getprotocol; + pthread_mutexattr_getpshared; + pthread_mutexattr_getrobust; + pthread_mutexattr_getrobust_np; + pthread_mutexattr_gettype; + pthread_rwlockattr_getkind_np; + pthread_rwlockattr_getpshared; + pthread_setcancelstate; + pthread_setcanceltype; + pthread_setname_np; + pthread_sigmask; + ptrace; + ptsname; + ptsname_r; + putenv; + puts; + pututxline; + pvalloc; + pwrite; + pwrite64; + pwritev; + pwritev64; + qsort; + qsort_r; + rand_r; + random_r; + read; + readdir; + readdir64; + readdir64_r; + readdir_r; + readlink; + readlinkat; + readv; + realloc; + reallocarray; + recv; + recvfrom; + recvmmsg; + recvmsg; + regcomp; + regerror; + regexec; + regfree; + remquo; + remquof; + remquol; + scandir; + scandir64; + scanf; + sched_getparam; + sem_destroy; + sem_getvalue; + sem_init; + sem_post; + sem_timedwait; + sem_trywait; + sem_wait; + send; + sendmmsg; + sendmsg; + 
sendto; + setbuf; + setbuffer; + setenv; + setgrent; + setitimer; + setlinebuf; + setlocale; + setpwent; + setvbuf; + shmat; + shmctl; + sigaction; + sigaltstack; + sigandset; + sigemptyset; + sigfillset; + signal; + sigorset; + sigpending; + sigprocmask; + sigtimedwait; + sigwait; + sigwaitinfo; + sincos; + sincosf; + sincosl; + snprintf; + socketpair; + sprintf; + sscanf; + statfs; + statfs64; + statvfs; + statvfs64; + stpcpy; + strcasecmp; + strcasestr; + strcat; + strchr; + strchrnul; + strcmp; + strcpy; + strcspn; + strdup; + strerror; + strerror_r; + strftime; + strftime_l; + strlen; + strncasecmp; + strncat; + strncmp; + strncpy; + strndup; + strnlen; + strpbrk; + strptime; + strrchr; + strspn; + strstr; + strtod; + strtod_l; + strtof; + strtof_l; + strtoimax; + strtok; + strtol; + strtol_l; + strtold; + strtold_l; + strtoll; + strtoll_l; + strtoul; + strtoul_l; + strtoull; + strtoull_l; + strtoumax; + strtouq; + strtouq_l; + strxfrm; + strxfrm_l; + swprintf; + sysinfo; + tcgetattr; + tempnam; + textdomain; + time; + timerfd_gettime; + timerfd_settime; + times; + tmpnam; + tmpnam_r; + tsearch; + ttyname; + ttyname_r; + tzset; + uname; + valloc; + vasprintf; + vfprintf; + vfscanf; + vprintf; + vscanf; + vsnprintf; + vsprintf; + vsscanf; + vswprintf; + wait; + wait3; + wait4; + waitid; + waitpid; + wcrtomb; + wcscat; + wcschr; + wcscmp; + wcscpy; + wcsdup; + wcsftime; + wcsftime_l; + wcslen; + wcsncat; + wcsncpy; + wcsnlen; + wcsnrtombs; + wcsrtombs; + wcstod; + wcstod_l; + wcstof; + wcstof_l; + wcstol; + wcstol_l; + wcstold; + wcstold_l; + wcstoll; + wcstoll_l; + wcstombs; + wcstoul; + wcstoul_l; + wcstoull; + wcstoull_l; + wcsxfrm; + wcsxfrm_l; + wctomb; + wmemcpy; + wmemmove; + wmempcpy; + wmemset; + wordexp; + write; + writev; + xdr_bool; + xdr_bytes; + xdr_char; + xdr_destroy; + xdr_double; + xdr_enum; + xdr_float; + xdr_hyper; + xdr_int; + xdr_int16_t; + xdr_int32_t; + xdr_int64_t; + xdr_int8_t; + xdr_long; + xdr_longlong_t; + xdr_quad_t; + xdr_short; + xdr_string; + xdr_u_char; + xdr_u_hyper; + xdr_u_int; + xdr_u_long; + xdr_u_longlong_t; + xdr_u_quad_t; + xdr_u_short; + xdr_uint16_t; + xdr_uint32_t; + xdr_uint64_t; + xdr_uint8_t; + xdrmem_create; + xdrrec_create; + xdrstdio_create; +}; diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan_cxx-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan_cxx-x86_64.a new file mode 100644 index 0000000..995ed4a Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan_cxx-x86_64.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan_cxx-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan_cxx-x86_64.a.syms new file mode 100644 index 0000000..0096b05 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.msan_cxx-x86_64.a.syms @@ -0,0 +1,24 @@ +{ + _ZdaPv; + _ZdaPvRKSt9nothrow_t; + _ZdaPvSt11align_val_t; + _ZdaPvSt11align_val_tRKSt9nothrow_t; + _ZdaPvm; + _ZdaPvmSt11align_val_t; + _ZdlPv; + _ZdlPvRKSt9nothrow_t; + _ZdlPvSt11align_val_t; + _ZdlPvSt11align_val_tRKSt9nothrow_t; + _ZdlPvm; + _ZdlPvmSt11align_val_t; + _Znam; + _ZnamRKSt9nothrow_t; + _ZnamSt11align_val_t; + _ZnamSt11align_val_tRKSt9nothrow_t; + _Znwm; + _ZnwmRKSt9nothrow_t; + _ZnwmSt11align_val_t; + _ZnwmSt11align_val_tRKSt9nothrow_t; + __msan_*; + __ubsan_*; +}; diff --git 
a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-aarch64-android.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-aarch64-android.a new file mode 100644 index 0000000..47ce4af Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-aarch64-android.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-arm-android.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-arm-android.a new file mode 100644 index 0000000..cf52d722 Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-arm-android.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-i386.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-i386.a new file mode 100644 index 0000000..864408d Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-i386.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-x86_64.a new file mode 100644 index 0000000..cc790c8 Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.profile-x86_64.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan-x86_64.a new file mode 100644 index 0000000..442a00d Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan-x86_64.a differ diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan-x86_64.a.syms new file mode 100644 index 0000000..18ecd37 --- /dev/null +++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan-x86_64.a.syms @@ -0,0 +1,1820 @@ +{ + Annotate*; + RunningOnValgrind; + ValgrindSlowdown; + WTFAnnotate*; + __bzero; + __close; + __cxa_atexit; + __fprintf_chk; + __fxstat; + __fxstat64; + __getdelim; + __interceptor___bzero; + __interceptor___close; + __interceptor___cxa_atexit; + __interceptor___fprintf_chk; + __interceptor___fxstat; + __interceptor___fxstat64; + __interceptor___getdelim; + __interceptor___isoc99_fprintf; + __interceptor___isoc99_fscanf; + __interceptor___isoc99_printf; + __interceptor___isoc99_scanf; + __interceptor___isoc99_snprintf; + __interceptor___isoc99_sprintf; + __interceptor___isoc99_sscanf; + __interceptor___isoc99_vfprintf; + __interceptor___isoc99_vfscanf; + __interceptor___isoc99_vprintf; + __interceptor___isoc99_vscanf; + __interceptor___isoc99_vsnprintf; + __interceptor___isoc99_vsprintf; + __interceptor___isoc99_vsscanf; + __interceptor___libc_memalign; + __interceptor___lxstat; + __interceptor___lxstat64; + __interceptor___overflow; + __interceptor___pthread_mutex_lock; + __interceptor___pthread_mutex_unlock; + __interceptor___res_iclose; + __interceptor___sigsetjmp; + __interceptor___snprintf_chk; + __interceptor___sprintf_chk; + __interceptor___strndup; + __interceptor___strxfrm_l; + __interceptor___tls_get_addr; + 
__interceptor___uflow; + __interceptor___underflow; + __interceptor___vsnprintf_chk; + __interceptor___vsprintf_chk; + __interceptor___wcsxfrm_l; + __interceptor___woverflow; + __interceptor___wuflow; + __interceptor___wunderflow; + __interceptor___xpg_strerror_r; + __interceptor___xstat; + __interceptor___xstat64; + __interceptor__exit; + __interceptor__obstack_begin; + __interceptor__obstack_begin_1; + __interceptor__obstack_newchunk; + __interceptor__setjmp; + __interceptor_abort; + __interceptor_accept; + __interceptor_accept4; + __interceptor_aligned_alloc; + __interceptor_asctime; + __interceptor_asctime_r; + __interceptor_asprintf; + __interceptor_atexit; + __interceptor_backtrace; + __interceptor_backtrace_symbols; + __interceptor_bcmp; + __interceptor_bind; + __interceptor_bsearch; + __interceptor_bzero; + __interceptor_calloc; + __interceptor_canonicalize_file_name; + __interceptor_capget; + __interceptor_capset; + __interceptor_cfree; + __interceptor_clock_getcpuclockid; + __interceptor_clock_getres; + __interceptor_clock_gettime; + __interceptor_clock_settime; + __interceptor_close; + __interceptor_closedir; + __interceptor_confstr; + __interceptor_connect; + __interceptor_creat; + __interceptor_creat64; + __interceptor_crypt; + __interceptor_crypt_r; + __interceptor_ctermid; + __interceptor_ctime; + __interceptor_ctime_r; + __interceptor_dl_iterate_phdr; + __interceptor_dlclose; + __interceptor_dlopen; + __interceptor_drand48_r; + __interceptor_dup; + __interceptor_dup2; + __interceptor_dup3; + __interceptor_endgrent; + __interceptor_endpwent; + __interceptor_epoll_create; + __interceptor_epoll_create1; + __interceptor_epoll_ctl; + __interceptor_epoll_pwait; + __interceptor_epoll_wait; + __interceptor_ether_aton; + __interceptor_ether_aton_r; + __interceptor_ether_hostton; + __interceptor_ether_line; + __interceptor_ether_ntoa; + __interceptor_ether_ntoa_r; + __interceptor_ether_ntohost; + __interceptor_eventfd; + __interceptor_eventfd_read; + __interceptor_eventfd_write; + __interceptor_fclose; + __interceptor_fdopen; + __interceptor_fflush; + __interceptor_fgetgrent; + __interceptor_fgetgrent_r; + __interceptor_fgetpwent; + __interceptor_fgetpwent_r; + __interceptor_fgets; + __interceptor_fgetxattr; + __interceptor_flistxattr; + __interceptor_fmemopen; + __interceptor_fopen; + __interceptor_fopen64; + __interceptor_fopencookie; + __interceptor_fork; + __interceptor_fprintf; + __interceptor_fputs; + __interceptor_fread; + __interceptor_free; + __interceptor_freopen; + __interceptor_freopen64; + __interceptor_frexp; + __interceptor_frexpf; + __interceptor_frexpl; + __interceptor_fscanf; + __interceptor_fstat; + __interceptor_fstat64; + __interceptor_fstatfs; + __interceptor_fstatfs64; + __interceptor_fstatvfs; + __interceptor_fstatvfs64; + __interceptor_ftime; + __interceptor_fwrite; + __interceptor_get_current_dir_name; + __interceptor_getaddrinfo; + __interceptor_getcwd; + __interceptor_getdelim; + __interceptor_getgrent; + __interceptor_getgrent_r; + __interceptor_getgrgid; + __interceptor_getgrgid_r; + __interceptor_getgrnam; + __interceptor_getgrnam_r; + __interceptor_getgroups; + __interceptor_gethostbyaddr; + __interceptor_gethostbyaddr_r; + __interceptor_gethostbyname; + __interceptor_gethostbyname2; + __interceptor_gethostbyname2_r; + __interceptor_gethostbyname_r; + __interceptor_gethostent; + __interceptor_gethostent_r; + __interceptor_getifaddrs; + __interceptor_getitimer; + __interceptor_getline; + __interceptor_getloadavg; + __interceptor_getmntent; + 
__interceptor_getmntent_r; + __interceptor_getnameinfo; + __interceptor_getpass; + __interceptor_getpeername; + __interceptor_getprotobyname; + __interceptor_getprotobyname_r; + __interceptor_getprotobynumber; + __interceptor_getprotobynumber_r; + __interceptor_getprotoent; + __interceptor_getprotoent_r; + __interceptor_getpwent; + __interceptor_getpwent_r; + __interceptor_getpwnam; + __interceptor_getpwnam_r; + __interceptor_getpwuid; + __interceptor_getpwuid_r; + __interceptor_getresgid; + __interceptor_getresuid; + __interceptor_getsockname; + __interceptor_getsockopt; + __interceptor_gettimeofday; + __interceptor_getusershell; + __interceptor_getutent; + __interceptor_getutid; + __interceptor_getutline; + __interceptor_getutxent; + __interceptor_getutxid; + __interceptor_getutxline; + __interceptor_getxattr; + __interceptor_glob; + __interceptor_glob64; + __interceptor_gmtime; + __interceptor_gmtime_r; + __interceptor_iconv; + __interceptor_if_indextoname; + __interceptor_if_nametoindex; + __interceptor_inet_aton; + __interceptor_inet_ntop; + __interceptor_inet_pton; + __interceptor_initgroups; + __interceptor_inotify_init; + __interceptor_inotify_init1; + __interceptor_ioctl; + __interceptor_kill; + __interceptor_lgamma; + __interceptor_lgamma_r; + __interceptor_lgammaf; + __interceptor_lgammaf_r; + __interceptor_lgammal; + __interceptor_lgammal_r; + __interceptor_lgetxattr; + __interceptor_listen; + __interceptor_listxattr; + __interceptor_llistxattr; + __interceptor_localtime; + __interceptor_localtime_r; + __interceptor_longjmp; + __interceptor_lrand48_r; + __interceptor_malloc; + __interceptor_malloc_usable_size; + __interceptor_mbsnrtowcs; + __interceptor_mbsrtowcs; + __interceptor_mbstowcs; + __interceptor_mcheck; + __interceptor_mcheck_pedantic; + __interceptor_memalign; + __interceptor_memchr; + __interceptor_memcmp; + __interceptor_memcpy; + __interceptor_memmem; + __interceptor_memmove; + __interceptor_memrchr; + __interceptor_memset; + __interceptor_mincore; + __interceptor_mktime; + __interceptor_mlock; + __interceptor_mlockall; + __interceptor_mmap; + __interceptor_mmap64; + __interceptor_modf; + __interceptor_modff; + __interceptor_modfl; + __interceptor_mprobe; + __interceptor_mprotect; + __interceptor_msgrcv; + __interceptor_msgsnd; + __interceptor_munlock; + __interceptor_munlockall; + __interceptor_munmap; + __interceptor_name_to_handle_at; + __interceptor_nanosleep; + __interceptor_on_exit; + __interceptor_open; + __interceptor_open64; + __interceptor_open_by_handle_at; + __interceptor_open_memstream; + __interceptor_open_wmemstream; + __interceptor_opendir; + __interceptor_pause; + __interceptor_pclose; + __interceptor_pipe; + __interceptor_pipe2; + __interceptor_poll; + __interceptor_popen; + __interceptor_posix_memalign; + __interceptor_ppoll; + __interceptor_prctl; + __interceptor_pread; + __interceptor_pread64; + __interceptor_preadv; + __interceptor_preadv64; + __interceptor_printf; + __interceptor_process_vm_readv; + __interceptor_process_vm_writev; + __interceptor_pthread_attr_getaffinity_np; + __interceptor_pthread_attr_getdetachstate; + __interceptor_pthread_attr_getguardsize; + __interceptor_pthread_attr_getinheritsched; + __interceptor_pthread_attr_getschedparam; + __interceptor_pthread_attr_getschedpolicy; + __interceptor_pthread_attr_getscope; + __interceptor_pthread_attr_getstack; + __interceptor_pthread_attr_getstacksize; + __interceptor_pthread_barrier_destroy; + __interceptor_pthread_barrier_init; + __interceptor_pthread_barrier_wait; + 
__interceptor_pthread_barrierattr_getpshared; + __interceptor_pthread_cond_broadcast; + __interceptor_pthread_cond_clockwait; + __interceptor_pthread_cond_destroy; + __interceptor_pthread_cond_init; + __interceptor_pthread_cond_signal; + __interceptor_pthread_cond_timedwait; + __interceptor_pthread_cond_wait; + __interceptor_pthread_condattr_getclock; + __interceptor_pthread_condattr_getpshared; + __interceptor_pthread_create; + __interceptor_pthread_detach; + __interceptor_pthread_exit; + __interceptor_pthread_getname_np; + __interceptor_pthread_getschedparam; + __interceptor_pthread_join; + __interceptor_pthread_kill; + __interceptor_pthread_mutex_destroy; + __interceptor_pthread_mutex_init; + __interceptor_pthread_mutex_lock; + __interceptor_pthread_mutex_timedlock; + __interceptor_pthread_mutex_trylock; + __interceptor_pthread_mutex_unlock; + __interceptor_pthread_mutexattr_getprioceiling; + __interceptor_pthread_mutexattr_getprotocol; + __interceptor_pthread_mutexattr_getpshared; + __interceptor_pthread_mutexattr_getrobust; + __interceptor_pthread_mutexattr_getrobust_np; + __interceptor_pthread_mutexattr_gettype; + __interceptor_pthread_once; + __interceptor_pthread_rwlock_destroy; + __interceptor_pthread_rwlock_init; + __interceptor_pthread_rwlock_rdlock; + __interceptor_pthread_rwlock_timedrdlock; + __interceptor_pthread_rwlock_timedwrlock; + __interceptor_pthread_rwlock_tryrdlock; + __interceptor_pthread_rwlock_trywrlock; + __interceptor_pthread_rwlock_unlock; + __interceptor_pthread_rwlock_wrlock; + __interceptor_pthread_rwlockattr_getkind_np; + __interceptor_pthread_rwlockattr_getpshared; + __interceptor_pthread_setcancelstate; + __interceptor_pthread_setcanceltype; + __interceptor_pthread_setname_np; + __interceptor_pthread_sigmask; + __interceptor_pthread_spin_destroy; + __interceptor_pthread_spin_init; + __interceptor_pthread_spin_lock; + __interceptor_pthread_spin_trylock; + __interceptor_pthread_spin_unlock; + __interceptor_pthread_timedjoin_np; + __interceptor_pthread_tryjoin_np; + __interceptor_ptrace; + __interceptor_ptsname; + __interceptor_ptsname_r; + __interceptor_puts; + __interceptor_pututxline; + __interceptor_pvalloc; + __interceptor_pwrite; + __interceptor_pwrite64; + __interceptor_pwritev; + __interceptor_pwritev64; + __interceptor_qsort; + __interceptor_qsort_r; + __interceptor_raise; + __interceptor_rand_r; + __interceptor_random_r; + __interceptor_read; + __interceptor_readdir; + __interceptor_readdir64; + __interceptor_readdir64_r; + __interceptor_readdir_r; + __interceptor_readlink; + __interceptor_readlinkat; + __interceptor_readv; + __interceptor_realloc; + __interceptor_reallocarray; + __interceptor_realpath; + __interceptor_recv; + __interceptor_recvfrom; + __interceptor_recvmmsg; + __interceptor_recvmsg; + __interceptor_regcomp; + __interceptor_regerror; + __interceptor_regexec; + __interceptor_regfree; + __interceptor_remquo; + __interceptor_remquof; + __interceptor_remquol; + __interceptor_rmdir; + __interceptor_scandir; + __interceptor_scandir64; + __interceptor_scanf; + __interceptor_sched_getaffinity; + __interceptor_sched_getparam; + __interceptor_sem_destroy; + __interceptor_sem_getvalue; + __interceptor_sem_init; + __interceptor_sem_post; + __interceptor_sem_timedwait; + __interceptor_sem_trywait; + __interceptor_sem_wait; + __interceptor_send; + __interceptor_sendmmsg; + __interceptor_sendmsg; + __interceptor_sendto; + __interceptor_setbuf; + __interceptor_setbuffer; + __interceptor_setgrent; + __interceptor_setitimer; + __interceptor_setjmp; 
+ __interceptor_setlinebuf; + __interceptor_setlocale; + __interceptor_setpwent; + __interceptor_setvbuf; + __interceptor_shmctl; + __interceptor_sigaction; + __interceptor_sigaltstack; + __interceptor_sigandset; + __interceptor_sigblock; + __interceptor_sigemptyset; + __interceptor_sigfillset; + __interceptor_siglongjmp; + __interceptor_signal; + __interceptor_signalfd; + __interceptor_sigorset; + __interceptor_sigpending; + __interceptor_sigprocmask; + __interceptor_sigsetjmp; + __interceptor_sigsetmask; + __interceptor_sigsuspend; + __interceptor_sigtimedwait; + __interceptor_sigwait; + __interceptor_sigwaitinfo; + __interceptor_sincos; + __interceptor_sincosf; + __interceptor_sincosl; + __interceptor_sleep; + __interceptor_snprintf; + __interceptor_socket; + __interceptor_socketpair; + __interceptor_sprintf; + __interceptor_sscanf; + __interceptor_statfs; + __interceptor_statfs64; + __interceptor_statvfs; + __interceptor_statvfs64; + __interceptor_strcasecmp; + __interceptor_strcasestr; + __interceptor_strchr; + __interceptor_strchrnul; + __interceptor_strcmp; + __interceptor_strcpy; + __interceptor_strcspn; + __interceptor_strdup; + __interceptor_strerror; + __interceptor_strerror_r; + __interceptor_strlen; + __interceptor_strncasecmp; + __interceptor_strncmp; + __interceptor_strncpy; + __interceptor_strndup; + __interceptor_strnlen; + __interceptor_strpbrk; + __interceptor_strptime; + __interceptor_strrchr; + __interceptor_strspn; + __interceptor_strstr; + __interceptor_strtoimax; + __interceptor_strtok; + __interceptor_strtoumax; + __interceptor_strxfrm; + __interceptor_strxfrm_l; + __interceptor_sysinfo; + __interceptor_tcgetattr; + __interceptor_tempnam; + __interceptor_textdomain; + __interceptor_time; + __interceptor_timerfd_gettime; + __interceptor_timerfd_settime; + __interceptor_times; + __interceptor_tmpfile; + __interceptor_tmpfile64; + __interceptor_tmpnam; + __interceptor_tmpnam_r; + __interceptor_tsearch; + __interceptor_ttyname; + __interceptor_ttyname_r; + __interceptor_uname; + __interceptor_unlink; + __interceptor_usleep; + __interceptor_valloc; + __interceptor_vasprintf; + __interceptor_vfork; + __interceptor_vfprintf; + __interceptor_vfscanf; + __interceptor_vprintf; + __interceptor_vscanf; + __interceptor_vsnprintf; + __interceptor_vsprintf; + __interceptor_vsscanf; + __interceptor_wait; + __interceptor_wait3; + __interceptor_wait4; + __interceptor_waitid; + __interceptor_waitpid; + __interceptor_wcrtomb; + __interceptor_wcscat; + __interceptor_wcsdup; + __interceptor_wcslen; + __interceptor_wcsncat; + __interceptor_wcsnlen; + __interceptor_wcsnrtombs; + __interceptor_wcsrtombs; + __interceptor_wcstombs; + __interceptor_wcsxfrm; + __interceptor_wcsxfrm_l; + __interceptor_wctomb; + __interceptor_wordexp; + __interceptor_write; + __interceptor_writev; + __interceptor_xdr_bool; + __interceptor_xdr_bytes; + __interceptor_xdr_char; + __interceptor_xdr_destroy; + __interceptor_xdr_double; + __interceptor_xdr_enum; + __interceptor_xdr_float; + __interceptor_xdr_hyper; + __interceptor_xdr_int; + __interceptor_xdr_int16_t; + __interceptor_xdr_int32_t; + __interceptor_xdr_int64_t; + __interceptor_xdr_int8_t; + __interceptor_xdr_long; + __interceptor_xdr_longlong_t; + __interceptor_xdr_quad_t; + __interceptor_xdr_short; + __interceptor_xdr_string; + __interceptor_xdr_u_char; + __interceptor_xdr_u_hyper; + __interceptor_xdr_u_int; + __interceptor_xdr_u_long; + __interceptor_xdr_u_longlong_t; + __interceptor_xdr_u_quad_t; + __interceptor_xdr_u_short; + 
__interceptor_xdr_uint16_t; + __interceptor_xdr_uint32_t; + __interceptor_xdr_uint64_t; + __interceptor_xdr_uint8_t; + __interceptor_xdrmem_create; + __interceptor_xdrrec_create; + __interceptor_xdrstdio_create; + __isoc99_fprintf; + __isoc99_fscanf; + __isoc99_printf; + __isoc99_scanf; + __isoc99_snprintf; + __isoc99_sprintf; + __isoc99_sscanf; + __isoc99_vfprintf; + __isoc99_vfscanf; + __isoc99_vprintf; + __isoc99_vscanf; + __isoc99_vsnprintf; + __isoc99_vsprintf; + __isoc99_vsscanf; + __libc_memalign; + __lxstat; + __lxstat64; + __overflow; + __pthread_mutex_lock; + __pthread_mutex_unlock; + __res_iclose; + __sanitizer_acquire_crash_state; + __sanitizer_cov_8bit_counters_init; + __sanitizer_cov_bool_flag_init; + __sanitizer_cov_dump; + __sanitizer_cov_pcs_init; + __sanitizer_cov_reset; + __sanitizer_cov_trace_cmp; + __sanitizer_cov_trace_cmp1; + __sanitizer_cov_trace_cmp2; + __sanitizer_cov_trace_cmp4; + __sanitizer_cov_trace_cmp8; + __sanitizer_cov_trace_const_cmp1; + __sanitizer_cov_trace_const_cmp2; + __sanitizer_cov_trace_const_cmp4; + __sanitizer_cov_trace_const_cmp8; + __sanitizer_cov_trace_div4; + __sanitizer_cov_trace_div8; + __sanitizer_cov_trace_gep; + __sanitizer_cov_trace_pc_guard; + __sanitizer_cov_trace_pc_guard_init; + __sanitizer_cov_trace_pc_indir; + __sanitizer_cov_trace_switch; + __sanitizer_dump_coverage; + __sanitizer_dump_trace_pc_guard_coverage; + __sanitizer_free_hook; + __sanitizer_get_allocated_size; + __sanitizer_get_current_allocated_bytes; + __sanitizer_get_estimated_allocated_size; + __sanitizer_get_free_bytes; + __sanitizer_get_heap_size; + __sanitizer_get_module_and_offset_for_pc; + __sanitizer_get_ownership; + __sanitizer_get_report_path; + __sanitizer_get_unmapped_bytes; + __sanitizer_install_malloc_and_free_hooks; + __sanitizer_malloc_hook; + __sanitizer_on_print; + __sanitizer_print_stack_trace; + __sanitizer_report_error_summary; + __sanitizer_sandbox_on_notify; + __sanitizer_set_death_callback; + __sanitizer_set_report_fd; + __sanitizer_set_report_path; + __sanitizer_symbolize_global; + __sanitizer_symbolize_pc; + __sanitizer_syscall_post_impl_accept; + __sanitizer_syscall_post_impl_accept4; + __sanitizer_syscall_post_impl_access; + __sanitizer_syscall_post_impl_acct; + __sanitizer_syscall_post_impl_add_key; + __sanitizer_syscall_post_impl_adjtimex; + __sanitizer_syscall_post_impl_alarm; + __sanitizer_syscall_post_impl_bdflush; + __sanitizer_syscall_post_impl_bind; + __sanitizer_syscall_post_impl_brk; + __sanitizer_syscall_post_impl_capget; + __sanitizer_syscall_post_impl_capset; + __sanitizer_syscall_post_impl_chdir; + __sanitizer_syscall_post_impl_chmod; + __sanitizer_syscall_post_impl_chown; + __sanitizer_syscall_post_impl_chroot; + __sanitizer_syscall_post_impl_clock_adjtime; + __sanitizer_syscall_post_impl_clock_getres; + __sanitizer_syscall_post_impl_clock_gettime; + __sanitizer_syscall_post_impl_clock_nanosleep; + __sanitizer_syscall_post_impl_clock_settime; + __sanitizer_syscall_post_impl_close; + __sanitizer_syscall_post_impl_connect; + __sanitizer_syscall_post_impl_creat; + __sanitizer_syscall_post_impl_delete_module; + __sanitizer_syscall_post_impl_dup; + __sanitizer_syscall_post_impl_dup2; + __sanitizer_syscall_post_impl_dup3; + __sanitizer_syscall_post_impl_epoll_create; + __sanitizer_syscall_post_impl_epoll_create1; + __sanitizer_syscall_post_impl_epoll_ctl; + __sanitizer_syscall_post_impl_epoll_pwait; + __sanitizer_syscall_post_impl_epoll_pwait2; + __sanitizer_syscall_post_impl_epoll_wait; + __sanitizer_syscall_post_impl_eventfd; + 
__sanitizer_syscall_post_impl_eventfd2; + __sanitizer_syscall_post_impl_exit; + __sanitizer_syscall_post_impl_exit_group; + __sanitizer_syscall_post_impl_faccessat; + __sanitizer_syscall_post_impl_fchdir; + __sanitizer_syscall_post_impl_fchmod; + __sanitizer_syscall_post_impl_fchmodat; + __sanitizer_syscall_post_impl_fchown; + __sanitizer_syscall_post_impl_fchownat; + __sanitizer_syscall_post_impl_fcntl; + __sanitizer_syscall_post_impl_fcntl64; + __sanitizer_syscall_post_impl_fdatasync; + __sanitizer_syscall_post_impl_fgetxattr; + __sanitizer_syscall_post_impl_flistxattr; + __sanitizer_syscall_post_impl_flock; + __sanitizer_syscall_post_impl_fork; + __sanitizer_syscall_post_impl_fremovexattr; + __sanitizer_syscall_post_impl_fsetxattr; + __sanitizer_syscall_post_impl_fstat; + __sanitizer_syscall_post_impl_fstat64; + __sanitizer_syscall_post_impl_fstatat64; + __sanitizer_syscall_post_impl_fstatfs; + __sanitizer_syscall_post_impl_fstatfs64; + __sanitizer_syscall_post_impl_fsync; + __sanitizer_syscall_post_impl_ftruncate; + __sanitizer_syscall_post_impl_futimesat; + __sanitizer_syscall_post_impl_get_mempolicy; + __sanitizer_syscall_post_impl_get_robust_list; + __sanitizer_syscall_post_impl_getcpu; + __sanitizer_syscall_post_impl_getcwd; + __sanitizer_syscall_post_impl_getdents; + __sanitizer_syscall_post_impl_getdents64; + __sanitizer_syscall_post_impl_getegid; + __sanitizer_syscall_post_impl_geteuid; + __sanitizer_syscall_post_impl_getgid; + __sanitizer_syscall_post_impl_getgroups; + __sanitizer_syscall_post_impl_gethostname; + __sanitizer_syscall_post_impl_getitimer; + __sanitizer_syscall_post_impl_getpeername; + __sanitizer_syscall_post_impl_getpgid; + __sanitizer_syscall_post_impl_getpgrp; + __sanitizer_syscall_post_impl_getpid; + __sanitizer_syscall_post_impl_getppid; + __sanitizer_syscall_post_impl_getpriority; + __sanitizer_syscall_post_impl_getrandom; + __sanitizer_syscall_post_impl_getresgid; + __sanitizer_syscall_post_impl_getresuid; + __sanitizer_syscall_post_impl_getrlimit; + __sanitizer_syscall_post_impl_getrusage; + __sanitizer_syscall_post_impl_getsid; + __sanitizer_syscall_post_impl_getsockname; + __sanitizer_syscall_post_impl_getsockopt; + __sanitizer_syscall_post_impl_gettid; + __sanitizer_syscall_post_impl_gettimeofday; + __sanitizer_syscall_post_impl_getuid; + __sanitizer_syscall_post_impl_getxattr; + __sanitizer_syscall_post_impl_init_module; + __sanitizer_syscall_post_impl_inotify_add_watch; + __sanitizer_syscall_post_impl_inotify_init; + __sanitizer_syscall_post_impl_inotify_init1; + __sanitizer_syscall_post_impl_inotify_rm_watch; + __sanitizer_syscall_post_impl_io_cancel; + __sanitizer_syscall_post_impl_io_destroy; + __sanitizer_syscall_post_impl_io_getevents; + __sanitizer_syscall_post_impl_io_setup; + __sanitizer_syscall_post_impl_io_submit; + __sanitizer_syscall_post_impl_ioctl; + __sanitizer_syscall_post_impl_ioperm; + __sanitizer_syscall_post_impl_ioprio_get; + __sanitizer_syscall_post_impl_ioprio_set; + __sanitizer_syscall_post_impl_ipc; + __sanitizer_syscall_post_impl_kexec_load; + __sanitizer_syscall_post_impl_keyctl; + __sanitizer_syscall_post_impl_kill; + __sanitizer_syscall_post_impl_lchown; + __sanitizer_syscall_post_impl_lgetxattr; + __sanitizer_syscall_post_impl_link; + __sanitizer_syscall_post_impl_linkat; + __sanitizer_syscall_post_impl_listen; + __sanitizer_syscall_post_impl_listxattr; + __sanitizer_syscall_post_impl_llistxattr; + __sanitizer_syscall_post_impl_llseek; + __sanitizer_syscall_post_impl_lookup_dcookie; + 
__sanitizer_syscall_post_impl_lremovexattr; + __sanitizer_syscall_post_impl_lseek; + __sanitizer_syscall_post_impl_lsetxattr; + __sanitizer_syscall_post_impl_lstat; + __sanitizer_syscall_post_impl_lstat64; + __sanitizer_syscall_post_impl_madvise; + __sanitizer_syscall_post_impl_mbind; + __sanitizer_syscall_post_impl_migrate_pages; + __sanitizer_syscall_post_impl_mincore; + __sanitizer_syscall_post_impl_mkdir; + __sanitizer_syscall_post_impl_mkdirat; + __sanitizer_syscall_post_impl_mknod; + __sanitizer_syscall_post_impl_mknodat; + __sanitizer_syscall_post_impl_mlock; + __sanitizer_syscall_post_impl_mlockall; + __sanitizer_syscall_post_impl_mmap_pgoff; + __sanitizer_syscall_post_impl_mount; + __sanitizer_syscall_post_impl_move_pages; + __sanitizer_syscall_post_impl_mprotect; + __sanitizer_syscall_post_impl_mq_getsetattr; + __sanitizer_syscall_post_impl_mq_notify; + __sanitizer_syscall_post_impl_mq_open; + __sanitizer_syscall_post_impl_mq_timedreceive; + __sanitizer_syscall_post_impl_mq_timedsend; + __sanitizer_syscall_post_impl_mq_unlink; + __sanitizer_syscall_post_impl_mremap; + __sanitizer_syscall_post_impl_msgctl; + __sanitizer_syscall_post_impl_msgget; + __sanitizer_syscall_post_impl_msgrcv; + __sanitizer_syscall_post_impl_msgsnd; + __sanitizer_syscall_post_impl_msync; + __sanitizer_syscall_post_impl_munlock; + __sanitizer_syscall_post_impl_munlockall; + __sanitizer_syscall_post_impl_munmap; + __sanitizer_syscall_post_impl_name_to_handle_at; + __sanitizer_syscall_post_impl_nanosleep; + __sanitizer_syscall_post_impl_newfstat; + __sanitizer_syscall_post_impl_newfstatat; + __sanitizer_syscall_post_impl_newlstat; + __sanitizer_syscall_post_impl_newstat; + __sanitizer_syscall_post_impl_newuname; + __sanitizer_syscall_post_impl_ni_syscall; + __sanitizer_syscall_post_impl_nice; + __sanitizer_syscall_post_impl_old_getrlimit; + __sanitizer_syscall_post_impl_old_mmap; + __sanitizer_syscall_post_impl_old_readdir; + __sanitizer_syscall_post_impl_old_select; + __sanitizer_syscall_post_impl_oldumount; + __sanitizer_syscall_post_impl_olduname; + __sanitizer_syscall_post_impl_open; + __sanitizer_syscall_post_impl_open_by_handle_at; + __sanitizer_syscall_post_impl_openat; + __sanitizer_syscall_post_impl_pause; + __sanitizer_syscall_post_impl_pciconfig_iobase; + __sanitizer_syscall_post_impl_pciconfig_read; + __sanitizer_syscall_post_impl_pciconfig_write; + __sanitizer_syscall_post_impl_perf_event_open; + __sanitizer_syscall_post_impl_personality; + __sanitizer_syscall_post_impl_pipe; + __sanitizer_syscall_post_impl_pipe2; + __sanitizer_syscall_post_impl_pivot_root; + __sanitizer_syscall_post_impl_poll; + __sanitizer_syscall_post_impl_ppoll; + __sanitizer_syscall_post_impl_pread64; + __sanitizer_syscall_post_impl_preadv; + __sanitizer_syscall_post_impl_prlimit64; + __sanitizer_syscall_post_impl_process_vm_readv; + __sanitizer_syscall_post_impl_process_vm_writev; + __sanitizer_syscall_post_impl_pselect6; + __sanitizer_syscall_post_impl_ptrace; + __sanitizer_syscall_post_impl_pwrite64; + __sanitizer_syscall_post_impl_pwritev; + __sanitizer_syscall_post_impl_quotactl; + __sanitizer_syscall_post_impl_read; + __sanitizer_syscall_post_impl_readlink; + __sanitizer_syscall_post_impl_readlinkat; + __sanitizer_syscall_post_impl_readv; + __sanitizer_syscall_post_impl_reboot; + __sanitizer_syscall_post_impl_recv; + __sanitizer_syscall_post_impl_recvfrom; + __sanitizer_syscall_post_impl_recvmmsg; + __sanitizer_syscall_post_impl_recvmsg; + __sanitizer_syscall_post_impl_remap_file_pages; + 
__sanitizer_syscall_post_impl_removexattr; + __sanitizer_syscall_post_impl_rename; + __sanitizer_syscall_post_impl_renameat; + __sanitizer_syscall_post_impl_request_key; + __sanitizer_syscall_post_impl_restart_syscall; + __sanitizer_syscall_post_impl_rmdir; + __sanitizer_syscall_post_impl_rt_sigaction; + __sanitizer_syscall_post_impl_rt_sigpending; + __sanitizer_syscall_post_impl_rt_sigprocmask; + __sanitizer_syscall_post_impl_rt_sigqueueinfo; + __sanitizer_syscall_post_impl_rt_sigtimedwait; + __sanitizer_syscall_post_impl_rt_tgsigqueueinfo; + __sanitizer_syscall_post_impl_sched_get_priority_max; + __sanitizer_syscall_post_impl_sched_get_priority_min; + __sanitizer_syscall_post_impl_sched_getaffinity; + __sanitizer_syscall_post_impl_sched_getparam; + __sanitizer_syscall_post_impl_sched_getscheduler; + __sanitizer_syscall_post_impl_sched_rr_get_interval; + __sanitizer_syscall_post_impl_sched_setaffinity; + __sanitizer_syscall_post_impl_sched_setparam; + __sanitizer_syscall_post_impl_sched_setscheduler; + __sanitizer_syscall_post_impl_sched_yield; + __sanitizer_syscall_post_impl_select; + __sanitizer_syscall_post_impl_semctl; + __sanitizer_syscall_post_impl_semget; + __sanitizer_syscall_post_impl_semop; + __sanitizer_syscall_post_impl_semtimedop; + __sanitizer_syscall_post_impl_send; + __sanitizer_syscall_post_impl_sendfile; + __sanitizer_syscall_post_impl_sendfile64; + __sanitizer_syscall_post_impl_sendmmsg; + __sanitizer_syscall_post_impl_sendmsg; + __sanitizer_syscall_post_impl_sendto; + __sanitizer_syscall_post_impl_set_mempolicy; + __sanitizer_syscall_post_impl_set_robust_list; + __sanitizer_syscall_post_impl_set_tid_address; + __sanitizer_syscall_post_impl_setdomainname; + __sanitizer_syscall_post_impl_setfsgid; + __sanitizer_syscall_post_impl_setfsuid; + __sanitizer_syscall_post_impl_setgid; + __sanitizer_syscall_post_impl_setgroups; + __sanitizer_syscall_post_impl_sethostname; + __sanitizer_syscall_post_impl_setitimer; + __sanitizer_syscall_post_impl_setns; + __sanitizer_syscall_post_impl_setpgid; + __sanitizer_syscall_post_impl_setpriority; + __sanitizer_syscall_post_impl_setregid; + __sanitizer_syscall_post_impl_setresgid; + __sanitizer_syscall_post_impl_setresuid; + __sanitizer_syscall_post_impl_setreuid; + __sanitizer_syscall_post_impl_setrlimit; + __sanitizer_syscall_post_impl_setsid; + __sanitizer_syscall_post_impl_setsockopt; + __sanitizer_syscall_post_impl_settimeofday; + __sanitizer_syscall_post_impl_setuid; + __sanitizer_syscall_post_impl_setxattr; + __sanitizer_syscall_post_impl_sgetmask; + __sanitizer_syscall_post_impl_shmat; + __sanitizer_syscall_post_impl_shmctl; + __sanitizer_syscall_post_impl_shmdt; + __sanitizer_syscall_post_impl_shmget; + __sanitizer_syscall_post_impl_shutdown; + __sanitizer_syscall_post_impl_sigaction; + __sanitizer_syscall_post_impl_sigaltstack; + __sanitizer_syscall_post_impl_signal; + __sanitizer_syscall_post_impl_signalfd; + __sanitizer_syscall_post_impl_signalfd4; + __sanitizer_syscall_post_impl_sigpending; + __sanitizer_syscall_post_impl_sigprocmask; + __sanitizer_syscall_post_impl_socket; + __sanitizer_syscall_post_impl_socketcall; + __sanitizer_syscall_post_impl_socketpair; + __sanitizer_syscall_post_impl_splice; + __sanitizer_syscall_post_impl_spu_create; + __sanitizer_syscall_post_impl_spu_run; + __sanitizer_syscall_post_impl_ssetmask; + __sanitizer_syscall_post_impl_stat; + __sanitizer_syscall_post_impl_stat64; + __sanitizer_syscall_post_impl_statfs; + __sanitizer_syscall_post_impl_statfs64; + __sanitizer_syscall_post_impl_stime; + 
__sanitizer_syscall_post_impl_swapoff; + __sanitizer_syscall_post_impl_swapon; + __sanitizer_syscall_post_impl_symlink; + __sanitizer_syscall_post_impl_symlinkat; + __sanitizer_syscall_post_impl_sync; + __sanitizer_syscall_post_impl_syncfs; + __sanitizer_syscall_post_impl_sysctl; + __sanitizer_syscall_post_impl_sysfs; + __sanitizer_syscall_post_impl_sysinfo; + __sanitizer_syscall_post_impl_syslog; + __sanitizer_syscall_post_impl_tee; + __sanitizer_syscall_post_impl_tgkill; + __sanitizer_syscall_post_impl_time; + __sanitizer_syscall_post_impl_timer_create; + __sanitizer_syscall_post_impl_timer_delete; + __sanitizer_syscall_post_impl_timer_getoverrun; + __sanitizer_syscall_post_impl_timer_gettime; + __sanitizer_syscall_post_impl_timer_settime; + __sanitizer_syscall_post_impl_timerfd_create; + __sanitizer_syscall_post_impl_timerfd_gettime; + __sanitizer_syscall_post_impl_timerfd_settime; + __sanitizer_syscall_post_impl_times; + __sanitizer_syscall_post_impl_tkill; + __sanitizer_syscall_post_impl_truncate; + __sanitizer_syscall_post_impl_umask; + __sanitizer_syscall_post_impl_umount; + __sanitizer_syscall_post_impl_uname; + __sanitizer_syscall_post_impl_unlink; + __sanitizer_syscall_post_impl_unlinkat; + __sanitizer_syscall_post_impl_unshare; + __sanitizer_syscall_post_impl_uselib; + __sanitizer_syscall_post_impl_ustat; + __sanitizer_syscall_post_impl_utime; + __sanitizer_syscall_post_impl_utimensat; + __sanitizer_syscall_post_impl_utimes; + __sanitizer_syscall_post_impl_vfork; + __sanitizer_syscall_post_impl_vhangup; + __sanitizer_syscall_post_impl_vmsplice; + __sanitizer_syscall_post_impl_wait4; + __sanitizer_syscall_post_impl_waitid; + __sanitizer_syscall_post_impl_waitpid; + __sanitizer_syscall_post_impl_write; + __sanitizer_syscall_post_impl_writev; + __sanitizer_syscall_pre_impl_accept; + __sanitizer_syscall_pre_impl_accept4; + __sanitizer_syscall_pre_impl_access; + __sanitizer_syscall_pre_impl_acct; + __sanitizer_syscall_pre_impl_add_key; + __sanitizer_syscall_pre_impl_adjtimex; + __sanitizer_syscall_pre_impl_alarm; + __sanitizer_syscall_pre_impl_bdflush; + __sanitizer_syscall_pre_impl_bind; + __sanitizer_syscall_pre_impl_brk; + __sanitizer_syscall_pre_impl_capget; + __sanitizer_syscall_pre_impl_capset; + __sanitizer_syscall_pre_impl_chdir; + __sanitizer_syscall_pre_impl_chmod; + __sanitizer_syscall_pre_impl_chown; + __sanitizer_syscall_pre_impl_chroot; + __sanitizer_syscall_pre_impl_clock_adjtime; + __sanitizer_syscall_pre_impl_clock_getres; + __sanitizer_syscall_pre_impl_clock_gettime; + __sanitizer_syscall_pre_impl_clock_nanosleep; + __sanitizer_syscall_pre_impl_clock_settime; + __sanitizer_syscall_pre_impl_close; + __sanitizer_syscall_pre_impl_connect; + __sanitizer_syscall_pre_impl_creat; + __sanitizer_syscall_pre_impl_delete_module; + __sanitizer_syscall_pre_impl_dup; + __sanitizer_syscall_pre_impl_dup2; + __sanitizer_syscall_pre_impl_dup3; + __sanitizer_syscall_pre_impl_epoll_create; + __sanitizer_syscall_pre_impl_epoll_create1; + __sanitizer_syscall_pre_impl_epoll_ctl; + __sanitizer_syscall_pre_impl_epoll_pwait; + __sanitizer_syscall_pre_impl_epoll_pwait2; + __sanitizer_syscall_pre_impl_epoll_wait; + __sanitizer_syscall_pre_impl_eventfd; + __sanitizer_syscall_pre_impl_eventfd2; + __sanitizer_syscall_pre_impl_exit; + __sanitizer_syscall_pre_impl_exit_group; + __sanitizer_syscall_pre_impl_faccessat; + __sanitizer_syscall_pre_impl_fchdir; + __sanitizer_syscall_pre_impl_fchmod; + __sanitizer_syscall_pre_impl_fchmodat; + __sanitizer_syscall_pre_impl_fchown; + 
__sanitizer_syscall_pre_impl_fchownat; + __sanitizer_syscall_pre_impl_fcntl; + __sanitizer_syscall_pre_impl_fcntl64; + __sanitizer_syscall_pre_impl_fdatasync; + __sanitizer_syscall_pre_impl_fgetxattr; + __sanitizer_syscall_pre_impl_flistxattr; + __sanitizer_syscall_pre_impl_flock; + __sanitizer_syscall_pre_impl_fork; + __sanitizer_syscall_pre_impl_fremovexattr; + __sanitizer_syscall_pre_impl_fsetxattr; + __sanitizer_syscall_pre_impl_fstat; + __sanitizer_syscall_pre_impl_fstat64; + __sanitizer_syscall_pre_impl_fstatat64; + __sanitizer_syscall_pre_impl_fstatfs; + __sanitizer_syscall_pre_impl_fstatfs64; + __sanitizer_syscall_pre_impl_fsync; + __sanitizer_syscall_pre_impl_ftruncate; + __sanitizer_syscall_pre_impl_futimesat; + __sanitizer_syscall_pre_impl_get_mempolicy; + __sanitizer_syscall_pre_impl_get_robust_list; + __sanitizer_syscall_pre_impl_getcpu; + __sanitizer_syscall_pre_impl_getcwd; + __sanitizer_syscall_pre_impl_getdents; + __sanitizer_syscall_pre_impl_getdents64; + __sanitizer_syscall_pre_impl_getegid; + __sanitizer_syscall_pre_impl_geteuid; + __sanitizer_syscall_pre_impl_getgid; + __sanitizer_syscall_pre_impl_getgroups; + __sanitizer_syscall_pre_impl_gethostname; + __sanitizer_syscall_pre_impl_getitimer; + __sanitizer_syscall_pre_impl_getpeername; + __sanitizer_syscall_pre_impl_getpgid; + __sanitizer_syscall_pre_impl_getpgrp; + __sanitizer_syscall_pre_impl_getpid; + __sanitizer_syscall_pre_impl_getppid; + __sanitizer_syscall_pre_impl_getpriority; + __sanitizer_syscall_pre_impl_getrandom; + __sanitizer_syscall_pre_impl_getresgid; + __sanitizer_syscall_pre_impl_getresuid; + __sanitizer_syscall_pre_impl_getrlimit; + __sanitizer_syscall_pre_impl_getrusage; + __sanitizer_syscall_pre_impl_getsid; + __sanitizer_syscall_pre_impl_getsockname; + __sanitizer_syscall_pre_impl_getsockopt; + __sanitizer_syscall_pre_impl_gettid; + __sanitizer_syscall_pre_impl_gettimeofday; + __sanitizer_syscall_pre_impl_getuid; + __sanitizer_syscall_pre_impl_getxattr; + __sanitizer_syscall_pre_impl_init_module; + __sanitizer_syscall_pre_impl_inotify_add_watch; + __sanitizer_syscall_pre_impl_inotify_init; + __sanitizer_syscall_pre_impl_inotify_init1; + __sanitizer_syscall_pre_impl_inotify_rm_watch; + __sanitizer_syscall_pre_impl_io_cancel; + __sanitizer_syscall_pre_impl_io_destroy; + __sanitizer_syscall_pre_impl_io_getevents; + __sanitizer_syscall_pre_impl_io_setup; + __sanitizer_syscall_pre_impl_io_submit; + __sanitizer_syscall_pre_impl_ioctl; + __sanitizer_syscall_pre_impl_ioperm; + __sanitizer_syscall_pre_impl_ioprio_get; + __sanitizer_syscall_pre_impl_ioprio_set; + __sanitizer_syscall_pre_impl_ipc; + __sanitizer_syscall_pre_impl_kexec_load; + __sanitizer_syscall_pre_impl_keyctl; + __sanitizer_syscall_pre_impl_kill; + __sanitizer_syscall_pre_impl_lchown; + __sanitizer_syscall_pre_impl_lgetxattr; + __sanitizer_syscall_pre_impl_link; + __sanitizer_syscall_pre_impl_linkat; + __sanitizer_syscall_pre_impl_listen; + __sanitizer_syscall_pre_impl_listxattr; + __sanitizer_syscall_pre_impl_llistxattr; + __sanitizer_syscall_pre_impl_llseek; + __sanitizer_syscall_pre_impl_lookup_dcookie; + __sanitizer_syscall_pre_impl_lremovexattr; + __sanitizer_syscall_pre_impl_lseek; + __sanitizer_syscall_pre_impl_lsetxattr; + __sanitizer_syscall_pre_impl_lstat; + __sanitizer_syscall_pre_impl_lstat64; + __sanitizer_syscall_pre_impl_madvise; + __sanitizer_syscall_pre_impl_mbind; + __sanitizer_syscall_pre_impl_migrate_pages; + __sanitizer_syscall_pre_impl_mincore; + __sanitizer_syscall_pre_impl_mkdir; + 
__sanitizer_syscall_pre_impl_mkdirat; + __sanitizer_syscall_pre_impl_mknod; + __sanitizer_syscall_pre_impl_mknodat; + __sanitizer_syscall_pre_impl_mlock; + __sanitizer_syscall_pre_impl_mlockall; + __sanitizer_syscall_pre_impl_mmap_pgoff; + __sanitizer_syscall_pre_impl_mount; + __sanitizer_syscall_pre_impl_move_pages; + __sanitizer_syscall_pre_impl_mprotect; + __sanitizer_syscall_pre_impl_mq_getsetattr; + __sanitizer_syscall_pre_impl_mq_notify; + __sanitizer_syscall_pre_impl_mq_open; + __sanitizer_syscall_pre_impl_mq_timedreceive; + __sanitizer_syscall_pre_impl_mq_timedsend; + __sanitizer_syscall_pre_impl_mq_unlink; + __sanitizer_syscall_pre_impl_mremap; + __sanitizer_syscall_pre_impl_msgctl; + __sanitizer_syscall_pre_impl_msgget; + __sanitizer_syscall_pre_impl_msgrcv; + __sanitizer_syscall_pre_impl_msgsnd; + __sanitizer_syscall_pre_impl_msync; + __sanitizer_syscall_pre_impl_munlock; + __sanitizer_syscall_pre_impl_munlockall; + __sanitizer_syscall_pre_impl_munmap; + __sanitizer_syscall_pre_impl_name_to_handle_at; + __sanitizer_syscall_pre_impl_nanosleep; + __sanitizer_syscall_pre_impl_newfstat; + __sanitizer_syscall_pre_impl_newfstatat; + __sanitizer_syscall_pre_impl_newlstat; + __sanitizer_syscall_pre_impl_newstat; + __sanitizer_syscall_pre_impl_newuname; + __sanitizer_syscall_pre_impl_ni_syscall; + __sanitizer_syscall_pre_impl_nice; + __sanitizer_syscall_pre_impl_old_getrlimit; + __sanitizer_syscall_pre_impl_old_mmap; + __sanitizer_syscall_pre_impl_old_readdir; + __sanitizer_syscall_pre_impl_old_select; + __sanitizer_syscall_pre_impl_oldumount; + __sanitizer_syscall_pre_impl_olduname; + __sanitizer_syscall_pre_impl_open; + __sanitizer_syscall_pre_impl_open_by_handle_at; + __sanitizer_syscall_pre_impl_openat; + __sanitizer_syscall_pre_impl_pause; + __sanitizer_syscall_pre_impl_pciconfig_iobase; + __sanitizer_syscall_pre_impl_pciconfig_read; + __sanitizer_syscall_pre_impl_pciconfig_write; + __sanitizer_syscall_pre_impl_perf_event_open; + __sanitizer_syscall_pre_impl_personality; + __sanitizer_syscall_pre_impl_pipe; + __sanitizer_syscall_pre_impl_pipe2; + __sanitizer_syscall_pre_impl_pivot_root; + __sanitizer_syscall_pre_impl_poll; + __sanitizer_syscall_pre_impl_ppoll; + __sanitizer_syscall_pre_impl_pread64; + __sanitizer_syscall_pre_impl_preadv; + __sanitizer_syscall_pre_impl_prlimit64; + __sanitizer_syscall_pre_impl_process_vm_readv; + __sanitizer_syscall_pre_impl_process_vm_writev; + __sanitizer_syscall_pre_impl_pselect6; + __sanitizer_syscall_pre_impl_ptrace; + __sanitizer_syscall_pre_impl_pwrite64; + __sanitizer_syscall_pre_impl_pwritev; + __sanitizer_syscall_pre_impl_quotactl; + __sanitizer_syscall_pre_impl_read; + __sanitizer_syscall_pre_impl_readlink; + __sanitizer_syscall_pre_impl_readlinkat; + __sanitizer_syscall_pre_impl_readv; + __sanitizer_syscall_pre_impl_reboot; + __sanitizer_syscall_pre_impl_recv; + __sanitizer_syscall_pre_impl_recvfrom; + __sanitizer_syscall_pre_impl_recvmmsg; + __sanitizer_syscall_pre_impl_recvmsg; + __sanitizer_syscall_pre_impl_remap_file_pages; + __sanitizer_syscall_pre_impl_removexattr; + __sanitizer_syscall_pre_impl_rename; + __sanitizer_syscall_pre_impl_renameat; + __sanitizer_syscall_pre_impl_request_key; + __sanitizer_syscall_pre_impl_restart_syscall; + __sanitizer_syscall_pre_impl_rmdir; + __sanitizer_syscall_pre_impl_rt_sigaction; + __sanitizer_syscall_pre_impl_rt_sigpending; + __sanitizer_syscall_pre_impl_rt_sigprocmask; + __sanitizer_syscall_pre_impl_rt_sigqueueinfo; + __sanitizer_syscall_pre_impl_rt_sigtimedwait; + 
__sanitizer_syscall_pre_impl_rt_tgsigqueueinfo; + __sanitizer_syscall_pre_impl_sched_get_priority_max; + __sanitizer_syscall_pre_impl_sched_get_priority_min; + __sanitizer_syscall_pre_impl_sched_getaffinity; + __sanitizer_syscall_pre_impl_sched_getparam; + __sanitizer_syscall_pre_impl_sched_getscheduler; + __sanitizer_syscall_pre_impl_sched_rr_get_interval; + __sanitizer_syscall_pre_impl_sched_setaffinity; + __sanitizer_syscall_pre_impl_sched_setparam; + __sanitizer_syscall_pre_impl_sched_setscheduler; + __sanitizer_syscall_pre_impl_sched_yield; + __sanitizer_syscall_pre_impl_select; + __sanitizer_syscall_pre_impl_semctl; + __sanitizer_syscall_pre_impl_semget; + __sanitizer_syscall_pre_impl_semop; + __sanitizer_syscall_pre_impl_semtimedop; + __sanitizer_syscall_pre_impl_send; + __sanitizer_syscall_pre_impl_sendfile; + __sanitizer_syscall_pre_impl_sendfile64; + __sanitizer_syscall_pre_impl_sendmmsg; + __sanitizer_syscall_pre_impl_sendmsg; + __sanitizer_syscall_pre_impl_sendto; + __sanitizer_syscall_pre_impl_set_mempolicy; + __sanitizer_syscall_pre_impl_set_robust_list; + __sanitizer_syscall_pre_impl_set_tid_address; + __sanitizer_syscall_pre_impl_setdomainname; + __sanitizer_syscall_pre_impl_setfsgid; + __sanitizer_syscall_pre_impl_setfsuid; + __sanitizer_syscall_pre_impl_setgid; + __sanitizer_syscall_pre_impl_setgroups; + __sanitizer_syscall_pre_impl_sethostname; + __sanitizer_syscall_pre_impl_setitimer; + __sanitizer_syscall_pre_impl_setns; + __sanitizer_syscall_pre_impl_setpgid; + __sanitizer_syscall_pre_impl_setpriority; + __sanitizer_syscall_pre_impl_setregid; + __sanitizer_syscall_pre_impl_setresgid; + __sanitizer_syscall_pre_impl_setresuid; + __sanitizer_syscall_pre_impl_setreuid; + __sanitizer_syscall_pre_impl_setrlimit; + __sanitizer_syscall_pre_impl_setsid; + __sanitizer_syscall_pre_impl_setsockopt; + __sanitizer_syscall_pre_impl_settimeofday; + __sanitizer_syscall_pre_impl_setuid; + __sanitizer_syscall_pre_impl_setxattr; + __sanitizer_syscall_pre_impl_sgetmask; + __sanitizer_syscall_pre_impl_shmat; + __sanitizer_syscall_pre_impl_shmctl; + __sanitizer_syscall_pre_impl_shmdt; + __sanitizer_syscall_pre_impl_shmget; + __sanitizer_syscall_pre_impl_shutdown; + __sanitizer_syscall_pre_impl_sigaction; + __sanitizer_syscall_pre_impl_sigaltstack; + __sanitizer_syscall_pre_impl_signal; + __sanitizer_syscall_pre_impl_signalfd; + __sanitizer_syscall_pre_impl_signalfd4; + __sanitizer_syscall_pre_impl_sigpending; + __sanitizer_syscall_pre_impl_sigprocmask; + __sanitizer_syscall_pre_impl_socket; + __sanitizer_syscall_pre_impl_socketcall; + __sanitizer_syscall_pre_impl_socketpair; + __sanitizer_syscall_pre_impl_splice; + __sanitizer_syscall_pre_impl_spu_create; + __sanitizer_syscall_pre_impl_spu_run; + __sanitizer_syscall_pre_impl_ssetmask; + __sanitizer_syscall_pre_impl_stat; + __sanitizer_syscall_pre_impl_stat64; + __sanitizer_syscall_pre_impl_statfs; + __sanitizer_syscall_pre_impl_statfs64; + __sanitizer_syscall_pre_impl_stime; + __sanitizer_syscall_pre_impl_swapoff; + __sanitizer_syscall_pre_impl_swapon; + __sanitizer_syscall_pre_impl_symlink; + __sanitizer_syscall_pre_impl_symlinkat; + __sanitizer_syscall_pre_impl_sync; + __sanitizer_syscall_pre_impl_syncfs; + __sanitizer_syscall_pre_impl_sysctl; + __sanitizer_syscall_pre_impl_sysfs; + __sanitizer_syscall_pre_impl_sysinfo; + __sanitizer_syscall_pre_impl_syslog; + __sanitizer_syscall_pre_impl_tee; + __sanitizer_syscall_pre_impl_tgkill; + __sanitizer_syscall_pre_impl_time; + __sanitizer_syscall_pre_impl_timer_create; + 
__sanitizer_syscall_pre_impl_timer_delete; + __sanitizer_syscall_pre_impl_timer_getoverrun; + __sanitizer_syscall_pre_impl_timer_gettime; + __sanitizer_syscall_pre_impl_timer_settime; + __sanitizer_syscall_pre_impl_timerfd_create; + __sanitizer_syscall_pre_impl_timerfd_gettime; + __sanitizer_syscall_pre_impl_timerfd_settime; + __sanitizer_syscall_pre_impl_times; + __sanitizer_syscall_pre_impl_tkill; + __sanitizer_syscall_pre_impl_truncate; + __sanitizer_syscall_pre_impl_umask; + __sanitizer_syscall_pre_impl_umount; + __sanitizer_syscall_pre_impl_uname; + __sanitizer_syscall_pre_impl_unlink; + __sanitizer_syscall_pre_impl_unlinkat; + __sanitizer_syscall_pre_impl_unshare; + __sanitizer_syscall_pre_impl_uselib; + __sanitizer_syscall_pre_impl_ustat; + __sanitizer_syscall_pre_impl_utime; + __sanitizer_syscall_pre_impl_utimensat; + __sanitizer_syscall_pre_impl_utimes; + __sanitizer_syscall_pre_impl_vfork; + __sanitizer_syscall_pre_impl_vhangup; + __sanitizer_syscall_pre_impl_vmsplice; + __sanitizer_syscall_pre_impl_wait4; + __sanitizer_syscall_pre_impl_waitid; + __sanitizer_syscall_pre_impl_waitpid; + __sanitizer_syscall_pre_impl_write; + __sanitizer_syscall_pre_impl_writev; + __sanitizer_unaligned_load16; + __sanitizer_unaligned_load32; + __sanitizer_unaligned_load64; + __sanitizer_unaligned_store16; + __sanitizer_unaligned_store32; + __sanitizer_unaligned_store64; + __sanitizer_weak_hook_memcmp; + __sanitizer_weak_hook_memmem; + __sanitizer_weak_hook_strcasecmp; + __sanitizer_weak_hook_strcasestr; + __sanitizer_weak_hook_strcmp; + __sanitizer_weak_hook_strncasecmp; + __sanitizer_weak_hook_strncmp; + __sanitizer_weak_hook_strstr; + __sigsetjmp; + __snprintf_chk; + __sprintf_chk; + __strndup; + __strxfrm_l; + __tls_get_addr; + __tsan_acquire; + __tsan_atomic*; + __tsan_create_fiber; + __tsan_destroy_fiber; + __tsan_flush_memory; + __tsan_func*; + __tsan_get_current_fiber; + __tsan_init; + __tsan_java*; + __tsan_mutex_create; + __tsan_mutex_destroy; + __tsan_mutex_post_divert; + __tsan_mutex_post_lock; + __tsan_mutex_post_signal; + __tsan_mutex_post_unlock; + __tsan_mutex_pre_divert; + __tsan_mutex_pre_lock; + __tsan_mutex_pre_signal; + __tsan_mutex_pre_unlock; + __tsan_read*; + __tsan_release; + __tsan_set_fiber_name; + __tsan_switch_to_fiber; + __tsan_unaligned*; + __tsan_vptr*; + __tsan_write*; + __ubsan_*; + __uflow; + __underflow; + __vsnprintf_chk; + __vsprintf_chk; + __wcsxfrm_l; + __woverflow; + __wuflow; + __wunderflow; + __xpg_strerror_r; + __xstat; + __xstat64; + _exit; + _obstack_begin; + _obstack_begin_1; + _obstack_newchunk; + _setjmp; + abort; + accept; + accept4; + aligned_alloc; + asctime; + asctime_r; + asprintf; + atexit; + backtrace; + backtrace_symbols; + bcmp; + bind; + bsearch; + bzero; + calloc; + canonicalize_file_name; + capget; + capset; + cfree; + clock_getcpuclockid; + clock_getres; + clock_gettime; + clock_settime; + close; + closedir; + confstr; + connect; + creat; + creat64; + crypt; + crypt_r; + ctermid; + ctime; + ctime_r; + dl_iterate_phdr; + dlclose; + dlopen; + drand48_r; + dup; + dup2; + dup3; + endgrent; + endpwent; + epoll_create; + epoll_create1; + epoll_ctl; + epoll_pwait; + epoll_wait; + ether_aton; + ether_aton_r; + ether_hostton; + ether_line; + ether_ntoa; + ether_ntoa_r; + ether_ntohost; + eventfd; + eventfd_read; + eventfd_write; + fclose; + fdopen; + fflush; + fgetgrent; + fgetgrent_r; + fgetpwent; + fgetpwent_r; + fgets; + fgetxattr; + flistxattr; + fmemopen; + fopen; + fopen64; + fopencookie; + fork; + fprintf; + fputs; + fread; + free; + 
+  freopen;
+  freopen64;
+  frexp;
+  frexpf;
+  frexpl;
+  fscanf;
+  fstat;
+  fstat64;
+  fstatfs;
+  fstatfs64;
+  fstatvfs;
+  fstatvfs64;
+  ftime;
+  fwrite;
+  get_current_dir_name;
+  getaddrinfo;
+  getcwd;
+  getdelim;
+  getgrent;
+  getgrent_r;
+  getgrgid;
+  getgrgid_r;
+  getgrnam;
+  getgrnam_r;
+  getgroups;
+  gethostbyaddr;
+  gethostbyaddr_r;
+  gethostbyname;
+  gethostbyname2;
+  gethostbyname2_r;
+  gethostbyname_r;
+  gethostent;
+  gethostent_r;
+  getifaddrs;
+  getitimer;
+  getline;
+  getloadavg;
+  getmntent;
+  getmntent_r;
+  getnameinfo;
+  getpass;
+  getpeername;
+  getprotobyname;
+  getprotobyname_r;
+  getprotobynumber;
+  getprotobynumber_r;
+  getprotoent;
+  getprotoent_r;
+  getpwent;
+  getpwent_r;
+  getpwnam;
+  getpwnam_r;
+  getpwuid;
+  getpwuid_r;
+  getresgid;
+  getresuid;
+  getsockname;
+  getsockopt;
+  gettimeofday;
+  getusershell;
+  getutent;
+  getutid;
+  getutline;
+  getutxent;
+  getutxid;
+  getutxline;
+  getxattr;
+  glob;
+  glob64;
+  gmtime;
+  gmtime_r;
+  iconv;
+  if_indextoname;
+  if_nametoindex;
+  inet_aton;
+  inet_ntop;
+  inet_pton;
+  initgroups;
+  inotify_init;
+  inotify_init1;
+  ioctl;
+  kill;
+  lgamma;
+  lgamma_r;
+  lgammaf;
+  lgammaf_r;
+  lgammal;
+  lgammal_r;
+  lgetxattr;
+  listen;
+  listxattr;
+  llistxattr;
+  localtime;
+  localtime_r;
+  longjmp;
+  lrand48_r;
+  malloc;
+  malloc_usable_size;
+  mbsnrtowcs;
+  mbsrtowcs;
+  mbstowcs;
+  mcheck;
+  mcheck_pedantic;
+  memalign;
+  memchr;
+  memcmp;
+  memmem;
+  memmove;
+  memrchr;
+  memset;
+  mincore;
+  mktime;
+  mlock;
+  mlockall;
+  mmap;
+  mmap64;
+  modf;
+  modff;
+  modfl;
+  mprobe;
+  mprotect;
+  msgrcv;
+  msgsnd;
+  munlock;
+  munlockall;
+  munmap;
+  name_to_handle_at;
+  nanosleep;
+  on_exit;
+  open;
+  open64;
+  open_by_handle_at;
+  open_memstream;
+  open_wmemstream;
+  opendir;
+  pause;
+  pclose;
+  pipe;
+  pipe2;
+  poll;
+  popen;
+  posix_memalign;
+  ppoll;
+  prctl;
+  pread;
+  pread64;
+  preadv;
+  preadv64;
+  printf;
+  process_vm_readv;
+  process_vm_writev;
+  pthread_attr_getdetachstate;
+  pthread_attr_getguardsize;
+  pthread_attr_getinheritsched;
+  pthread_attr_getschedparam;
+  pthread_attr_getschedpolicy;
+  pthread_attr_getscope;
+  pthread_attr_getstack;
+  pthread_attr_getstacksize;
+  pthread_barrier_destroy;
+  pthread_barrier_init;
+  pthread_barrier_wait;
+  pthread_barrierattr_getpshared;
+  pthread_cond_clockwait;
+  pthread_condattr_getclock;
+  pthread_condattr_getpshared;
+  pthread_create;
+  pthread_detach;
+  pthread_exit;
+  pthread_getname_np;
+  pthread_getschedparam;
+  pthread_join;
+  pthread_kill;
+  pthread_mutex_destroy;
+  pthread_mutex_init;
+  pthread_mutex_lock;
+  pthread_mutex_timedlock;
+  pthread_mutex_trylock;
+  pthread_mutex_unlock;
+  pthread_mutexattr_getprioceiling;
+  pthread_mutexattr_getprotocol;
+  pthread_mutexattr_getpshared;
+  pthread_mutexattr_getrobust;
+  pthread_mutexattr_getrobust_np;
+  pthread_mutexattr_gettype;
+  pthread_once;
+  pthread_rwlock_destroy;
+  pthread_rwlock_init;
+  pthread_rwlock_rdlock;
+  pthread_rwlock_timedrdlock;
+  pthread_rwlock_timedwrlock;
+  pthread_rwlock_tryrdlock;
+  pthread_rwlock_trywrlock;
+  pthread_rwlock_unlock;
+  pthread_rwlock_wrlock;
+  pthread_rwlockattr_getkind_np;
+  pthread_rwlockattr_getpshared;
+  pthread_setcancelstate;
+  pthread_setcanceltype;
+  pthread_setname_np;
+  pthread_sigmask;
+  pthread_spin_destroy;
+  pthread_spin_init;
+  pthread_spin_lock;
+  pthread_spin_trylock;
+  pthread_spin_unlock;
+  pthread_timedjoin_np;
+  pthread_tryjoin_np;
+  ptrace;
+  ptsname;
+  ptsname_r;
+  puts;
+  pututxline;
+  pvalloc;
+  pwrite;
+  pwrite64;
+  pwritev;
+  pwritev64;
+  qsort;
+  qsort_r;
+  raise;
+  rand_r;
+  random_r;
+  read;
+  readdir;
+  readdir64;
+  readdir64_r;
+  readdir_r;
+  readlink;
+  readlinkat;
+  readv;
+  realloc;
+  reallocarray;
+  recv;
+  recvfrom;
+  recvmmsg;
+  recvmsg;
+  regcomp;
+  regerror;
+  regexec;
+  regfree;
+  remquo;
+  remquof;
+  remquol;
+  rmdir;
+  scandir;
+  scandir64;
+  scanf;
+  sched_getparam;
+  sem_destroy;
+  sem_getvalue;
+  sem_init;
+  sem_post;
+  sem_timedwait;
+  sem_trywait;
+  sem_wait;
+  send;
+  sendmmsg;
+  sendmsg;
+  sendto;
+  setbuf;
+  setbuffer;
+  setgrent;
+  setitimer;
+  setjmp;
+  setlinebuf;
+  setlocale;
+  setpwent;
+  setvbuf;
+  shmctl;
+  sigaction;
+  sigaltstack;
+  sigandset;
+  sigblock;
+  sigemptyset;
+  sigfillset;
+  siglongjmp;
+  signal;
+  signalfd;
+  sigorset;
+  sigpending;
+  sigprocmask;
+  sigsetjmp;
+  sigsetmask;
+  sigsuspend;
+  sigtimedwait;
+  sigwait;
+  sigwaitinfo;
+  sincos;
+  sincosf;
+  sincosl;
+  sleep;
+  snprintf;
+  socket;
+  socketpair;
+  sprintf;
+  sscanf;
+  statfs;
+  statfs64;
+  statvfs;
+  statvfs64;
+  strcasecmp;
+  strcasestr;
+  strchr;
+  strchrnul;
+  strcmp;
+  strcpy;
+  strcspn;
+  strdup;
+  strerror;
+  strerror_r;
+  strlen;
+  strncasecmp;
+  strncmp;
+  strncpy;
+  strndup;
+  strnlen;
+  strpbrk;
+  strptime;
+  strrchr;
+  strspn;
+  strstr;
+  strtoimax;
+  strtok;
+  strtoumax;
+  strxfrm;
+  strxfrm_l;
+  sysinfo;
+  tcgetattr;
+  tempnam;
+  textdomain;
+  time;
+  timerfd_gettime;
+  timerfd_settime;
+  times;
+  tmpfile;
+  tmpfile64;
+  tmpnam;
+  tmpnam_r;
+  tsearch;
+  ttyname;
+  ttyname_r;
+  uname;
+  unlink;
+  usleep;
+  valloc;
+  vasprintf;
+  vfork;
+  vfprintf;
+  vfscanf;
+  vprintf;
+  vscanf;
+  vsnprintf;
+  vsprintf;
+  vsscanf;
+  wait;
+  wait3;
+  wait4;
+  waitid;
+  waitpid;
+  wcrtomb;
+  wcscat;
+  wcsdup;
+  wcslen;
+  wcsncat;
+  wcsnlen;
+  wcsnrtombs;
+  wcsrtombs;
+  wcstombs;
+  wcsxfrm;
+  wcsxfrm_l;
+  wctomb;
+  wordexp;
+  write;
+  writev;
+  xdr_bool;
+  xdr_bytes;
+  xdr_char;
+  xdr_destroy;
+  xdr_double;
+  xdr_enum;
+  xdr_float;
+  xdr_hyper;
+  xdr_int;
+  xdr_int16_t;
+  xdr_int32_t;
+  xdr_int64_t;
+  xdr_int8_t;
+  xdr_long;
+  xdr_longlong_t;
+  xdr_quad_t;
+  xdr_short;
+  xdr_string;
+  xdr_u_char;
+  xdr_u_hyper;
+  xdr_u_int;
+  xdr_u_long;
+  xdr_u_longlong_t;
+  xdr_u_quad_t;
+  xdr_u_short;
+  xdr_uint16_t;
+  xdr_uint32_t;
+  xdr_uint64_t;
+  xdr_uint8_t;
+  xdrmem_create;
+  xdrrec_create;
+  xdrstdio_create;
+};
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan_cxx-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan_cxx-x86_64.a
new file mode 100644
index 0000000..fadde19
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan_cxx-x86_64.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan_cxx-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan_cxx-x86_64.a.syms
new file mode 100644
index 0000000..354ecad
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.tsan_cxx-x86_64.a.syms
@@ -0,0 +1,53 @@
+{
+  Annotate*;
+  RunningOnValgrind;
+  ValgrindSlowdown;
+  WTFAnnotate*;
+  _ZdaPv;
+  _ZdaPvRKSt9nothrow_t;
+  _ZdaPvSt11align_val_t;
+  _ZdaPvSt11align_val_tRKSt9nothrow_t;
+  _ZdaPvm;
+  _ZdaPvmSt11align_val_t;
+  _ZdlPv;
+  _ZdlPvRKSt9nothrow_t;
+  _ZdlPvSt11align_val_t;
+  _ZdlPvSt11align_val_tRKSt9nothrow_t;
+  _ZdlPvm;
+  _ZdlPvmSt11align_val_t;
+  _Znam;
+  _ZnamRKSt9nothrow_t;
+  _ZnamSt11align_val_t;
+  _ZnamSt11align_val_tRKSt9nothrow_t;
+  _Znwm;
+  _ZnwmRKSt9nothrow_t;
+  _ZnwmSt11align_val_t;
+  _ZnwmSt11align_val_tRKSt9nothrow_t;
+  __tsan_acquire;
+  __tsan_atomic*;
+  __tsan_create_fiber;
+  __tsan_destroy_fiber;
+  __tsan_flush_memory;
+  __tsan_func*;
+  __tsan_get_current_fiber;
+  __tsan_init;
+  __tsan_java*;
+  __tsan_mutex_create;
+  __tsan_mutex_destroy;
+  __tsan_mutex_post_divert;
+  __tsan_mutex_post_lock;
+  __tsan_mutex_post_signal;
+  __tsan_mutex_post_unlock;
+  __tsan_mutex_pre_divert;
+  __tsan_mutex_pre_lock;
+  __tsan_mutex_pre_signal;
+  __tsan_mutex_pre_unlock;
+  __tsan_read*;
+  __tsan_release;
+  __tsan_set_fiber_name;
+  __tsan_switch_to_fiber;
+  __tsan_unaligned*;
+  __tsan_vptr*;
+  __tsan_write*;
+  __ubsan_*;
+};
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-aarch64-android.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-aarch64-android.so
new file mode 100755
index 0000000..1e41505
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-aarch64-android.so differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-arm-android.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-arm-android.so
new file mode 100755
index 0000000..b087d8d
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-arm-android.so differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-i386.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-i386.a
new file mode 100644
index 0000000..d2ccf38
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-i386.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-x86_64.a
new file mode 100644
index 0000000..5c64ac5
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-x86_64.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-x86_64.a.syms
new file mode 100644
index 0000000..448cd7d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone-x86_64.a.syms
@@ -0,0 +1,43 @@
+{
+  __interceptor_sigaction;
+  __interceptor_signal;
+  __sanitizer_acquire_crash_state;
+  __sanitizer_cov_8bit_counters_init;
+  __sanitizer_cov_bool_flag_init;
+  __sanitizer_cov_dump;
+  __sanitizer_cov_pcs_init;
+  __sanitizer_cov_reset;
+  __sanitizer_cov_trace_cmp;
+  __sanitizer_cov_trace_cmp1;
+  __sanitizer_cov_trace_cmp2;
+  __sanitizer_cov_trace_cmp4;
+  __sanitizer_cov_trace_cmp8;
+  __sanitizer_cov_trace_const_cmp1;
+  __sanitizer_cov_trace_const_cmp2;
+  __sanitizer_cov_trace_const_cmp4;
+  __sanitizer_cov_trace_const_cmp8;
+  __sanitizer_cov_trace_div4;
+  __sanitizer_cov_trace_div8;
+  __sanitizer_cov_trace_gep;
+  __sanitizer_cov_trace_pc_guard;
+  __sanitizer_cov_trace_pc_guard_init;
+  __sanitizer_cov_trace_pc_indir;
+  __sanitizer_cov_trace_switch;
+  __sanitizer_dump_coverage;
+  __sanitizer_dump_trace_pc_guard_coverage;
+  __sanitizer_get_module_and_offset_for_pc;
+  __sanitizer_get_report_path;
+  __sanitizer_install_malloc_and_free_hooks;
+  __sanitizer_on_print;
+  __sanitizer_print_stack_trace;
+  __sanitizer_report_error_summary;
+  __sanitizer_sandbox_on_notify;
+  __sanitizer_set_death_callback;
+  __sanitizer_set_report_fd;
+  __sanitizer_set_report_path;
+  __sanitizer_symbolize_global;
+  __sanitizer_symbolize_pc;
+  __ubsan_*;
+  sigaction;
+  signal;
+};
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-i386.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-i386.a
new file mode 100644
index 0000000..087e999
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-i386.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-x86_64.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-x86_64.a
new file mode 100644
index 0000000..dffabd8
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-x86_64.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-x86_64.a.syms b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-x86_64.a.syms
new file mode 100644
index 0000000..c55caef
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/linux/libclang_rt.ubsan_standalone_cxx-x86_64.a.syms
@@ -0,0 +1,3 @@
+{
+  __ubsan_*;
+};
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.asan.so b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.asan.so
new file mode 100755
index 0000000..9c9b665
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.asan.so differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.builtins.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.builtins.a
new file mode 100644
index 0000000..ef47b05
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.builtins.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.profile.a b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.profile.a
new file mode 100644
index 0000000..a5412ab
Binary files /dev/null and b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/lib/x86_64-unknown-fuchsia/libclang_rt.profile.a differ
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/asan_ignorelist.txt b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/asan_ignorelist.txt
new file mode 100644
index 0000000..5ce5992
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/asan_ignorelist.txt
@@ -0,0 +1,13 @@
+# Ignorelist for AddressSanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set location of ignorelist
+# at compile-time using -fsanitize-ignorelist= flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
+# global:*global_with_bad_access_or_initialization*
+# global:*global_with_initialization_issues*=init
+# type:*Namespace::ClassName*=init
+
+# Stack buffer overflow in VC/INCLUDE/xlocnum, see http://goo.gl/L4qqUG
+fun:*_Find_elem@*@std*
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/cfi_ignorelist.txt b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/cfi_ignorelist.txt
new file mode 100644
index 0000000..4a0f039
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/cfi_ignorelist.txt
@@ -0,0 +1,17 @@
+[cfi-unrelated-cast]
+# The specification of std::get_temporary_buffer mandates a cast to
+# uninitialized T* (libstdc++, MSVC stdlib).
+fun:_ZSt20get_temporary_buffer*
+fun:*get_temporary_buffer@.*@std@@*
+
+# STL address-of magic (libstdc++).
+fun:*__addressof*
+
+# Windows C++ stdlib headers that contain bad unrelated casts.
+src:*xmemory0
+src:*xstddef
+
+# std::_Sp_counted_ptr_inplace::_Sp_counted_ptr_inplace() (libstdc++).
+# This ctor is used by std::make_shared and needs to cast to uninitialized T*
+# in order to call std::allocator_traits::construct.
+fun:_ZNSt23_Sp_counted_ptr_inplace*
diff --git a/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/msan_ignorelist.txt b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/msan_ignorelist.txt
new file mode 100644
index 0000000..1fae64d
--- /dev/null
+++ b/third_party/llvm-build/Release+Asserts/lib/clang/14.0.0/share/msan_ignorelist.txt
@@ -0,0 +1,10 @@
+# Ignorelist for MemorySanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set location of ignorelist
+# at compile-time using -fsanitize-ignorelist= flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
+
+# https://bugs.llvm.org/show_bug.cgi?id=31877
+fun:__gxx_personality_*
diff --git a/tools/clang/scripts/update.py b/tools/clang/scripts/update.py
index 24d1aff..d256471 100755
--- a/tools/clang/scripts/update.py
+++ b/tools/clang/scripts/update.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python3
-# Copyright 2012 The Chromium Authors
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -9,37 +9,41 @@
 It can also be run stand-alone as a convenient way of installing a well-tested
 near-tip-of-tree clang version:
 
-  $ curl -s https://raw.githubusercontent.com/chromium/chromium/main/tools/clang/scripts/update.py | python3 - --output-dir=/tmp/clang
+  $ curl -s https://raw.githubusercontent.com/chromium/chromium/main/tools/clang/scripts/update.py | python - --output-dir=/tmp/clang
 
 (Note that the output dir may be deleted and re-created if it exists.)
 """
 
-import sys
-assert sys.version_info >= (3, 0), 'This script requires Python 3.'
-
+from __future__ import division
+from __future__ import print_function
 import argparse
 import os
 import platform
 import shutil
 import stat
+import sys
 import tarfile
 import tempfile
 import time
-import urllib.request
-import urllib.error
+
+try:
+  from urllib2 import HTTPError, URLError, urlopen
+except ImportError:  # For Py3 compatibility
+  from urllib.error import HTTPError, URLError
+  from urllib.request import urlopen
+
 import zipfile
-import zlib
 
 # Do NOT CHANGE this if you don't know what you're doing -- see
 # https://chromium.googlesource.com/chromium/src/+/main/docs/updating_clang.md
 # Reverting problematic clang rolls is safe, though.
 # This is the output of `git describe` and is usable as a commit-ish.
-CLANG_REVISION = 'llvmorg-16-init-6578-g0d30e92f'
-CLANG_SUB_REVISION = 2
+CLANG_REVISION = 'llvmorg-14-init-1002-gb5e470aa'
+CLANG_SUB_REVISION = 1
 
 PACKAGE_VERSION = '%s-%s' % (CLANG_REVISION, CLANG_SUB_REVISION)
-RELEASE_VERSION = '16.0.0'
+RELEASE_VERSION = '14.0.0'
 
 CDS_URL = os.environ.get('CDS_CLANG_BUCKET_OVERRIDE',
     'https://commondatastorage.googleapis.com/chromium-browser-clang')
@@ -97,51 +101,30 @@ def DownloadUrl(url, output_file):
   try:
     sys.stdout.write('Downloading %s ' % url)
     sys.stdout.flush()
-    request = urllib.request.Request(url)
-    request.add_header('Accept-Encoding', 'gzip')
-    response = urllib.request.urlopen(request)
-    total_size = None
-    if 'Content-Length' in response.headers:
-      total_size = int(response.headers['Content-Length'].strip())
-
-    is_gzipped = response.headers.get('Content-Encoding',
-                                      '').strip() == 'gzip'
-    if is_gzipped:
-      gzip_decode = zlib.decompressobj(zlib.MAX_WBITS + 16)
-
+    response = urlopen(url)
+    total_size = int(response.info().get('Content-Length').strip())
     bytes_done = 0
     dots_printed = 0
     while True:
      chunk = response.read(CHUNK_SIZE)
      if not chunk:
        break
-     bytes_done += len(chunk)
-
-     if is_gzipped:
-       chunk = gzip_decode.decompress(chunk)
     output_file.write(chunk)
-
-    if total_size is not None:
-      num_dots = TOTAL_DOTS * bytes_done // total_size
-      sys.stdout.write('.' * (num_dots - dots_printed))
-      sys.stdout.flush()
-      dots_printed = num_dots
-    if total_size is not None and bytes_done != total_size:
-      raise urllib.error.URLError("only got %d of %d bytes" %
-                                  (bytes_done, total_size))
-    if is_gzipped:
-      output_file.write(gzip_decode.flush())
+     bytes_done += len(chunk)
+     num_dots = TOTAL_DOTS * bytes_done // total_size
+     sys.stdout.write('.' * (num_dots - dots_printed))
+     sys.stdout.flush()
+     dots_printed = num_dots
+    if bytes_done != total_size:
+      raise URLError("only got %d of %d bytes" % (bytes_done, total_size))
    print(' Done.')
    return
-  except urllib.error.URLError as e:
+  except URLError as e:
    sys.stdout.write('\n')
    print(e)
-    if num_retries == 0 or isinstance(
-        e, urllib.error.HTTPError) and e.code == 404:
+    if num_retries == 0 or isinstance(e, HTTPError) and e.code == 404:
      raise e
    num_retries -= 1
-    output_file.seek(0)
-    output_file.truncate()
    print('Retrying in %d s ...'
          % retry_wait_s)
    sys.stdout.flush()
    time.sleep(retry_wait_s)
@@ -165,7 +148,7 @@ def DownloadAndUnpack(url, output_dir, path_prefixes=None):
       assert path_prefixes is None
       zipfile.ZipFile(f).extractall(path=output_dir)
     else:
-      t = tarfile.open(mode='r:*', fileobj=f)
+      t = tarfile.open(mode='r:gz', fileobj=f)
       members = None
       if path_prefixes is not None:
         members = [m for m in t.getmembers()
@@ -183,15 +166,12 @@ def GetPlatformUrlPrefix(host_os):
   return CDS_URL + '/' + _HOST_OS_URL_MAP[host_os] + '/'
 
 
-def DownloadAndUnpackPackage(package_file,
-                             output_dir,
-                             host_os,
-                             version=PACKAGE_VERSION):
-  cds_file = "%s-%s.tar.xz" % (package_file, version)
+def DownloadAndUnpackPackage(package_file, output_dir, host_os):
+  cds_file = "%s-%s.tgz" % (package_file, PACKAGE_VERSION)
   cds_full_url = GetPlatformUrlPrefix(host_os) + cds_file
   try:
     DownloadAndUnpack(cds_full_url, output_dir)
-  except urllib.error.URLError:
+  except URLError:
     print('Failed to download prebuilt clang package %s' % cds_file)
     print('Use build.py if you want to build locally.')
     print('Exiting.')
@@ -199,7 +179,7 @@ def DownloadAndUnpackPackage(package_file,
 
 
 def DownloadAndUnpackClangMacRuntime(output_dir):
-  cds_file = "clang-%s.tar.xz" % PACKAGE_VERSION
+  cds_file = "clang-%s.tgz" % PACKAGE_VERSION
   # We run this only for the runtime libraries, and 'mac' and 'mac-arm64' both
   # have the same (universal) runtime libraries. It doesn't matter which one
   # we download here.
@@ -209,7 +189,7 @@ def DownloadAndUnpackClangMacRuntime(output_dir):
   ]
   try:
     DownloadAndUnpack(cds_full_url, output_dir, path_prefixes)
-  except urllib.error.URLError:
+  except URLError:
     print('Failed to download prebuilt clang %s' % cds_file)
     print('Use build.py if you want to build locally.')
     print('Exiting.')
@@ -218,14 +198,14 @@
 
 # TODO(hans): Create a clang-win-runtime package instead.
 def DownloadAndUnpackClangWinRuntime(output_dir):
-  cds_file = "clang-%s.tar.xz" % PACKAGE_VERSION
+  cds_file = "clang-%s.tgz" % PACKAGE_VERSION
   cds_full_url = GetPlatformUrlPrefix('win') + cds_file
   path_prefixes = [
       'lib/clang/' + RELEASE_VERSION + '/lib/windows', 'bin/llvm-symbolizer.exe'
   ]
   try:
     DownloadAndUnpack(cds_full_url, output_dir, path_prefixes)
-  except urllib.error.URLError:
+  except URLError:
     print('Failed to download prebuilt clang %s' % cds_file)
     print('Use build.py if you want to build locally.')
     print('Exiting.')
@@ -240,14 +220,17 @@
 def UpdatePackage(package_name, host_os):
   if package_name == 'clang':
     stamp_file = STAMP_FILE
     package_file = 'clang'
+  elif package_name == 'clang-tidy':
+    package_file = 'clang-tidy'
+  elif package_name == 'objdump':
+    package_file = 'llvmobjdump'
+  elif package_name == 'translation_unit':
+    package_file = 'translation_unit'
   elif package_name == 'coverage_tools':
     stamp_file = os.path.join(LLVM_BUILD_DIR, 'cr_coverage_revision')
    package_file = 'llvm-code-coverage'
-  elif package_name == 'objdump':
-    package_file = 'llvmobjdump'
-  elif package_name in ['clang-libs', 'clang-tidy', 'clangd', 'libclang',
-                        'translation_unit']:
-    package_file = package_name
+  elif package_name == 'libclang':
+    package_file = 'libclang'
   else:
    print('Unknown package: "%s".'
          % package_name)
    return 1
 
@@ -294,7 +277,7 @@
   return 0
 
 
-def GetDefaultHostOs():
+def main():
   _PLATFORM_HOST_OS_MAP = {
       'darwin': 'mac',
       'cygwin': 'win',
@@ -304,10 +287,7 @@
   default_host_os = _PLATFORM_HOST_OS_MAP.get(sys.platform, sys.platform)
   if default_host_os == 'mac' and platform.machine() == 'arm64':
     default_host_os = 'mac-arm64'
-  return default_host_os
-
-def main():
 
   parser = argparse.ArgumentParser(description='Update clang.')
   parser.add_argument('--output-dir', help='Where to extract the package.')
@@ -315,9 +295,9 @@
                       help='What package to update (default: clang)',
                       default='clang')
   parser.add_argument('--host-os',
-                      help=('Which host OS to download for '
-                            '(default: %(default)s)'),
-                      default=GetDefaultHostOs(),
+                      help='Which host OS to download for (default: %s)' %
+                      default_host_os,
+                      default=default_host_os,
                       choices=('linux', 'mac', 'mac-arm64', 'win'))
   parser.add_argument('--print-revision', action='store_true',
                       help='Print current clang revision and exit.')
@@ -340,11 +320,6 @@
     print(RELEASE_VERSION)
     return 0
 
-  if args.output_dir:
-    global LLVM_BUILD_DIR, STAMP_FILE
-    LLVM_BUILD_DIR = os.path.abspath(args.output_dir)
-    STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
-
   if args.print_revision:
     if args.llvm_force_head_revision:
       force_head_revision = ReadStampFile(FORCE_HEAD_REVISION_FILE)
@@ -354,13 +329,6 @@
       print(force_head_revision)
       return 0
 
-    stamp_version = ReadStampFile(STAMP_FILE).partition(',')[0]
-    if PACKAGE_VERSION != stamp_version:
-      print('The expected clang version is %s but the actual version is %s' %
-            (PACKAGE_VERSION, stamp_version))
-      print('Did you run "gclient sync"?')
-      return 1
-
     print(PACKAGE_VERSION)
     return 0
 
@@ -368,6 +336,11 @@
     print('--llvm-force-head-revision can only be used for --print-revision')
     return 1
 
+  if args.output_dir:
+    global LLVM_BUILD_DIR, STAMP_FILE
+    LLVM_BUILD_DIR = os.path.abspath(args.output_dir)
+    STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
+
   return UpdatePackage(args.package, args.host_os)
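
---

For readers skimming the revert: the restored DownloadUrl above reduces to a Python 2/3-compatible urlopen import, a chunked read loop that prints a dotted progress bar, and a bounded retry on URLError that fails fast on HTTP 404. Below is a minimal, self-contained sketch of that pattern, not the script itself; download_url, CHUNK_SIZE, TOTAL_DOTS, and the retry defaults are illustrative stand-ins for the module-level constants update.py defines elsewhere.

import sys
import time

try:
  from urllib2 import HTTPError, URLError, urlopen  # Python 2
except ImportError:  # Python 3
  from urllib.error import HTTPError, URLError
  from urllib.request import urlopen

CHUNK_SIZE = 1024 * 1024  # assumed value: read the response 1 MiB at a time
TOTAL_DOTS = 10           # assumed value: width of the dotted progress bar


def download_url(url, output_file, num_retries=3, retry_wait_s=5):
  """Downloads url into output_file, retrying transient failures."""
  while True:
    try:
      sys.stdout.write('Downloading %s ' % url)
      sys.stdout.flush()
      response = urlopen(url)
      total_size = int(response.info().get('Content-Length').strip())
      bytes_done = 0
      dots_printed = 0
      while True:
        chunk = response.read(CHUNK_SIZE)
        if not chunk:
          break
        output_file.write(chunk)
        bytes_done += len(chunk)
        # Emit one dot per TOTAL_DOTS-th of the file as it arrives.
        num_dots = TOTAL_DOTS * bytes_done // total_size
        sys.stdout.write('.' * (num_dots - dots_printed))
        sys.stdout.flush()
        dots_printed = num_dots
      if bytes_done != total_size:
        raise URLError('only got %d of %d bytes' % (bytes_done, total_size))
      print(' Done.')
      return
    except URLError as e:
      sys.stdout.write('\n')
      print(e)
      # A 404 is permanent; retrying cannot help, so fail immediately.
      if num_retries == 0 or (isinstance(e, HTTPError) and e.code == 404):
        raise
      num_retries -= 1
      print('Retrying in %d s ...' % retry_wait_s)
      sys.stdout.flush()
      time.sleep(retry_wait_s)

Note that, like the restored script, this sketch does not rewind or truncate output_file before retrying (the clang 16 version being reverted added output_file.seek(0)/output_file.truncate() for exactly that reason), so a caller would typically pass a freshly opened binary file object per invocation, e.g. open('clang.tgz', 'wb').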